Commit d5768eb

fix above comments

chengduoZH committed Aug 21, 2017
1 parent 38cc5da commit d5768eb
Showing 14 changed files with 247 additions and 201 deletions.
58 changes: 38 additions & 20 deletions paddle/cuda/include/hl_matrix.h
@@ -240,16 +240,25 @@ extern void hl_matrix_rotate(
* @param[in] strideW stride in the width.
* @param[in] paddingD padding in the depth.
* @param[in] paddingH padding in the height.
* @param[in] paddingW padding in the width.
* @param[out] dataDst output matrix.
*
*/
extern void hl_matrix_vol2Col(real* matSrc,
int channel, int depth, int height, int width,
int filterD, int filterH, int filterW,
int strideD, int strideH, int strideW,
int paddingD, int paddingH, int paddingW,
real* matDst);
extern void hl_matrix_vol2Col(const real* dataSrc,
int channels,
int depth,
int height,
int width,
int filterD,
int filterH,
int filterW,
int strideD,
int strideH,
int strideW,
int paddingD,
int paddingH,
int paddingW,
real* dataDst);

/**
* @brief Matrix col2Vol: Convert col matrix into 3D volume
@@ -267,19 +276,28 @@ extern void hl_matrix_vol2Col(real* matSrc,
* @param[in] strideW stride in the width.
* @param[in] paddingD padding in the depth.
* @param[in] paddingH padding in the height.
* @param[in] paddingW padding in the width.
* @param[in] dataSrc input matrix.
* @param[in] alpha scale applied to the converted column data.
* @param[in] beta scale applied to the existing output data.
*
*/
extern void hl_matrix_col2Vol(real* matDst,
int channels, int depth, int height, int width,
int filterD, int filterH, int filterW,
int strideD, int strideH, int strideW,
int paddingD, int paddingH, int paddingW,
real* matSrc,
real alpha, real beta);

extern void hl_matrix_col2Vol(real* dataDst,
int channels,
int depth,
int height,
int width,
int filterD,
int filterH,
int filterW,
int strideD,
int strideH,
int strideW,
int paddingD,
int paddingH,
int paddingW,
const real* dataSrc,
real alpha,
real beta);

#endif /* HL_MATRIX_H_ */
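The renamed parameters leave the vol2col geometry unchanged: each output dimension follows the usual convolution formula, out = (in + 2 * padding - filter) / stride + 1, exactly as computed in hl_cuda_matrix.cu below. A minimal standalone sketch of the resulting column-matrix shape, using made-up example values:

#include <cstdio>

// Sketch only: reproduces the depth_col/height_col/width_col computation
// from hl_cuda_matrix.cu to show the shape of the column matrix that
// hl_matrix_vol2Col writes. All concrete numbers are example values.
int main() {
  int channels = 3, depth = 8, height = 8, width = 8;
  int filterD = 3, filterH = 3, filterW = 3;
  int strideD = 1, strideH = 1, strideW = 1;
  int paddingD = 1, paddingH = 1, paddingW = 1;

  int depth_col = (depth + 2 * paddingD - filterD) / strideD + 1;    // 8
  int height_col = (height + 2 * paddingH - filterH) / strideH + 1;  // 8
  int width_col = (width + 2 * paddingW - filterW) / strideW + 1;    // 8

  // One row per (channel, filter-offset) pair, one column per output voxel.
  long rows = (long)channels * filterD * filterH * filterW;  // 3 * 27 = 81
  long cols = (long)depth_col * height_col * width_col;      // 8 * 8 * 8 = 512
  printf("col matrix: %ld x %ld\n", rows, cols);
  return 0;
}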
47 changes: 33 additions & 14 deletions paddle/cuda/include/stub/hl_matrix_stub.h
@@ -99,19 +99,38 @@ inline void hl_matrix_collect_shared_bias(real* B_d,
inline void hl_matrix_rotate(
real* mat, real* matRot, int dimM, int dimN, bool clockWise) {}

inline void hl_matrix_vol2Col(real* data,
int channels, int depth, int height, int width,
int filterD, int filterH, int filterW,
int strideD, int strideH, int strideW,
int paddingD, int paddingH, int paddingW,
real* data_col) {}

inline void hl_matrix_col2Vol(real* data,
int channels, int depth, int height, int width,
int filterD, int filterH, int filterW,
int strideD, int strideH, int strideW,
int paddingD, int paddingH, int paddingW,
real* data_Im,
real alpha, real beta) {}
inline void hl_matrix_vol2Col(const real* dataSrc,
int channels,
int depth,
int height,
int width,
int filterD,
int filterH,
int filterW,
int strideD,
int strideH,
int strideW,
int paddingD,
int paddingH,
int paddingW,
real* dataDst) {}

inline void hl_matrix_col2Vol(real* dataDst,
int channels,
int depth,
int height,
int width,
int filterD,
int filterH,
int filterW,
int strideD,
int strideH,
int strideW,
int paddingD,
int paddingH,
int paddingW,
const real* dataSrc,
real alpha,
real beta) {}

#endif // HL_MATRIX_STUB_H_
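The stubs mirror the GPU declarations with empty bodies so CPU-only builds still compile and link. Conceptually the wiring looks like the sketch below; the guard name is hypothetical, since this diff does not show how Paddle's build selects the stub header.

// Hypothetical guard name, for illustration only:
#ifdef HYPOTHETICAL_WITH_GPU
#include "hl_matrix.h"             // real CUDA implementations
#else
#include "stub/hl_matrix_stub.h"   // empty inline no-ops, same signatures
#endif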
84 changes: 42 additions & 42 deletions paddle/cuda/src/hl_cuda_matrix.cu
@@ -594,7 +594,7 @@ void hl_matrix_rotate(
}

__global__ void keMatrixVol2Col(int num_kernels,
real* dataSrc,
const real* dataSrc,
real* dataDst,
int depth,
int height,
@@ -643,7 +643,7 @@ __global__ void keMatrixVol2Col(int num_kernels,
}
}

void hl_matrix_vol2Col(real* dataSrc,
void hl_matrix_vol2Col(const real* dataSrc,
int channels,
int depth,
int height,
@@ -666,30 +666,30 @@ void hl_matrix_vol2Col(real* dataSrc,
const int threads = 512;
const int blocks = DIVUP(num_kernels, threads);

keMatrixVol2Col<<<blocks, threads>>>(num_kernels,
dataSrc,
dataDst,
depth,
height,
width,
filterD,
filterH,
filterW,
strideD,
strideH,
strideW,
paddingD,
paddingH,
paddingW,
depth_col,
height_col,
width_col);
keMatrixVol2Col<<<blocks, threads, 0, STREAM_DEFAULT>>>(num_kernels,
dataSrc,
dataDst,
depth,
height,
width,
filterD,
filterH,
filterW,
strideD,
strideH,
strideW,
paddingD,
paddingH,
paddingW,
depth_col,
height_col,
width_col);
CHECK_SYNC("hl_matrix_vol2Col failed");
}

__global__ void keMatrixCol2Vol(int num_kernels,
real* dataDst,
real* dataSrc,
const real* dataSrc,
int depth,
int height,
int width,
@@ -759,7 +759,7 @@ void hl_matrix_col2Vol(real* dataDst,
int paddingD,
int paddingH,
int paddingW,
real* dataSrc,
const real* dataSrc,
real alpha,
real beta) {
int depth_col = (depth + 2 * paddingD - filterD) / strideD + 1;
@@ -770,26 +770,26 @@ void hl_matrix_col2Vol(real* dataDst,
const int threads = 512;
const int blocks = DIVUP(num_kernels, threads);

keMatrixCol2Vol<<<blocks, threads>>>(num_kernels,
dataDst,
dataSrc,
depth,
height,
width,
filterD,
filterH,
filterW,
strideD,
strideH,
strideW,
paddingD,
paddingH,
paddingW,
depth_col,
height_col,
width_col,
alpha,
beta);
keMatrixCol2Vol<<<blocks, threads, 0, STREAM_DEFAULT>>>(num_kernels,
dataDst,
dataSrc,
depth,
height,
width,
filterD,
filterH,
filterW,
strideD,
strideH,
strideW,
paddingD,
paddingH,
paddingW,
depth_col,
height_col,
width_col,
alpha,
beta);

CHECK_SYNC("hl_matrix_col2Vol failed");
}
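Besides the const-qualification of the source pointers, the functional change in this file is the fourth launch parameter: both kernels now run on STREAM_DEFAULT explicitly rather than the implicit default stream, keeping them ordered with the library's other work on that stream. For reference, a generic sketch of the CUDA launch syntax used above (the kernel here is a placeholder, not Paddle code):

#include <cuda_runtime.h>

// Placeholder kernel to illustrate <<<grid, block, sharedMem, stream>>>.
__global__ void scaleKernel(int n, float* data) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= 2.0f;
}

void launchOnStream(int n, float* data, cudaStream_t stream) {
  const int threads = 512;
  const int blocks = (n + threads - 1) / threads;  // same rounding as DIVUP
  // The third launch argument is dynamic shared memory (0 bytes here);
  // the fourth pins the launch to an explicit stream, as the kernels
  // above now do with STREAM_DEFAULT.
  scaleKernel<<<blocks, threads, 0, stream>>>(n, data);
}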
26 changes: 18 additions & 8 deletions paddle/gserver/layers/Conv3DLayer.cpp
@@ -28,16 +28,26 @@ bool Conv3DLayer::init(const LayerMap &layerMap,
const ConvConfig &conf = inputConfig.conv_conf();
M_.push_back(numFilters_ / conf.groups());
K_.push_back(filterPixels_[index] * filterChannels_[index]);
if (nullptr != weights_[index]->getW())
weights_[index]->getW()->reshape(weights_[index]->getW()->getWidth(),
weights_[index]->getW()->getHeight());
if (nullptr != weights_[index]->getWGrad())
weights_[index]->getWGrad()->reshape(
weights_[index]->getWGrad()->getWidth(),
weights_[index]->getWGrad()->getHeight());

// create a new weight
size_t height, width;
width = filterPixels_[index] * filterChannels_[index];
height = numFilters_;
CHECK_EQ(parameters_[index]->getSize(), width * height);
Weight *w = new Weight(height, width, parameters_[index]);
weights_.emplace_back(w);
++index;
}
CHECK(inputLayers_.size() == parameters_.size());
if (biasParameter_.get()) {
if (sharedBiases_) {
CHECK_EQ((size_t)numFilters_, biasParameter_->getSize());
biases_ =
std::unique_ptr<Weight>(new Weight(1, numFilters_, biasParameter_));
} else {
biases_ =
std::unique_ptr<Weight>(new Weight(1, getSize(), biasParameter_));
}
}
return true;
}
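Weight construction moves here from ConvBaseLayer::init (see the matching deletion below) so this layer can allocate its weights directly in the layout it needs, one row per filter, instead of creating them in the base class and reshaping afterwards. A small standalone sketch of the shape check, with made-up dimensions:

#include <cassert>
#include <cstddef>

// Sketch of the CHECK_EQ above with example values (not from the diff):
// a 3x3x3 filter over 4 input channels per group and 16 output filters.
int main() {
  const size_t numFilters = 16;
  const size_t filterPixels = 3 * 3 * 3;     // 27
  const size_t filterChannels = 4;
  const size_t parameterSize = 16 * 27 * 4;  // size the config must provide

  const size_t width = filterPixels * filterChannels;  // 108 per filter
  const size_t height = numFilters;                    // one row per filter
  assert(parameterSize == width * height);             // mirrors CHECK_EQ
  return 0;
}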

14 changes: 4 additions & 10 deletions paddle/gserver/layers/Conv3DLayer.h
@@ -12,13 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */


#pragma once

#include <vector>
#include "ConvBaseLayer.h"
#include "paddle/math/Matrix.h"
#include "paddle/math/MathUtils.h"
#include <vector>
#include "paddle/math/Matrix.h"

namespace paddle {

@@ -30,21 +28,17 @@ namespace paddle {
class Conv3DLayer : public ConvBaseLayer {
public:
explicit Conv3DLayer(const LayerConfig& config) : ConvBaseLayer(config) {}

~Conv3DLayer() {}

bool init(const LayerMap &layerMap, const ParameterMap &parameterMap);

size_t getSize();
bool init(const LayerMap& layerMap, const ParameterMap& parameterMap);

void forward(PassType passType);
void addBias();

void backward(const UpdateCallback& callback);

void bpropBiases();
void bpropData(int i);
void bpropWeights(int i);
size_t getSize();

protected:
// Figure out the dimensions for individual gemms.
26 changes: 3 additions & 23 deletions paddle/gserver/layers/ConvBaseLayer.cpp
@@ -21,8 +21,7 @@ bool ConvBaseLayer::init(const LayerMap& layerMap,
const ParameterMap& parameterMap) {
/* Initialize the basic parent class */
Layer::init(layerMap, parameterMap);
isDeconv_ = (config_.type() == "exconv" || config_.type() == "cudnn_conv" ||
config_.type() == "conv3d" || config_.type() == "deconv3d")
isDeconv_ = (config_.type() == "exconv" || config_.type() == "cudnn_conv")
? false
: true;

@@ -56,28 +55,9 @@ bool ConvBaseLayer::init(const LayerMap& layerMap,
}

CHECK(inputLayers_.size() == parameters_.size());
for (size_t i = 0; i < inputLayers_.size(); i++) {
size_t height, width;
height = filterPixels_[i] * filterChannels_[i];
width = (!isDeconv_) ? numFilters_ : channels_[i];

// create a new weight
CHECK_EQ(parameters_[i]->getSize(), width * height);
Weight* w = new Weight(height, width, parameters_[i]);
weights_.emplace_back(w);
}

/* initialize the biases_ */
if (biasParameter_.get()) {
if (sharedBiases_) {
CHECK_EQ((size_t)numFilters_, biasParameter_->getSize());
biases_ =
std::unique_ptr<Weight>(new Weight(1, numFilters_, biasParameter_));
} else {
biases_ =
std::unique_ptr<Weight>(new Weight(1, getSize(), biasParameter_));
}
}
// create new weights_ in derived class
// create new biases_ in derived class

// default caffe model
caffeMode_ = true;
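With the weight and bias construction deleted, ConvBaseLayer::init now only sets up geometry shared by all convolution layers; each derived layer builds weights_ and biases_ itself in whatever layout it needs, as Conv3DLayer does above. A rough sketch of the split (names simplified; not the real class interfaces):

// Simplified illustration of the new division of labor:
class ConvBase {
protected:
  bool initBase() {
    // shared setup only: strides, paddings, filterPixels_, ...
    return true;  // weights_/biases_ are no longer created here
  }
};

class Conv3D : public ConvBase {
public:
  bool init() {
    if (!initBase()) return false;
    // derived class picks its own weight shape, e.g. (numFilters x
    // filterPixels * filterChannels) as in the Conv3DLayer hunk above
    return true;
  }
};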
1 change: 0 additions & 1 deletion paddle/gserver/layers/ConvBaseLayer.h
@@ -23,7 +23,6 @@ namespace paddle {
* with learned filters and (optionally) adds biases.
*/


class ConvBaseLayer : public Layer {
protected:
typedef std::vector<int> IntV;