add more tests
kexinzhao committed Mar 16, 2018
1 parent a13ec34 commit e967d19
Showing 3 changed files with 75 additions and 34 deletions.
3 changes: 2 additions & 1 deletion paddle/fluid/operators/conv_cudnn_op.cu.cc
@@ -282,7 +282,8 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
     platform::CUDAPlace gpu = boost::get<platform::CUDAPlace>(ctx.GetPlace());
     cudnn_workspace = paddle::memory::Alloc(gpu, workspace_size_in_bytes);
     // ------------------- cudnn conv backward data ---------------------
-    T alpha = 1.0f, beta = 0.0f;
+    typename platform::CudnnDataType<T>::ScalingParamType alpha = 1.0f,
+                                                          beta = 0.0f;
     if (input_grad) {
       T* input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace());
       // Because beta is zero, it is unnecessary to reset input_grad.
23 changes: 10 additions & 13 deletions python/paddle/fluid/tests/unittests/op_test.py
@@ -470,29 +470,26 @@ def _numpy_to_lod_tensor(np_value, lod, place):
         return tensor
 
     @staticmethod
-    def create_view(input):
-        """Create a view of the input numpy array
+    def np_dtype_to_fluid_dtype(input):
+        """Change the dtype of float16 numpy array
         numpy float16 is binded to paddle::platform::float16
-        in tensor_py.h via the help of numpy uint16 because
+        in tensor_py.h via the help of uint16 data type since
         the internal memory representation of float16 is
-        uint16_t in paddle or np.uint16 in numpy, which are
-        themselves binded together.
+        uint16_t in paddle and np.uint16 in numpy, which are
+        themselves binded together by pybind.
         Args:
             input: input numpy array
         Returns:
-            input_view: if the dtype of input is np.float16, input_view
-                will reinterpret input as with dtype np.uint16.
-                Otherwise, input_view will be input itself.
+            input: if the dtype of input is np.float16, its dtype will be
+                changed to np.uint16 so that the internal memory will be
+                reinterpreted input as of dtype np.uint16.
         """
         if input.dtype == np.float16:
-            # view will only reinterpret memory without copying
-            input_view = input.view(np.uint16)
-        else:
-            input_view = input
-        return input_view
+            input.dtype = np.uint16
+        return input
 
     def _get_gradient(self, input_to_check, place, output_names, no_grad_set):
         prog = Program()
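The reinterpretation above works because np.float16 and np.uint16 share the same 16-bit element size, so reassigning the dtype only changes how the existing buffer is read. A small self-contained numpy sketch (illustrative only, not part of this commit) comparing the old view-based approach with the new in-place dtype reassignment:

import numpy as np

x = np.array([1.0, 2.5, -3.0], dtype=np.float16)

# Old approach: create a uint16 view of the same buffer (no copy).
bits_via_view = x.view(np.uint16).copy()

# New approach (np_dtype_to_fluid_dtype): reassign the dtype in place,
# reinterpreting the same 2-byte elements as uint16.
x.dtype = np.uint16

assert (x == bits_via_view).all()  # identical bit patterns

Note that the in-place version mutates the array passed in, which is why the test code below applies it to the input and filter arrays right as self.inputs is built.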
83 changes: 63 additions & 20 deletions python/paddle/fluid/tests/unittests/test_conv2d_op.py
@@ -82,18 +82,9 @@ def setUp(self):
         output = conv2d_forward_naive(input, filter, self.groups,
                                       conv2d_param).astype(self.dtype)
 
-        # numpy float16 is binded to paddle::platform::float16
-        # in tensor_py.h via the help of numpy uint16 because
-        # the internal memory representation of float16 is
-        # uint16_t in paddle or np.uint16 in numpy, which are
-        # themselves binded together.
         self.inputs = {
-            #'Input': (input.view(np.uint16)
-            #          if self.dtype == np.float16 else input),
-            #'Filter': (filter.view(np.uint16)
-            #           if self.dtype == np.float16 else filter)
-            'Input': OpTest.create_view(input),
-            'Filter': OpTest.create_view(filter)
+            'Input': OpTest.np_dtype_to_fluid_dtype(input),
+            'Filter': OpTest.np_dtype_to_fluid_dtype(filter)
         }
         self.attrs = {
             'strides': self.stride,
@@ -113,6 +104,8 @@ def test_check_output(self):
         self.check_output()
 
     def test_check_grad(self):
+        if self.dtype == np.float16:
+            return
         if self.use_cudnn:
             place = core.CUDAPlace(0)
             self.check_grad_with_place(
@@ -125,6 +118,8 @@ def test_check_grad(self):
                 set(['Input', 'Filter']), 'Output', max_relative_error=0.02)
 
     def test_check_grad_no_filter(self):
+        if self.dtype == np.float16:
+            return
         if self.use_cudnn:
             place = core.CUDAPlace(0)
             self.check_grad_with_place(
@@ -140,6 +135,8 @@ def test_check_grad_no_filter(self):
                 no_grad_set=set(['Filter']))
 
     def test_check_grad_no_input(self):
+        if self.dtype == np.float16:
+            return
         if self.use_cudnn:
             place = core.CUDAPlace(0)
             self.check_grad_with_place(
@@ -259,46 +256,92 @@ def test_check_output(self):
             if core.is_float16_supported(place):
                 self.check_output_with_place(place, atol=2e-2)
 
-    def test_check_grad(self):
-        pass
-
-    def test_check_grad_no_filter(self):
-        pass
-
-    def test_check_grad_no_input(self):
-        pass
-
 
 class TestCUDNNWithPad(TestWithPad):
     def init_op_type(self):
         self.use_cudnn = True
         self.op_type = "conv2d"
 
 
+class TestFP16CUDNNWithPad(TestCUDNNWithPad):
+    def init_data_type(self):
+        self.dtype = np.float16
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            if core.is_float16_supported(place):
+                self.check_output_with_place(place, atol=2e-2)
+
+
 class TestCUDNNWithStride(TestWithStride):
     def init_op_type(self):
         self.use_cudnn = True
         self.op_type = "conv2d"
 
 
+class TestFP16CUDNNWithStride(TestCUDNNWithStride):
+    def init_data_type(self):
+        self.dtype = np.float16
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            if core.is_float16_supported(place):
+                self.check_output_with_place(place, atol=2e-2)
+
+
 class TestCUDNNWithGroup(TestWithGroup):
     def init_op_type(self):
         self.use_cudnn = True
         self.op_type = "conv2d"
 
 
+class TestFP16CUDNNWithGroup(TestCUDNNWithGroup):
+    def init_data_type(self):
+        self.dtype = np.float16
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            if core.is_float16_supported(place):
+                self.check_output_with_place(place, atol=2e-2)
+
+
 class TestCUDNNWith1x1(TestWith1x1):
     def init_op_type(self):
         self.use_cudnn = True
         self.op_type = "conv2d"
 
 
+class TestFP16CUDNNWith1x1(TestCUDNNWith1x1):
+    def init_data_type(self):
+        self.dtype = np.float16
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            if core.is_float16_supported(place):
+                self.check_output_with_place(place, atol=2e-2)
+
+
 class TestCUDNNWithInput1x1Filter1x1(TestWithInput1x1Filter1x1):
     def init_op_type(self):
         self.use_cudnn = True
         self.op_type = "conv2d"
 
 
+class TestFP16CUDNNWithInput1x1Filter1x1(TestCUDNNWithInput1x1Filter1x1):
+    def init_data_type(self):
+        self.dtype = np.float16
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            if core.is_float16_supported(place):
+                self.check_output_with_place(place, atol=2e-2)
+
+
 class TestDepthwiseConv(TestConv2dOp):
     def init_test_case(self):
         self.pad = [1, 1]
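Each of the new TestFP16CUDNN* classes repeats the same float16 guard. Purely as an illustration (this helper is hypothetical and not part of the commit), the shared behaviour could be written once and mixed into the existing CUDNN test classes:

import numpy as np
import paddle.fluid.core as core


class Fp16OutputOnlyMixin(object):
    """Hypothetical mixin capturing the pattern used by the FP16 tests above:
    run with float16 data and check outputs only on a CUDA place that
    actually supports float16; gradient checks are skipped."""

    def init_data_type(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=2e-2)


# Usage, equivalent to one of the classes added in this commit:
# class TestFP16CUDNNWithPad(Fp16OutputOnlyMixin, TestCUDNNWithPad):
#     pass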
