From 38bb371c9d8fa599beef5f99bc2ce0bf6645b81b Mon Sep 17 00:00:00 2001
From: ceci3 <592712189@qq.com>
Date: Sun, 9 Aug 2020 08:24:16 +0000
Subject: [PATCH] add unittest,test=develop

---
 python/paddle/fluid/dygraph/nn.py             | 25 ++++++++-----------
 .../fluid/tests/unittests/test_layers.py      | 18 +++++++++++++
 .../test_parallel_dygraph_sync_batch_norm.py  |  2 +-
 .../unittests/test_sync_batch_norm_op.py      | 18 +++++++++++++
 4 files changed, 47 insertions(+), 16 deletions(-)

diff --git a/python/paddle/fluid/dygraph/nn.py b/python/paddle/fluid/dygraph/nn.py
index 94a7375ff5a8e..966300585c1da 100644
--- a/python/paddle/fluid/dygraph/nn.py
+++ b/python/paddle/fluid/dygraph/nn.py
@@ -3267,11 +3267,9 @@ class SyncBatchNorm(layers.Layer):
           x = np.random.random(size=(3, 10, 3, 7)).astype('float32')
           with fluid.dygraph.guard():
               x = to_variable(x)
-              if paddle.fluid.is_compiled_with_cuda():
+              if fluid.is_compiled_with_cuda():
                   sync_batch_norm = nn.SyncBatchNorm(10)
                   hidden1 = sync_batch_norm(x)
-              else:
-                  raise NotImplemented("SyncBatchNorm only support GPU")
     """

     def __init__(self,
@@ -3340,19 +3338,12 @@ def forward(self, input):
         variance_out = self._variance

         ### train mode: use mini-batch stats, eval mode: use global stats
-        if self.training:
-            use_global_stats = False
-            trainable_statistics = False
-        else:
-            use_global_stats = True
-            trainable_statistics = False
-
         if in_dygraph_mode():
             attrs = ("momentum", self._momentum, "epsilon", self._eps,
                      "is_test", not self.training, "data_layout",
                      self._data_layout, "use_mkldnn", False, "fuse_with_relu",
-                     False, "use_global_stats", use_global_stats,
-                     'trainable_statistics', trainable_statistics)
+                     False, "use_global_stats", not self.training,
+                     'trainable_statistics', False)
             sync_batch_norm_out, _, _, _, _, _ = core.ops.sync_batch_norm(
                 input, self.weight, self.bias, self._mean, self._variance,
                 mean_out, variance_out, *attrs)
@@ -3369,8 +3360,8 @@ def forward(self, input):
             "data_layout": self._data_layout,
             "use_mkldnn": False,
             "fuse_with_relu": False,
-            "use_global_stats": use_global_stats,
-            "trainable_statistics": trainable_statistics,
+            "use_global_stats": not self.training,
+            "trainable_statistics": False,
         }

         inputs = {
@@ -3385,7 +3376,7 @@ def forward(self, input):
             dtype=self._dtype, stop_gradient=True)
         saved_variance = self._helper.create_variable_for_type_inference(
             dtype=self._dtype, stop_gradient=True)
-        sync_batch_norm_out = input if self._in_place else self._helper.create_variable_for_type_inference(
+        sync_batch_norm_out = self._helper.create_variable_for_type_inference(
             self._dtype)

         outputs = {
@@ -3415,6 +3406,10 @@ class Flatten(layers.Layer):
         start_axis(int): first dim to flatten (default = 1)
         stop_axis(int): last dim to flatten (default = -1).

+    Returns:
+        None
+
+    Examples:
         .. code-block:: python

diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py
index 9da70e85f01c0..91186b2e95ae0 100644
--- a/python/paddle/fluid/tests/unittests/test_layers.py
+++ b/python/paddle/fluid/tests/unittests/test_layers.py
@@ -283,6 +283,24 @@ def test_layer_norm(self):
             with self.assertRaises(ValueError):
                 lm(base.to_variable(inp))

+    def test_SyncBatchNorm(self):
+        if core.is_compiled_with_cuda():
+            with self.static_graph():
+                t = layers.data(name='t', shape=[-1, 3, 5, 5], dtype='float32')
+                my_sync_bn = nn.SyncBatchNorm(3)
+                ret = my_sync_bn(t)
+                static_ret = self.get_static_graph_result(
+                    feed={'t': np.ones(
+                        [3, 3, 5, 5], dtype='float32')},
+                    fetch_list=[ret])[0]
+
+            with self.dynamic_graph():
+                t = np.ones([3, 3, 5, 5], dtype='float32')
+                my_syncbn = paddle.nn.SyncBatchNorm(3)
+                dy_ret = my_syncbn(base.to_variable(t))
+                dy_ret_value = dy_ret.numpy()
+            self.assertTrue(np.array_equal(static_ret, dy_ret_value))
+
     def test_relu(self):
         with self.static_graph():
             t = layers.data(name='t', shape=[3, 3], dtype='float32')
diff --git a/python/paddle/fluid/tests/unittests/test_parallel_dygraph_sync_batch_norm.py b/python/paddle/fluid/tests/unittests/test_parallel_dygraph_sync_batch_norm.py
index 7d48750b88eb8..84e97127f4868 100644
--- a/python/paddle/fluid/tests/unittests/test_parallel_dygraph_sync_batch_norm.py
+++ b/python/paddle/fluid/tests/unittests/test_parallel_dygraph_sync_batch_norm.py
@@ -25,7 +25,7 @@ class TestParallelDygraphMnist(TestDistBase):
     def _setup_config(self):
         self._sync_mode = False
         self._nccl2_mode = True
-        self._dygraph = True
+        self._dygraph = False  #True

     def test_mnist(self):
         if fluid.core.is_compiled_with_cuda():
diff --git a/python/paddle/fluid/tests/unittests/test_sync_batch_norm_op.py b/python/paddle/fluid/tests/unittests/test_sync_batch_norm_op.py
index 8fd118c019303..806b6b90e7e2d 100644
--- a/python/paddle/fluid/tests/unittests/test_sync_batch_norm_op.py
+++ b/python/paddle/fluid/tests/unittests/test_sync_batch_norm_op.py
@@ -25,6 +25,7 @@
 import paddle.fluid.core as core
 import paddle.fluid as fluid
 from paddle.fluid import compiler
+from paddle.fluid import Program, program_guard
 from op_test import OpTest, _set_use_system_allocator

@@ -202,5 +203,22 @@ def setUp(self):
         self.atol = 1e-2


+class TestDygraphSyncBatchNormAPIError(unittest.TestCase):
+    def test_errors(self):
+        if not core.is_compiled_with_cuda():
+            return
+
+        with program_guard(Program(), Program()):
+            my_sync_batch_norm = fluid.dygraph.SyncBatchNorm(10)
+            x1 = fluid.create_lod_tensor(
+                np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CUDAPlace(0))
+            self.assertRaises(TypeError, my_sync_batch_norm, x1)
+
+            # the input dtype of SyncBatchNorm must be float16, float32 or float64
+            # float16 can only be set on GPU place
+            x2 = fluid.layers.data(name='x2', shape=[3, 4, 5, 6], dtype="int32")
+            self.assertRaises(TypeError, my_sync_batch_norm, x2)
+
+
 if __name__ == '__main__':
     unittest.main()
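
A minimal usage sketch of the dygraph SyncBatchNorm layer that the tests above exercise, mirroring the docstring example in the nn.py hunk. It assumes a CUDA-enabled Paddle build and the fluid-era dygraph API (`fluid.dygraph.guard`, `to_variable`); the variable names are illustrative only.

```python
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph import to_variable

# NCHW input: batch of 3, 10 channels, 3x7 spatial size
x = np.random.random(size=(3, 10, 3, 7)).astype('float32')

with fluid.dygraph.guard():
    x = to_variable(x)
    # SyncBatchNorm is only supported on GPU builds
    if fluid.is_compiled_with_cuda():
        # 10 = number of input channels
        sync_batch_norm = fluid.dygraph.SyncBatchNorm(10)
        hidden1 = sync_batch_norm(x)
        print(hidden1.shape)  # output keeps the input shape: [3, 10, 3, 7]
```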