address review comments, test=develop
qili93 committed Aug 17, 2020
1 parent d333d8e commit 9bbe51a
Showing 3 changed files with 62 additions and 83 deletions.
98 changes: 31 additions & 67 deletions python/paddle/fluid/tests/unittests/test_activation_op.py
@@ -138,8 +138,8 @@ def test_static_api(self):
         with paddle.static.program_guard(paddle.static.Program()):
             x = paddle.data('X', [11, 17])
             out1 = F.logsigmoid(x)
-            logsigmoid = paddle.nn.LogSigmoid()
-            out2 = logsigmoid(x)
+            m = paddle.nn.LogSigmoid()
+            out2 = m(x)
             exe = paddle.static.Executor(self.place)
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
             out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
@@ -150,22 +150,13 @@ def test_dygraph_api(self):
         paddle.disable_static(self.place)
         x = paddle.to_variable(self.x_np)
         out1 = F.logsigmoid(x)
-        logsigmoid = paddle.nn.LogSigmoid()
-        out2 = logsigmoid(x)
+        m = paddle.nn.LogSigmoid()
+        out2 = m(x)
         out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
         for r in [out1, out2]:
             self.assertEqual(np.allclose(out_ref, r.numpy()), True)
         paddle.enable_static()
 
-    def test_fluid_api(self):
-        with fluid.program_guard(fluid.Program()):
-            x = fluid.data('X', [11, 17])
-            out = fluid.layers.logsigmoid(x)
-            exe = fluid.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
-            out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
-            self.assertEqual(np.allclose(out_ref, res[0]), True)
-
     def test_errors(self):
         with paddle.static.program_guard(paddle.static.Program()):
             # The input type must be Variable.
@@ -715,8 +706,8 @@ def test_static_api(self):
         with paddle.static.program_guard(paddle.static.Program()):
             x = paddle.data('X', [10, 12])
             out1 = F.relu(x)
-            relu = paddle.nn.ReLU()
-            out2 = relu(x)
+            m = paddle.nn.ReLU()
+            out2 = m(x)
             exe = paddle.static.Executor(self.place)
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
             out_ref = np.maximum(self.x_np, 0)
@@ -727,22 +718,13 @@ def test_dygraph_api(self):
         paddle.disable_static(self.place)
         x = paddle.to_variable(self.x_np)
         out1 = F.relu(x)
-        relu = paddle.nn.ReLU()
-        out2 = relu(x)
+        m = paddle.nn.ReLU()
+        out2 = m(x)
         out_ref = np.maximum(self.x_np, 0)
         for r in [out1, out2]:
             self.assertEqual(np.allclose(out_ref, r.numpy()), True)
         paddle.enable_static()
 
-    def test_fluid_api(self):
-        with fluid.program_guard(fluid.Program()):
-            x = fluid.data('X', [10, 12])
-            out = fluid.layers.relu(x)
-            exe = fluid.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
-            out_ref = np.maximum(self.x_np, 0)
-            self.assertEqual(np.allclose(out_ref, res[0]), True)
-
     def test_errors(self):
         with paddle.static.program_guard(paddle.static.Program()):
             # The input type must be Variable.
@@ -788,7 +770,7 @@ def test_errors(self):
             fluid.layers.leaky_relu(x_fp16)
 
 
-def ref_gelu(x, approximate):
+def gelu(x, approximate):
     if approximate:
         y_ref = 0.5 * x * (1.0 + np.tanh(
             np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
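Aside, not part of the diff: the hunk above is cut off before the non-approximate branch of the renamed reference helper. A minimal self-contained sketch of such a reference is given below; the erf-based branch and the scipy.special.erf import are assumptions reconstructed from the GELU docstring changed elsewhere in this commit, not lines taken from the diff.

    import numpy as np
    from scipy.special import erf  # assumed import; not visible in the hunk above

    def gelu_reference(x, approximate):
        """NumPy reference for GELU, used to check the Paddle op (sketch)."""
        if approximate:
            # tanh approximation, as shown in the hunk above
            y = 0.5 * x * (1.0 + np.tanh(
                np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
        else:
            # exact form, reconstructed from the GELU docstring in this commit
            y = 0.5 * x * (1.0 + erf(x / np.sqrt(2)))
        return y.astype(x.dtype)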
@@ -803,7 +785,7 @@ def setUp(self):
         self.init_dtype()
         approximate = True
         x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
-        out = ref_gelu(x, approximate)
+        out = gelu(x, approximate)
 
         self.inputs = {'X': x}
         self.outputs = {'Out': out}
@@ -821,7 +803,7 @@ def setUp(self):
         self.init_dtype()
         approximate = False
         x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
-        out = ref_gelu(x, approximate)
+        out = gelu(x, approximate)
 
         self.inputs = {'X': x}
         self.outputs = {'Out': out}
@@ -844,41 +826,32 @@ def test_static_api(self):
         with paddle.static.program_guard(paddle.static.Program()):
             x = paddle.data('X', [11, 17])
             out1 = F.gelu(x)
-            gelu = paddle.nn.GELU()
-            out2 = gelu(x)
+            m = paddle.nn.GELU()
+            out2 = m(x)
             exe = paddle.static.Executor(self.place)
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
-            out_ref = ref_gelu(self.x_np, False)
+            out_ref = gelu(self.x_np, False)
             for r in res:
                 self.assertEqual(np.allclose(out_ref, r), True)
 
     def test_dygraph_api(self):
         paddle.disable_static(self.place)
         x = paddle.to_variable(self.x_np)
         out1 = F.gelu(x)
-        gelu = paddle.nn.GELU()
-        out2 = gelu(x)
-        out_ref = ref_gelu(self.x_np, False)
+        m = paddle.nn.GELU()
+        out2 = m(x)
+        out_ref = gelu(self.x_np, False)
         for r in [out1, out2]:
             self.assertEqual(np.allclose(out_ref, r.numpy()), True)
 
         out1 = F.gelu(x, True)
-        gelu = paddle.nn.GELU(True)
-        out2 = gelu(x)
-        out_ref = ref_gelu(self.x_np, True)
+        m = paddle.nn.GELU(True)
+        out2 = m(x)
+        out_ref = gelu(self.x_np, True)
         for r in [out1, out2]:
             self.assertEqual(np.allclose(out_ref, r.numpy()), True)
         paddle.enable_static()
 
-    def test_fluid_api(self):
-        with fluid.program_guard(fluid.Program()):
-            x = fluid.data('X', [11, 17])
-            out = fluid.layers.gelu(x)
-            exe = fluid.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
-            out_ref = ref_gelu(self.x_np, False)
-            self.assertEqual(np.allclose(out_ref, res[0]), True)
-
     def test_errors(self):
         with paddle.static.program_guard(paddle.static.Program()):
             # The input type must be Variable.
@@ -1040,7 +1013,7 @@ def test_errors(self):
             fluid.layers.soft_relu(x_fp16)
 
 
-def ref_elu(x, alpha):
+def elu(x, alpha):
     out_ref = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1))
     return out_ref.astype(x.dtype)
 
@@ -1052,7 +1025,7 @@ def setUp(self):
 
         x = np.random.uniform(-3, 3, [10, 12]).astype(self.dtype)
         alpha = 1.
-        out = ref_elu(x, alpha)
+        out = elu(x, alpha)
         # Note: unlike other Relu extensions, point 0 on standard ELU function (i.e. alpha = 1)
         # is differentiable, so we can skip modifications like x[np.abs(x) < 0.005] = 0.02 here
         self.inputs = {'X': x}
@@ -1076,41 +1049,32 @@ def test_static_api(self):
         with paddle.static.program_guard(paddle.static.Program()):
             x = paddle.data('X', [10, 12])
             out1 = F.elu(x)
-            elu = paddle.nn.ELU()
-            out2 = elu(x)
+            m = paddle.nn.ELU()
+            out2 = m(x)
             exe = paddle.static.Executor(self.place)
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
-            out_ref = ref_elu(self.x_np, 1.0)
+            out_ref = elu(self.x_np, 1.0)
             for r in res:
                 self.assertEqual(np.allclose(out_ref, r), True)
 
     def test_dygraph_api(self):
         paddle.disable_static(self.place)
         x = paddle.to_variable(self.x_np)
         out1 = F.elu(x)
-        elu = paddle.nn.ELU()
-        out2 = elu(x)
-        out_ref = ref_elu(self.x_np, 1.0)
+        m = paddle.nn.ELU()
+        out2 = m(x)
+        out_ref = elu(self.x_np, 1.0)
         for r in [out1, out2]:
             self.assertEqual(np.allclose(out_ref, r.numpy()), True)
 
         out1 = F.elu(x, 0.2)
-        elu = paddle.nn.ELU(0.2)
-        out2 = elu(x)
-        out_ref = ref_elu(self.x_np, 0.2)
+        m = paddle.nn.ELU(0.2)
+        out2 = m(x)
+        out_ref = elu(self.x_np, 0.2)
         for r in [out1, out2]:
             self.assertEqual(np.allclose(out_ref, r.numpy()), True)
         paddle.enable_static()
 
-    def test_fluid_api(self):
-        with fluid.program_guard(fluid.Program()):
-            x = fluid.data('X', [10, 12])
-            out = fluid.layers.elu(x)
-            exe = fluid.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
-            out_ref = ref_elu(self.x_np, 1.0)
-            self.assertEqual(np.allclose(out_ref, res[0]), True)
-
     def test_errors(self):
         with paddle.static.program_guard(paddle.static.Program()):
             # The input type must be Variable.
15 changes: 11 additions & 4 deletions python/paddle/nn/functional/activation.py
@@ -141,10 +141,17 @@ def gelu(x, approximate=False, name=None):
             paddle.disable_static()
 
-            x = paddle.to_tensor(np.array([[ 0.87165993, -1.0541513 , -0.37214822],
-                                           [ 0.15647964,  0.32496083,  0.33045998]]))
-            out = F.gelu(x) # [[ 0.70456535, -0.15380788, -0.13207214],
-                            #  [ 0.08796856,  0.20387867,  0.2080159 ]]
+            data = np.random.randn(2, 3).astype("float32")
+            x = paddle.to_tensor(data)
+
+            out = F.gelu(x)
+
+            data
+            # array([[ 0.87165993, -1.0541513 , -0.37214822],
+            #        [ 0.15647964,  0.32496083,  0.33045998]], dtype=float32)
+            out
+            # array([[ 0.70456535, -0.15380788, -0.13207214],
+            #        [ 0.08796856,  0.20387867,  0.2080159 ]], dtype=float32)
     """
 
     if in_dygraph_mode():
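As a quick sanity check on the updated docstring (illustrative only, not part of the commit), the hard-coded output values are consistent with the exact, non-approximate formula. A spot-check with Python's math.erf on the first two entries of the example arrays:

    import math

    def gelu_exact(v):
        # non-approximate GELU: 0.5 * v * (1 + erf(v / sqrt(2)))
        return 0.5 * v * (1.0 + math.erf(v / math.sqrt(2.0)))

    print(gelu_exact(0.87165993))  # ~0.70457, matching the documented 0.70456535
    print(gelu_exact(-1.0541513))  # ~-0.15381, matching the documented -0.15380788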
32 changes: 20 additions & 12 deletions python/paddle/nn/layer/activation.py
@@ -40,7 +40,7 @@ class ELU(layers.Layer):
 
     .. math::
 
-        elu(x) = max(0, x) + min(0, \\alpha * (e^{x}-1))
+        ELU(x) = max(0, x) + min(0, \\alpha * (e^{x}-1))
 
     Parameters:
         alpha (float, optional): The 'alpha' value of the ELU formulation. Default is 1.0.
@@ -61,8 +61,9 @@ class ELU(layers.Layer):
 
             x = paddle.to_tensor(np.array([[-1,6],[1,15.6]]))
             m = paddle.nn.ELU(0.2)
-            out = m(x) # [[-0.12642411  6.       ]
-                       #  [ 1.         15.6      ]]
+            out = m(x)
+            # [[-0.12642411  6.       ]
+            #  [ 1.         15.6      ]]
     """
 
     def __init__(self, alpha=1.0, name=None):
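The negative entry in the example output follows directly from the formula above with alpha = 0.2; positive inputs pass through unchanged. A one-line check (illustrative only, not part of the commit):

    import math

    alpha = 0.2
    # for x < 0, ELU(x) = alpha * (e**x - 1); here x = -1
    print(alpha * (math.exp(-1.0) - 1.0))  # ~-0.1264241, matching the -0.12642411 in the docstring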
@@ -82,13 +83,13 @@ class GELU(layers.Layer):
 
     .. math::
 
-        gelu(x) = 0.5 * x * (1 + tanh(\\sqrt{\\frac{2}{\\pi}} * (x + 0.044715x^{3})))
+        GELU(x) = 0.5 * x * (1 + tanh(\\sqrt{\\frac{2}{\\pi}} * (x + 0.044715x^{3})))
 
     else
 
     .. math::
 
-        gelu(x) = 0.5 * x * (1 + erf(\\frac{x}{\\sqrt{2}}))
+        GELU(x) = 0.5 * x * (1 + erf(\\frac{x}{\\sqrt{2}}))
 
     Parameters:
         approximate (bool, optional): Wether to enable approximation. Default is False.
@@ -107,11 +108,18 @@ class GELU(layers.Layer):
             paddle.disable_static()
 
-            x = paddle.to_tensor(np.array([[ 0.87165993, -1.0541513 , -0.37214822],
-                                           [ 0.15647964,  0.32496083,  0.33045998]]))
+            data = np.random.randn(2, 3).astype("float32")
+            x = paddle.to_tensor(data)
+
             m = paddle.nn.GELU()
-            out = m(x) # [[ 0.70456535, -0.15380788, -0.13207214],
-                       #  [ 0.08796856,  0.20387867,  0.2080159 ]]
+            out = m(x)
+
+            data
+            # array([[ 0.87165993, -1.0541513 , -0.37214822],
+            #        [ 0.15647964,  0.32496083,  0.33045998]], dtype=float32)
+            out
+            # array([[ 0.70456535, -0.15380788, -0.13207214],
+            #        [ 0.08796856,  0.20387867,  0.2080159 ]], dtype=float32)
     """
 
     def __init__(self, approximate=False, name=None):
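The two formulas in the docstring track each other closely, which is why the approximate flag is mostly a performance choice. A minimal NumPy comparison (a sketch, with scipy.special.erf assumed as the erf implementation for the exact form; neither line is taken from the diff):

    import numpy as np
    from scipy.special import erf  # assumed erf implementation for the exact form

    x = np.linspace(-3.0, 3.0, 7)
    approx = 0.5 * x * (1.0 + np.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * x**3)))
    exact = 0.5 * x * (1.0 + erf(x / np.sqrt(2.0)))
    print(np.max(np.abs(approx - exact)))  # small, on the order of 1e-4 over this range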
@@ -312,7 +320,7 @@ class ReLU(layers.Layer):
 
     .. math:
 
-        out = max(x, 0)
+        ReLU(x) = max(x, 0)
 
     Parameters:
         name (str, optional): Name for the operation (optional, default is None).
@@ -332,7 +340,7 @@ class ReLU(layers.Layer):
             x = paddle.to_tensor(np.array([-2, 0, 1]).astype('float32'))
             m = paddle.nn.ReLU()
-            out = m(x) # [0, 0, 1]
+            out = m(x) # [0., 0., 1.]
     """
 
     def __init__(self, name=None):
@@ -429,7 +437,7 @@ class LogSigmoid(layers.Layer):
 
     .. math:
 
-        logsigmoid(x) = \log \frac{1}{1 + e^{-x}}
+        LogSigmoid(x) = \log \frac{1}{1 + e^{-x}}
 
     Parameters:
         x (Tensor): The input Tensor with data type float32, or float64.
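For reference (illustrative only), the renamed formula is easy to spot-check and matches the NumPy expression used as the reference in the tests above, e.g. LogSigmoid(0) = log(1/2):

    import numpy as np

    x = np.array([0.0, 2.0], dtype=np.float32)
    print(np.log(1.0 / (1.0 + np.exp(-x))))  # approximately [-0.6931, -0.1269]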
