【PIR API adaptor No.126、130】 Migrate lgamma, log1p into pir (PaddlePa…
MarioLulab authored and SecretXV committed Nov 28, 2023
1 parent f6423c5 commit 75c0f83
Showing 3 changed files with 24 additions and 14 deletions.
4 changes: 2 additions & 2 deletions python/paddle/tensor/math.py
@@ -3287,7 +3287,7 @@ def log1p(x, name=None):
[0.69314718]])
"""

- if in_dynamic_mode():
+ if in_dynamic_or_pir_mode():
return _C_ops.log1p(x)
else:
check_variable_and_dtype(
@@ -5042,7 +5042,7 @@ def lgamma(x, name=None):
Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
[1.31452453, 1.76149762, 2.25271273, 1.09579790])
"""
- if in_dynamic_mode():
+ if in_dynamic_or_pir_mode():
return _C_ops.lgamma(x)
else:
check_variable_and_dtype(
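For context, both math.py edits follow the same dispatch pattern: `in_dynamic_or_pir_mode()` routes dynamic-graph execution and the new PIR static graph straight to the C++ operator, while the legacy static graph keeps the old `LayerHelper` path. A minimal sketch of that pattern (simplified from python/paddle/tensor/math.py; the real function accepts more dtypes and resolves these helpers through relative imports):

```python
from paddle import _C_ops
from paddle.base.data_feeder import check_variable_and_dtype
from paddle.base.framework import in_dynamic_or_pir_mode
from paddle.base.layer_helper import LayerHelper


def log1p(x, name=None):
    # Dynamic graph and the new PIR static graph both call the C++ op directly.
    if in_dynamic_or_pir_mode():
        return _C_ops.log1p(x)
    # The legacy (old-IR) static graph still builds the op via LayerHelper.
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'log1p')
    helper = LayerHelper('log1p', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type='log1p', inputs={'X': x}, outputs={'Out': out})
    return out
```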
20 changes: 14 additions & 6 deletions test/legacy_test/test_activation_op.py
@@ -3648,13 +3648,17 @@ def setUp(self):
self.outputs = {'Out': out}
self.convert_input_output()

+ def test_check_output(self):
+ self.check_output(check_pir=True)

def test_check_grad(self):
if self.dtype == np.float16:
return
- self.check_grad(['X'], 'Out')
+ self.check_grad(['X'], 'Out', check_pir=True)


class Test_Log1p_Op_Fp16(unittest.TestCase):
+ @test_with_pir_api
def test_api_fp16(self):
with static_guard():
with static.program_guard(
@@ -3680,6 +3684,7 @@ def test_api_int(self):
np.testing.assert_allclose(y.numpy(), x_expect, rtol=1e-3)
paddle.enable_static()

+ @test_with_pir_api
def test_api_bf16(self):
with static_guard():
with static.program_guard(
@@ -3700,9 +3705,12 @@ def init_shape(self):


class TestLog1pAPI(unittest.TestCase):
+ @test_with_pir_api
def test_api(self):
with static_guard():
- with base.program_guard(base.Program(), base.Program()):
+ with base.program_guard(
+ paddle.static.Program(), paddle.static.Program()
+ ):
input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
data_x = paddle.static.data(
name="data_x",
@@ -3712,9 +3720,9 @@ def test_api(self):

out1 = paddle.log1p(data_x)
exe = base.Executor(place=base.CPUPlace())
- exe.run(base.default_startup_program())
+ exe.run(paddle.static.default_startup_program())
(res1,) = exe.run(
- base.default_main_program(),
+ paddle.static.default_main_program(),
feed={"data_x": input_x},
fetch_list=[out1],
)
@@ -4860,7 +4868,7 @@ def test_check_grad(self):
else:
create_test_act_fp16_class(TestLog2, check_pir=True)
create_test_act_fp16_class(TestLog10, check_pir=True)
- create_test_act_fp16_class(TestLog1p)
+ create_test_act_fp16_class(TestLog1p, check_pir=True)
create_test_act_fp16_class(TestSquare, check_pir=True)
create_test_act_fp16_class(TestPow, check_prim=True, check_prim_pir=True)
create_test_act_fp16_class(TestPow_API)
@@ -5015,7 +5023,7 @@ def test_check_grad(self):
else:
create_test_act_bf16_class(TestLog2, check_pir=True)
create_test_act_bf16_class(TestLog10, check_pir=True)
- create_test_act_bf16_class(TestLog1p)
+ create_test_act_bf16_class(TestLog1p, check_pir=True)
create_test_act_bf16_class(TestSquare, check_pir=True)
create_test_act_bf16_class(TestPow, check_prim=True)
create_test_act_bf16_class(TestPow_API)
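The test_activation_op.py changes follow the two conventions used for ops already migrated to PIR: op-level checks pass `check_pir=True` so `check_output`/`check_grad` also run under the PIR executor, and static-graph API tests are decorated with `@test_with_pir_api`, which re-runs the test body under both the legacy program and PIR. That is also why `TestLog1pAPI` switches from the `base.Program()`/`base.default_*_program()` helpers to their `paddle.static` equivalents. A rough sketch of the decorated API-test pattern (the class name is hypothetical; `test_with_pir_api` is assumed to come from `paddle.pir_utils`, as elsewhere in this test file):

```python
import unittest

import numpy as np

import paddle
from paddle.pir_utils import test_with_pir_api


class TestLog1pStaticPattern(unittest.TestCase):
    @test_with_pir_api  # runs the body once with the legacy IR and once with PIR
    def test_static_api(self):
        paddle.enable_static()
        try:
            with paddle.static.program_guard(
                paddle.static.Program(), paddle.static.Program()
            ):
                x_np = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
                x = paddle.static.data(name="x", shape=[11, 17], dtype="float64")
                out = paddle.log1p(x)
                exe = paddle.static.Executor(paddle.CPUPlace())
                exe.run(paddle.static.default_startup_program())
                (res,) = exe.run(
                    paddle.static.default_main_program(),
                    feed={"x": x_np},
                    fetch_list=[out],
                )
                np.testing.assert_allclose(res, np.log1p(x_np), rtol=1e-8)
        finally:
            paddle.disable_static()


if __name__ == "__main__":
    unittest.main()
```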
14 changes: 8 additions & 6 deletions test/legacy_test/test_lgamma_op.py
@@ -43,26 +43,26 @@ def init_dtype_type(self):
self.dtype = np.float64

def test_check_output(self):
- self.check_output()
+ self.check_output(check_pir=True)

def test_check_grad_normal(self):
- self.check_grad(['X'], 'Out', numeric_grad_delta=1e-7)
+ self.check_grad(['X'], 'Out', numeric_grad_delta=1e-7, check_pir=True)


class TestLgammaOpFp32(TestLgammaOp):
def init_dtype_type(self):
self.dtype = np.float32

def test_check_grad_normal(self):
- self.check_grad(['X'], 'Out', numeric_grad_delta=0.005)
+ self.check_grad(['X'], 'Out', numeric_grad_delta=0.005, check_pir=True)


class TestLgammaFP16Op(TestLgammaOp):
def init_dtype_type(self):
self.dtype = np.float16

def test_check_grad_normal(self):
- self.check_grad(['X'], 'Out')
+ self.check_grad(['X'], 'Out', check_pir=True)


@unittest.skipIf(
@@ -86,10 +86,12 @@ def setUp(self):

def test_check_output(self):
# After testing, bfloat16 needs to set the parameter place
- self.check_output_with_place(core.CUDAPlace(0))
+ self.check_output_with_place(core.CUDAPlace(0), check_pir=True)

def test_check_grad_normal(self):
- self.check_grad_with_place(core.CUDAPlace(0), ['X'], 'Out')
+ self.check_grad_with_place(
+ core.CUDAPlace(0), ['X'], 'Out', check_pir=True
+ )


class TestLgammaOpApi(unittest.TestCase):
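The test_lgamma_op.py changes are the op-level half of the same pattern: every `check_output*`/`check_grad*` call gains `check_pir=True` so the lgamma kernel is also verified through the PIR execution path. A minimal sketch of such an OpTest case (the class name is hypothetical and the setup is condensed from the real test/legacy_test/test_lgamma_op.py):

```python
import unittest

import numpy as np
from op_test import OpTest
from scipy import special

import paddle


class TestLgammaPattern(OpTest):
    def setUp(self):
        self.op_type = 'lgamma'
        self.python_api = paddle.lgamma
        x = np.random.uniform(1, 10, [5, 20]).astype(np.float64)
        self.inputs = {'X': x}
        # lgamma(x) == log|Gamma(x)|, which scipy exposes as gammaln.
        self.outputs = {'Out': special.gammaln(x)}

    def test_check_output(self):
        # check_pir=True also runs the check under the new PIR executor.
        self.check_output(check_pir=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', numeric_grad_delta=1e-7, check_pir=True)


if __name__ == '__main__':
    unittest.main()
```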
