diff --git a/python/paddle/base/layer_helper_base.py b/python/paddle/base/layer_helper_base.py
index 760986aa3c963..74ba6408ef8d8 100644
--- a/python/paddle/base/layer_helper_base.py
+++ b/python/paddle/base/layer_helper_base.py
@@ -97,7 +97,9 @@ def to_variable(self, value, name=None):
                 name if name else None,
                 True,
             )
-        elif isinstance(value, (Variable, core.eager.Tensor)):
+        elif isinstance(
+            value, (Variable, core.eager.Tensor, paddle.pir.OpResult)
+        ):
             return value
         else:
             raise TypeError(
diff --git a/python/paddle/nn/functional/distance.py b/python/paddle/nn/functional/distance.py
index dc69092daed08..113df166a027a 100644
--- a/python/paddle/nn/functional/distance.py
+++ b/python/paddle/nn/functional/distance.py
@@ -14,7 +14,7 @@
 
 import paddle
 from paddle import _C_ops
-from paddle.framework import in_dynamic_mode
+from paddle.framework import in_dynamic_or_pir_mode
 
 from ...base.data_feeder import check_type, check_variable_and_dtype
 from ...base.layer_helper import LayerHelper
@@ -67,13 +67,11 @@ def pairwise_distance(x, y, p=2.0, epsilon=1e-6, keepdim=False, name=None):
             Tensor(shape=[2], dtype=float64, place=Place(cpu), stop_gradient=True,
             [4.99999860, 4.99999860])
 
     """
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         sub = _C_ops.subtract(x, y)
         # p_norm op has not used epsilon, so change it to the following.
         if epsilon != 0.0:
-            epsilon = paddle.base.dygraph.base.to_variable(
-                [epsilon], dtype=sub.dtype
-            )
+            epsilon = paddle.to_tensor([epsilon], dtype=sub.dtype)
             sub = _C_ops.add(sub, epsilon)
         return _C_ops.p_norm(sub, p, -1, 0.0, keepdim, False)
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index 0cfe560fdc068..203b98c78683b 100644
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -3518,7 +3518,6 @@ def broadcast_to(x, shape, name=None):
                     [[1, 2, 3],
                      [1, 2, 3]])
     """
-
     if in_dynamic_mode():
         return _C_ops.expand(x, shape)
     elif in_pir_mode():
@@ -4918,7 +4917,7 @@ def take_along_axis(arr, indices, axis):
     if not broadcast_shape:
         # if indices matrix have larger size than arr, arr should broadcast into indices shape.
         broadcast_shape = indices.shape
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         indices = paddle.broadcast_to(indices, broadcast_shape)
         broadcast_shape_list = list(broadcast_shape)
         broadcast_shape_list[axis] = list(arr.shape)[axis]
diff --git a/test/legacy_test/test_broadcast_to_op.py b/test/legacy_test/test_broadcast_to_op.py
index 7c8166919a66c..5e2bb7c1ed161 100644
--- a/test/legacy_test/test_broadcast_to_op.py
+++ b/test/legacy_test/test_broadcast_to_op.py
@@ -18,8 +18,8 @@
 
 import paddle
 from paddle import base
-from paddle.base import Program, program_guard
 from paddle.pir_utils import test_with_pir_api
+from paddle.static import Program, program_guard
 
 paddle.enable_static()
 
@@ -45,34 +45,36 @@ class TestBroadcastToAPI(unittest.TestCase):
     # base.backward.calc_gradient maybe not support pir
     # AttributeError: 'paddle.base.libpaddle.pir.Program' object has no attribute '_appending_grad_times'
     def test_api(self):
-        input = np.random.random([12, 14]).astype("float32")
-        x = paddle.static.data(name='x', shape=[12, 14], dtype="float32")
-
-        positive_2 = paddle.tensor.fill_constant([1], "int32", 12)
-        expand_shape = paddle.static.data(
-            name="expand_shape",
-            shape=[2],
-            dtype="int32",
-        )
-
-        out_1 = paddle.broadcast_to(x, shape=[12, 14])
-        out_2 = paddle.broadcast_to(x, shape=[positive_2, 14])
-        out_3 = paddle.broadcast_to(x, shape=expand_shape)
-
-        g0 = base.backward.calc_gradient(out_2, x)
-
-        exe = base.Executor(place=base.CPUPlace())
-        res_1, res_2, res_3 = exe.run(
-            base.default_main_program(),
-            feed={
-                "x": input,
-                "expand_shape": np.array([12, 14]).astype("int32"),
-            },
-            fetch_list=[out_1, out_2, out_3],
-        )
-        np.testing.assert_array_equal(res_1, np.tile(input, (1, 1)))
-        np.testing.assert_array_equal(res_2, np.tile(input, (1, 1)))
-        np.testing.assert_array_equal(res_3, np.tile(input, (1, 1)))
+        with paddle.static.program_guard(
+            paddle.static.Program(), paddle.static.Program()
+        ):
+            input = np.random.random([12, 14]).astype("float32")
+            x = paddle.static.data(name='x', shape=[12, 14], dtype="float32")
+
+            positive_2 = paddle.tensor.fill_constant([1], "int32", 12)
+            expand_shape = paddle.static.data(
+                name="expand_shape",
+                shape=[2],
+                dtype="int32",
+            )
+
+            out_1 = paddle.broadcast_to(x, shape=[12, 14])
+            out_2 = paddle.broadcast_to(x, shape=[positive_2, 14])
+            out_3 = paddle.broadcast_to(x, shape=expand_shape)
+
+            g0 = base.backward.calc_gradient(out_2, x)
+
+            exe = base.Executor(place=base.CPUPlace())
+            res_1, res_2, res_3 = exe.run(
+                feed={
+                    "x": input,
+                    "expand_shape": np.array([12, 14]).astype("int32"),
+                },
+                fetch_list=[out_1, out_2, out_3],
+            )
+            np.testing.assert_array_equal(res_1, np.tile(input, (1, 1)))
+            np.testing.assert_array_equal(res_2, np.tile(input, (1, 1)))
+            np.testing.assert_array_equal(res_3, np.tile(input, (1, 1)))
 
     @test_with_pir_api
     def test_api_fp16_gpu(self):
diff --git a/test/legacy_test/test_pairwise_distance.py b/test/legacy_test/test_pairwise_distance.py
index 2f9199f48c04a..099b5ed085065 100644
--- a/test/legacy_test/test_pairwise_distance.py
+++ b/test/legacy_test/test_pairwise_distance.py
@@ -18,6 +18,7 @@
 
 import paddle
 from paddle import base
+from paddle.pir_utils import test_with_pir_api
 
 
 def np_pairwise_distance(x, y, p=2.0, epsilon=1e-6, keepdim=False):
@@ -77,6 +78,7 @@ def test_static(
 def test_dygraph(
     place, x_np, y_np, p=2.0, epsilon=1e-6, keepdim=False, functional=False
 ):
+    paddle.disable_static()
     x = paddle.to_tensor(x_np)
     y = paddle.to_tensor(y_np)
     if functional:
@@ -88,6 +90,7 @@ def test_dygraph(
         x=x, y=y, p=p, epsilon=epsilon, keepdim=keepdim
     )
     dygraph_ret = dy_distance.numpy()
+    paddle.enable_static()
     return dygraph_ret
 
 
@@ -109,14 +112,6 @@ def test_pairwise_distance(self):
                             x_np = np.random.random(shape).astype(dtype)
                             y_np = np.random.random(shape).astype(dtype)
 
-                            static_ret = test_static(
-                                place,
-                                x_np,
-                                y_np,
-                                p,
-                                epsilon=epsilon,
-                                keepdim=keepdim,
-                            )
                             dygraph_ret = test_dygraph(
                                 place,
                                 x_np,
@@ -129,27 +124,14 @@ def test_pairwise_distance(self):
                                 x_np, y_np, p, epsilon=epsilon, keepdim=keepdim
                             )
-                            self.assertEqual(
-                                static_ret.shape, excepted_value.shape
-                            )
                             self.assertEqual(
                                 dygraph_ret.shape, excepted_value.shape
                             )
 
-                            np.testing.assert_allclose(
-                                static_ret, excepted_value, rtol=1e-05
-                            )
                             np.testing.assert_allclose(
                                 dygraph_ret, excepted_value, rtol=1e-05
                             )
 
-                            static_functional_ret = test_static(
-                                place,
-                                x_np,
-                                y_np,
-                                p,
-                                epsilon=epsilon,
-                                keepdim=keepdim,
-                            )
+
                             dygraph_functional_ret = test_dygraph(
                                 place,
                                 x_np,
@@ -159,26 +141,58 @@ def test_pairwise_distance(self):
                                 keepdim=keepdim,
                             )
 
-                            self.assertEqual(
-                                static_functional_ret.shape,
-                                excepted_value.shape,
-                            )
                             self.assertEqual(
                                 dygraph_functional_ret.shape,
                                 excepted_value.shape,
                             )
 
-                            np.testing.assert_allclose(
-                                static_functional_ret,
-                                excepted_value,
-                                rtol=1e-05,
-                            )
                             np.testing.assert_allclose(
                                 dygraph_functional_ret,
                                 excepted_value,
                                 rtol=1e-05,
                             )
 
+                            @test_with_pir_api
+                            def dynamic_and_pir_mode_test():
+                                static_ret = test_static(
+                                    place,
+                                    x_np,
+                                    y_np,
+                                    p,
+                                    epsilon=epsilon,
+                                    keepdim=keepdim,
+                                )
+
+                                self.assertEqual(
+                                    static_ret.shape, excepted_value.shape
+                                )
+
+                                np.testing.assert_allclose(
+                                    static_ret, excepted_value, rtol=1e-05
+                                )
+
+                                static_functional_ret = test_static(
+                                    place,
+                                    x_np,
+                                    y_np,
+                                    p,
+                                    epsilon=epsilon,
+                                    keepdim=keepdim,
+                                )
+
+                                self.assertEqual(
+                                    static_functional_ret.shape,
+                                    excepted_value.shape,
+                                )
+
+                                np.testing.assert_allclose(
+                                    static_functional_ret,
+                                    excepted_value,
+                                    rtol=1e-05,
+                                )
+
+                            dynamic_and_pir_mode_test()
+
     def test_pairwise_distance_broadcast_1(self):
         shape_x = [100, 100]
         shape_y = [100, 1]
@@ -187,9 +201,7 @@ def test_pairwise_distance_broadcast_1(self):
         place = paddle.CPUPlace()
         x_np = np.random.random(shape_x).astype('float32')
         y_np = np.random.random(shape_y).astype('float32')
-        static_ret = test_static(
-            place=place, x_np=x_np, y_np=y_np, epsilon=epsilon, keepdim=keepdim
-        )
+
         dygraph_ret = test_dygraph(
             place=place, x_np=x_np, y_np=y_np, epsilon=epsilon, keepdim=keepdim
         )
@@ -197,20 +209,10 @@ def test_pairwise_distance_broadcast_1(self):
             x_np, y_np, epsilon=epsilon, keepdim=keepdim
         )
 
-        self.assertEqual(static_ret.shape, excepted_value.shape)
         self.assertEqual(dygraph_ret.shape, excepted_value.shape)
 
-        np.testing.assert_allclose(static_ret, excepted_value, rtol=1e-05)
        np.testing.assert_allclose(dygraph_ret, excepted_value, rtol=1e-05)
 
-        static_functional_ret = test_static(
-            place=place,
-            x_np=x_np,
-            y_np=y_np,
-            epsilon=epsilon,
-            keepdim=keepdim,
-            functional=True,
-        )
         dygraph_functional_ret = test_dygraph(
             place=place,
             x_np=x_np,
@@ -220,16 +222,41 @@ def test_pairwise_distance_broadcast_1(self):
             functional=True,
         )
 
-        self.assertEqual(static_functional_ret.shape, excepted_value.shape)
         self.assertEqual(dygraph_functional_ret.shape, excepted_value.shape)
 
-        np.testing.assert_allclose(
-            static_functional_ret, excepted_value, rtol=1e-05
-        )
         np.testing.assert_allclose(
             dygraph_functional_ret, excepted_value, rtol=1e-05
         )
 
+        @test_with_pir_api
+        def dynamic_and_pir_mode_test():
+            static_ret = test_static(
+                place=place,
+                x_np=x_np,
+                y_np=y_np,
+                epsilon=epsilon,
+                keepdim=keepdim,
+            )
+
+            self.assertEqual(static_ret.shape, excepted_value.shape)
+
+            np.testing.assert_allclose(static_ret, excepted_value, rtol=1e-05)
+            static_functional_ret = test_static(
+                place=place,
+                x_np=x_np,
+                y_np=y_np,
+                epsilon=epsilon,
+                keepdim=keepdim,
+                functional=True,
+            )
+
+            self.assertEqual(static_functional_ret.shape, excepted_value.shape)
+            np.testing.assert_allclose(
+                static_functional_ret, excepted_value, rtol=1e-05
+            )
+
+        dynamic_and_pir_mode_test()
+
     def test_pairwise_distance_broadcast_2(self):
         shape_x = [100, 100]
         shape_y = [100]
@@ -238,9 +265,7 @@ def test_pairwise_distance_broadcast_2(self):
         place = paddle.CPUPlace()
         x_np = np.random.random(shape_x).astype('float32')
         y_np = np.random.random(shape_y).astype('float32')
-        static_ret = test_static(
-            place=place, x_np=x_np, y_np=y_np, epsilon=epsilon, keepdim=keepdim
-        )
+
         dygraph_ret = test_dygraph(
             place=place, x_np=x_np, y_np=y_np, epsilon=epsilon, keepdim=keepdim
         )
@@ -249,20 +274,10 @@ def test_pairwise_distance_broadcast_2(self):
             x_np, y_np, epsilon=epsilon, keepdim=keepdim
         )
 
-        self.assertEqual(static_ret.shape, excepted_value.shape)
         self.assertEqual(dygraph_ret.shape, excepted_value.shape)
 
-        np.testing.assert_allclose(static_ret, excepted_value, rtol=1e-05)
         np.testing.assert_allclose(dygraph_ret, excepted_value, rtol=1e-05)
 
-        static_functional_ret = test_static(
-            place=place,
-            x_np=x_np,
-            y_np=y_np,
-            epsilon=epsilon,
-            keepdim=keepdim,
-            functional=True,
-        )
         dygraph_functional_ret = test_dygraph(
             place=place,
             x_np=x_np,
@@ -272,16 +287,44 @@ def test_pairwise_distance_broadcast_2(self):
             functional=True,
         )
 
-        self.assertEqual(static_functional_ret.shape, excepted_value.shape)
         self.assertEqual(dygraph_functional_ret.shape, excepted_value.shape)
 
-        np.testing.assert_allclose(
-            static_functional_ret, excepted_value, rtol=1e-05
-        )
         np.testing.assert_allclose(
             dygraph_functional_ret, excepted_value, rtol=1e-05
         )
 
+        @test_with_pir_api
+        def dynamic_and_pir_mode_test():
+            static_ret = test_static(
+                place=place,
+                x_np=x_np,
+                y_np=y_np,
+                epsilon=epsilon,
+                keepdim=keepdim,
+            )
+
+            self.assertEqual(static_ret.shape, excepted_value.shape)
+
+            np.testing.assert_allclose(static_ret, excepted_value, rtol=1e-05)
+
+            static_functional_ret = test_static(
+                place=place,
+                x_np=x_np,
+                y_np=y_np,
+                epsilon=epsilon,
+                keepdim=keepdim,
+                functional=True,
+            )
+
+            self.assertEqual(static_functional_ret.shape, excepted_value.shape)
+
+            np.testing.assert_allclose(
+                static_functional_ret, excepted_value, rtol=1e-05
+            )
+
+        dynamic_and_pir_mode_test()
+
+    @test_with_pir_api
     def test_pairwise_distance_fp16(self):
         shape = [100, 100]
         if not paddle.device.is_compiled_with_cuda():
diff --git a/test/legacy_test/test_take_along_axis_op.py b/test/legacy_test/test_take_along_axis_op.py
index b86bb0222ec7e..54aa0388c7541 100644
--- a/test/legacy_test/test_take_along_axis_op.py
+++ b/test/legacy_test/test_take_along_axis_op.py
@@ -19,6 +19,7 @@
 
 import paddle
 from paddle.framework import core
+from paddle.pir_utils import test_with_pir_api
 
 paddle.enable_static()
 
@@ -43,10 +44,12 @@ def setUp(self):
         self.outputs = {'Result': self.target}
 
     def test_check_output(self):
-        self.check_output(check_cinn=self.check_cinn)
+        self.check_output(check_cinn=self.check_cinn, check_pir=True)
 
     def test_check_grad(self):
-        self.check_grad(['Input'], 'Result', check_cinn=self.check_cinn)
+        self.check_grad(
+            ['Input'], 'Result', check_cinn=self.check_cinn, check_pir=True
+        )
 
     def init_data(self):
         self.x_type = "float64"
@@ -101,11 +104,17 @@ def setUp(self):
         self.place = core.CUDAPlace(0)
 
     def test_check_output(self):
-        self.check_output_with_place(self.place, check_cinn=self.check_cinn)
+        self.check_output_with_place(
+            self.place, check_cinn=self.check_cinn, check_pir=True
+        )
 
     def test_check_grad(self):
         self.check_grad_with_place(
-            self.place, ['Input'], 'Result', check_cinn=self.check_cinn
+            self.place,
+            ['Input'],
+            'Result',
+            check_cinn=self.check_cinn,
+            check_pir=True,
         )
 
     def init_data(self):
@@ -142,6 +151,7 @@ def setUp(self):
         if core.is_compiled_with_cuda():
             self.place.append(paddle.CUDAPlace(0))
 
+    @test_with_pir_api
    def test_api_static(self):
         paddle.enable_static()
         with paddle.static.program_guard(paddle.static.Program()):
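
Reviewer note (not part of the patch): the tests above move every static-graph check into a zero-argument inner function decorated with paddle.pir_utils.test_with_pir_api, which runs the wrapped callable both under the legacy static graph and with the new PIR IR enabled. Below is a minimal, self-contained sketch of that pattern; the function name and shapes are illustrative, and broadcast_to is used only as an example op.

    import numpy as np

    import paddle
    from paddle.pir_utils import test_with_pir_api

    paddle.enable_static()

    x_np = np.random.random([2, 3]).astype("float32")


    @test_with_pir_api
    def check_broadcast_to_static():
        # The decorator invokes this function twice (legacy IR, then PIR),
        # so the whole graph is built inside a fresh Program on each run.
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data(name="x", shape=[2, 3], dtype="float32")
            out = paddle.broadcast_to(x, shape=[2, 2, 3])
            exe = paddle.static.Executor(paddle.CPUPlace())
            # With no explicit program argument, exe.run picks up the
            # guarded default main program, as in the patched test_api.
            (res,) = exe.run(feed={"x": x_np}, fetch_list=[out])
            np.testing.assert_array_equal(res, np.broadcast_to(x_np, (2, 2, 3)))


    check_broadcast_to_static()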
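
Also worth noting: distance.py now materializes epsilon with paddle.to_tensor instead of the legacy paddle.base.dygraph.base.to_variable, which works in both dynamic mode and PIR program construction (the to_variable helper in layer_helper_base.py is extended to accept paddle.pir.OpResult for the same reason). A quick dygraph sanity check of the patched branch, which computes p_norm((x - y) + epsilon, p, axis=-1); shapes and tolerance here are illustrative:

    import numpy as np

    import paddle

    paddle.disable_static()

    x_np = np.random.random((10, 4)).astype("float64")
    y_np = np.random.random((10, 4)).astype("float64")
    epsilon = 1e-6

    # NumPy reference: p-norm of (x - y) + epsilon over the last axis.
    ref = np.linalg.norm(x_np - y_np + epsilon, ord=2, axis=-1)

    out = paddle.nn.functional.pairwise_distance(
        paddle.to_tensor(x_np), paddle.to_tensor(y_np), p=2.0, epsilon=epsilon
    )
    np.testing.assert_allclose(out.numpy(), ref, rtol=1e-05)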