[PIR API adaptor No.218, 223, 29] Migrate PairwiseDistance / take_along_axis into pir (PaddlePaddle#58689)

* PairwiseDistance take_along_axis

* add OpResult

* fix epsilon to_variable && rm test_with_pir_api of test_static

* disable_static

* add broadcast_to test

* rm test_errors of test_broadcast_to_op.TestBroadcastToError

* add some unit tests
DrRyanHuang authored and SecretXV committed Nov 28, 2023
1 parent 5fbb116 commit 2d3b79f
Showing 6 changed files with 160 additions and 106 deletions.
4 changes: 3 additions & 1 deletion python/paddle/base/layer_helper_base.py
@@ -97,7 +97,9 @@ def to_variable(self, value, name=None):
name if name else None,
True,
)
-        elif isinstance(value, (Variable, core.eager.Tensor)):
+        elif isinstance(
+            value, (Variable, core.eager.Tensor, paddle.pir.OpResult)
+        ):
return value
else:
raise TypeError(
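For context, a minimal sketch of the pass-through behavior this hunk widens. The helper construction and the dynamic-mode tensor below are illustrative assumptions, not part of the patch; the point is that to_variable now returns Variable, eager Tensor, and PIR OpResult values unchanged instead of falling through to the TypeError.

import paddle
from paddle.base.layer_helper_base import LayerHelperBase

helper = LayerHelperBase("demo", "demo")  # hypothetical instance for illustration
t = paddle.to_tensor([1.0])  # an eager Tensor in dynamic mode
assert helper.to_variable(t) is t  # returned as-is by the widened elif
# Under the PIR static graph, intermediate values are paddle.pir.OpResult
# and now take the same pass-through branch.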
8 changes: 3 additions & 5 deletions python/paddle/nn/functional/distance.py
@@ -14,7 +14,7 @@

import paddle
from paddle import _C_ops
-from paddle.framework import in_dynamic_mode
+from paddle.framework import in_dynamic_or_pir_mode

from ...base.data_feeder import check_type, check_variable_and_dtype
from ...base.layer_helper import LayerHelper
@@ -67,13 +67,11 @@ def pairwise_distance(x, y, p=2.0, epsilon=1e-6, keepdim=False, name=None):
Tensor(shape=[2], dtype=float64, place=Place(cpu), stop_gradient=True,
[4.99999860, 4.99999860])
"""
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
sub = _C_ops.subtract(x, y)
# p_norm op has not used epsilon, so change it to the following.
if epsilon != 0.0:
-            epsilon = paddle.base.dygraph.base.to_variable(
-                [epsilon], dtype=sub.dtype
-            )
+            epsilon = paddle.to_tensor([epsilon], dtype=sub.dtype)
sub = _C_ops.add(sub, epsilon)
return _C_ops.p_norm(sub, p, -1, 0.0, keepdim, False)

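For reference, a usage sketch matching the docstring excerpt above; the input values are assumed for illustration (only the printed output appears in this hunk). Note the epsilon handling: since p_norm does not consume epsilon, it is added to the difference before the norm, which is why the result is ~4.99999860 rather than exactly 5.0.

import paddle

x = paddle.to_tensor([[1.0, 3.0], [3.0, 3.0]], dtype='float64')
y = paddle.to_tensor([[5.0, 6.0], [6.0, 7.0]], dtype='float64')
# epsilon (default 1e-6) is added to (x - y) before the p-norm is taken
dist = paddle.nn.functional.pairwise_distance(x, y)
print(dist)
# Tensor(shape=[2], dtype=float64, place=Place(cpu), stop_gradient=True,
#        [4.99999860, 4.99999860])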
3 changes: 1 addition & 2 deletions python/paddle/tensor/manipulation.py
@@ -3518,7 +3518,6 @@ def broadcast_to(x, shape, name=None):
[[1, 2, 3],
[1, 2, 3]])
"""

if in_dynamic_mode():
return _C_ops.expand(x, shape)
elif in_pir_mode():
@@ -4918,7 +4917,7 @@ def take_along_axis(arr, indices, axis):
if not broadcast_shape:
# if indices matrix have larger size than arr, arr should broadcast into indices shape.
broadcast_shape = indices.shape
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
indices = paddle.broadcast_to(indices, broadcast_shape)
broadcast_shape_list = list(broadcast_shape)
broadcast_shape_list[axis] = list(arr.shape)[axis]
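For reference, a minimal sketch of the path this hunk migrates; the tensor values are assumptions for illustration.

import paddle

arr = paddle.to_tensor([[1, 2, 3], [4, 5, 6]])
idx = paddle.to_tensor([[0, 1, 2], [2, 1, 0]])
# idx already matches arr's shape, so the broadcast_to calls above are
# no-ops here; the kernel then gathers out[i, j] = arr[i, idx[i, j]]
out = paddle.take_along_axis(arr, idx, axis=1)
print(out)
# [[1, 2, 3],
#  [6, 5, 4]]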
60 changes: 31 additions & 29 deletions test/legacy_test/test_broadcast_to_op.py
@@ -18,8 +18,8 @@

import paddle
from paddle import base
-from paddle.base import Program, program_guard
from paddle.pir_utils import test_with_pir_api
+from paddle.static import Program, program_guard

paddle.enable_static()

@@ -45,34 +45,36 @@ class TestBroadcastToAPI(unittest.TestCase):
# base.backward.calc_gradient maybe not support pir
# AttributeError: 'paddle.base.libpaddle.pir.Program' object has no attribute '_appending_grad_times'
def test_api(self):
-        input = np.random.random([12, 14]).astype("float32")
-        x = paddle.static.data(name='x', shape=[12, 14], dtype="float32")
-
-        positive_2 = paddle.tensor.fill_constant([1], "int32", 12)
-        expand_shape = paddle.static.data(
-            name="expand_shape",
-            shape=[2],
-            dtype="int32",
-        )
-
-        out_1 = paddle.broadcast_to(x, shape=[12, 14])
-        out_2 = paddle.broadcast_to(x, shape=[positive_2, 14])
-        out_3 = paddle.broadcast_to(x, shape=expand_shape)
-
-        g0 = base.backward.calc_gradient(out_2, x)
-
-        exe = base.Executor(place=base.CPUPlace())
-        res_1, res_2, res_3 = exe.run(
-            base.default_main_program(),
-            feed={
-                "x": input,
-                "expand_shape": np.array([12, 14]).astype("int32"),
-            },
-            fetch_list=[out_1, out_2, out_3],
-        )
-        np.testing.assert_array_equal(res_1, np.tile(input, (1, 1)))
-        np.testing.assert_array_equal(res_2, np.tile(input, (1, 1)))
-        np.testing.assert_array_equal(res_3, np.tile(input, (1, 1)))
+        with paddle.static.program_guard(
+            paddle.static.Program(), paddle.static.Program()
+        ):
+            input = np.random.random([12, 14]).astype("float32")
+            x = paddle.static.data(name='x', shape=[12, 14], dtype="float32")
+
+            positive_2 = paddle.tensor.fill_constant([1], "int32", 12)
+            expand_shape = paddle.static.data(
+                name="expand_shape",
+                shape=[2],
+                dtype="int32",
+            )
+
+            out_1 = paddle.broadcast_to(x, shape=[12, 14])
+            out_2 = paddle.broadcast_to(x, shape=[positive_2, 14])
+            out_3 = paddle.broadcast_to(x, shape=expand_shape)
+
+            g0 = base.backward.calc_gradient(out_2, x)
+
+            exe = base.Executor(place=base.CPUPlace())
+            res_1, res_2, res_3 = exe.run(
+                feed={
+                    "x": input,
+                    "expand_shape": np.array([12, 14]).astype("int32"),
+                },
+                fetch_list=[out_1, out_2, out_3],
+            )
+            np.testing.assert_array_equal(res_1, np.tile(input, (1, 1)))
+            np.testing.assert_array_equal(res_2, np.tile(input, (1, 1)))
+            np.testing.assert_array_equal(res_3, np.tile(input, (1, 1)))

@test_with_pir_api
def test_api_fp16_gpu(self):
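The fresh program_guard wrapper above matters because test_with_pir_api re-runs a decorated test under both the legacy IR and PIR, so building ops into the shared default program would let state leak between runs. A minimal sketch of the pattern (shapes and names below are illustrative, not from the patch):

import numpy as np
import paddle

paddle.enable_static()

def build_and_run():
    # a fresh Program per invocation keeps repeated runs (legacy IR,
    # then PIR) from appending ops to one shared default program
    with paddle.static.program_guard(
        paddle.static.Program(), paddle.static.Program()
    ):
        x = paddle.static.data(name='x', shape=[2, 3], dtype='float32')
        out = paddle.broadcast_to(x, shape=[2, 2, 3])
        exe = paddle.static.Executor(paddle.CPUPlace())
        # exe.run defaults to the program made current by program_guard
        (res,) = exe.run(
            feed={'x': np.ones([2, 3], dtype='float32')},
            fetch_list=[out],
        )
    return res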