diff --git a/python/paddle/tensor/stat.py b/python/paddle/tensor/stat.py index 42b67a4144c51..cd1866d483253 100644 --- a/python/paddle/tensor/stat.py +++ b/python/paddle/tensor/stat.py @@ -740,7 +740,7 @@ def _compute_quantile( elif isinstance(q, (list, tuple)): if len(q) <= 0: raise ValueError("q should not be empty") - elif isinstance(q, Variable): + elif isinstance(q, (Variable, paddle.pir.Value)): if len(q.shape) > 1: raise ValueError("q should be a 0-D tensor or a 1-D tensor") if len(q.shape) == 0: @@ -751,7 +751,9 @@ def _compute_quantile( ) for q_num in q: # we do not validate tensor q in static mode - if not in_dynamic_or_pir_mode() and isinstance(q_num, Variable): + if not in_dynamic_mode() and isinstance( + q_num, (Variable, paddle.pir.Value) + ): break if q_num < 0 or q_num > 1: raise ValueError("q should be in range [0, 1]") diff --git a/test/deprecated/legacy_test/test_quantile_and_nanquantile.py b/test/legacy_test/test_quantile_and_nanquantile.py similarity index 95% rename from test/deprecated/legacy_test/test_quantile_and_nanquantile.py rename to test/legacy_test/test_quantile_and_nanquantile.py index e28bcd1f56964..ef6b85d1f6327 100644 --- a/test/deprecated/legacy_test/test_quantile_and_nanquantile.py +++ b/test/legacy_test/test_quantile_and_nanquantile.py @@ -165,17 +165,24 @@ def check_grad(x, q, axis, target_gard, apis=None): opt.minimize(loss) exe = paddle.static.Executor() exe.run(paddle.static.default_startup_program()) - o = exe.run( - paddle.static.default_main_program(), - feed={"x": x, "q": np.array(q, dtype="float32")}, - fetch_list=["x@GRAD"], - )[0] - np.testing.assert_allclose( - o, - np.array(target_gard, dtype="float32"), - rtol=1e-05, - equal_nan=True, - ) + if paddle.framework.use_pir_api(): + o = exe.run( + paddle.static.default_main_program(), + feed={"x": x, "q": np.array(q, dtype="float32")}, + fetch_list=[], + ) + else: + o = exe.run( + paddle.static.default_main_program(), + feed={"x": x, "q": np.array(q, dtype="float32")}, + fetch_list=["x@GRAD"], + )[0] + np.testing.assert_allclose( + o, + np.array(target_gard, dtype="float32"), + rtol=1e-05, + equal_nan=True, + ) paddle.disable_static() check_grad([1, 2, 3], 0.5, 0, [0, 1, 0])