From b2a0dd202caa55ac65e9ca958c3d442b02bc9804 Mon Sep 17 00:00:00 2001 From: Wang Huan Date: Tue, 9 Jan 2024 11:14:26 +0000 Subject: [PATCH 1/5] rename data_layout --- paddle/fluid/pir/dialect/operator/ir/ops.yaml | 7 ++--- .../pir/dialect/operator/ir/ops_backward.yaml | 14 +++++----- paddle/phi/api/yaml/backward.yaml | 26 +++++++++---------- paddle/phi/api/yaml/fused_ops.yaml | 2 +- paddle/phi/api/yaml/legacy_backward.yaml | 14 +++++----- paddle/phi/api/yaml/legacy_ops.yaml | 4 +-- paddle/phi/api/yaml/ops.yaml | 12 ++++----- paddle/phi/api/yaml/sparse_backward.yaml | 8 +++--- paddle/phi/api/yaml/sparse_ops.yaml | 4 +-- 9 files changed, 46 insertions(+), 45 deletions(-) diff --git a/paddle/fluid/pir/dialect/operator/ir/ops.yaml b/paddle/fluid/pir/dialect/operator/ir/ops.yaml index 97fa1a6879e0a..bdd4abe3e37c9 100644 --- a/paddle/fluid/pir/dialect/operator/ir/ops.yaml +++ b/paddle/fluid/pir/dialect/operator/ir/ops.yaml @@ -133,7 +133,7 @@ backend : place > output - op : batch_norm - args : (Tensor x, Tensor mean, Tensor variance, Tensor scale, Tensor bias, bool is_test, float momentum, float epsilon, str data_layout, bool use_global_stats, bool trainable_statistics) + args : (Tensor x, Tensor mean, Tensor variance, Tensor scale, Tensor bias, bool is_test, float momentum, float epsilon, str data_format, bool use_global_stats, bool trainable_statistics) output : Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space) infer_meta: func : BatchNormInferMeta @@ -1100,13 +1100,14 @@ optional : out - op : seed - args : (int seed, bool deterministic, str rng_name, bool force_cpu) + args : (int seed, bool deterministic, str rng_name, Place place) output : Tensor(out) infer_meta: func: SeedInferMeta param: [seed] kernel: func: seed + backend : place - op : send_v2 args : (Tensor x, int ring_id = 0, int peer = 0, bool use_calc_stream = false, bool dynamic_shape = false) @@ -1264,7 +1265,7 @@ backward : swish_grad - op : sync_batch_norm_ - args : (Tensor x, Tensor mean, Tensor variance, Tensor scale, Tensor bias, bool is_test, float momentum, float epsilon, str data_layout, bool use_global_stats, bool trainable_statistics) + args : (Tensor x, Tensor mean, Tensor variance, Tensor scale, Tensor bias, bool is_test, float momentum, float epsilon, str data_format, bool use_global_stats, bool trainable_statistics) output : Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space) infer_meta : func : BatchNormInferMeta diff --git a/paddle/fluid/pir/dialect/operator/ir/ops_backward.yaml b/paddle/fluid/pir/dialect/operator/ir/ops_backward.yaml index 71124cf559396..7182274828d0f 100644 --- a/paddle/fluid/pir/dialect/operator/ir/ops_backward.yaml +++ b/paddle/fluid/pir/dialect/operator/ir/ops_backward.yaml @@ -82,8 +82,8 @@ inplace : (out_grad -> x_grad) - backward_op : batch_norm_double_grad - forward : batch_norm_grad (Tensor x, Tensor scale, Tensor bias, Tensor out_mean, Tensor out_variance, Tensor saved_mean, Tensor saved_variance, Tensor reserve_space, Tensor grad_out, float momentum, float epsilon, str data_layout, bool is_test, bool use_global_stats, bool trainable_statistics) -> Tensor(grad_x), Tensor(grad_scale), Tensor(grad_bias) - args : (Tensor x, Tensor scale, Tensor out_mean, Tensor out_variance, Tensor saved_mean, Tensor saved_variance, Tensor grad_out, Tensor grad_x_grad, Tensor grad_scale_grad, Tensor grad_bias_grad, float momentum, float epsilon, str data_layout, bool 
is_test, bool use_global_stats, bool trainable_statistics) + forward : batch_norm_grad (Tensor x, Tensor scale, Tensor bias, Tensor out_mean, Tensor out_variance, Tensor saved_mean, Tensor saved_variance, Tensor reserve_space, Tensor grad_out, float momentum, float epsilon, str data_format, bool is_test, bool use_global_stats, bool trainable_statistics) -> Tensor(grad_x), Tensor(grad_scale), Tensor(grad_bias) + args : (Tensor x, Tensor scale, Tensor out_mean, Tensor out_variance, Tensor saved_mean, Tensor saved_variance, Tensor grad_out, Tensor grad_x_grad, Tensor grad_scale_grad, Tensor grad_bias_grad, float momentum, float epsilon, str data_format, bool is_test, bool use_global_stats, bool trainable_statistics) output : Tensor(x_grad), Tensor(scale_grad), Tensor(grad_out_grad) infer_meta : func : GeneralTernaryGradInferMeta @@ -95,8 +95,8 @@ inplace : (grad_out -> grad_out_grad) - backward_op : batch_norm_grad - forward : batch_norm (Tensor x, Tensor mean, Tensor variance, Tensor scale, Tensor bias, bool is_test, float momentum, float epsilon, str data_layout, bool use_global_stats, bool trainable_statistics) -> Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space) - args : (Tensor x, Tensor scale, Tensor bias, Tensor mean_out, Tensor variance_out, Tensor saved_mean, Tensor saved_variance, Tensor reserve_space, Tensor out_grad, float momentum, float epsilon, str data_layout, bool is_test, bool use_global_stats, bool trainable_statistics) + forward : batch_norm (Tensor x, Tensor mean, Tensor variance, Tensor scale, Tensor bias, bool is_test, float momentum, float epsilon, str data_format, bool use_global_stats, bool trainable_statistics) -> Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space) + args : (Tensor x, Tensor scale, Tensor bias, Tensor mean_out, Tensor variance_out, Tensor saved_mean, Tensor saved_variance, Tensor reserve_space, Tensor out_grad, float momentum, float epsilon, str data_format, bool is_test, bool use_global_stats, bool trainable_statistics) output : Tensor(x_grad), Tensor(scale_grad), Tensor(bias_grad) infer_meta : func : GeneralTernaryGradInferMeta @@ -105,7 +105,7 @@ func : batch_norm_grad data_type : out_grad optional : scale, bias, mean_out, variance_out, reserve_space - composite: batch_norm_grad(x, scale, bias, mean_out, variance_out, saved_mean, saved_variance, reserve_space, out_grad, momentum, epsilon, data_layout, is_test, use_global_stats, trainable_statistics) + composite: batch_norm_grad(x, scale, bias, mean_out, variance_out, saved_mean, saved_variance, reserve_space, out_grad, momentum, epsilon, data_format, is_test, use_global_stats, trainable_statistics) backward : batch_norm_double_grad - backward_op : c_embedding_grad @@ -827,8 +827,8 @@ inplace : (out_grad -> x_grad) - backward_op : sync_batch_norm_grad - forward : sync_batch_norm_ (Tensor x, Tensor mean, Tensor variance, Tensor scale, Tensor bias, bool is_test, float momentum, float epsilon, str data_layout, bool use_global_stats, bool trainable_statistics) -> Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space) - args : (Tensor x, Tensor scale, Tensor bias, Tensor saved_mean, Tensor saved_variance, Tensor reserve_space, Tensor out_grad, float momentum, float epsilon, str data_layout, bool is_test, bool use_global_stats, bool trainable_statistics) + forward : sync_batch_norm_ (Tensor x, Tensor mean, 
Tensor variance, Tensor scale, Tensor bias, bool is_test, float momentum, float epsilon, str data_format, bool use_global_stats, bool trainable_statistics) -> Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space) + args : (Tensor x, Tensor scale, Tensor bias, Tensor saved_mean, Tensor saved_variance, Tensor reserve_space, Tensor out_grad, float momentum, float epsilon, str data_format, bool is_test, bool use_global_stats, bool trainable_statistics) output : Tensor(x_grad), Tensor(scale_grad), Tensor(bias_grad) infer_meta : func : GeneralTernaryGradInferMeta diff --git a/paddle/phi/api/yaml/backward.yaml b/paddle/phi/api/yaml/backward.yaml index d5748145ffe49..c35dab83e26e2 100644 --- a/paddle/phi/api/yaml/backward.yaml +++ b/paddle/phi/api/yaml/backward.yaml @@ -180,8 +180,8 @@ inplace : (out_grad -> input_grad) - backward_op : bicubic_interp_grad - forward : bicubic_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) -> Tensor(output) - args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) + forward : bicubic_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_format="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) -> Tensor(output) + args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_format, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) output : Tensor(x_grad) infer_meta : func : UnchangedInferMeta @@ -204,8 +204,8 @@ func : bilinear_grad - backward_op : bilinear_interp_grad - forward : bilinear_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) -> Tensor(output) - args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) + forward : bilinear_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_format="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) -> Tensor(output) + args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_format, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) output : Tensor(x_grad) infer_meta : func : UnchangedInferMeta @@ -992,8 +992,8 @@ data_type : x - backward_op : group_norm_grad - forward : group_norm (Tensor x, Tensor scale, Tensor bias, float epsilon = 1e-5, int groups = -1, str data_layout = "NCHW") -> Tensor(y), Tensor(mean), Tensor(variance) - args : (Tensor x, Tensor scale, Tensor bias, Tensor y, Tensor mean, Tensor variance, Tensor y_grad, float epsilon, int groups, str data_layout) + forward : group_norm (Tensor x, Tensor scale, Tensor bias, float epsilon = 
1e-5, int groups = -1, str data_format = "NCHW") -> Tensor(y), Tensor(mean), Tensor(variance) + args : (Tensor x, Tensor scale, Tensor bias, Tensor y, Tensor mean, Tensor variance, Tensor y_grad, float epsilon, int groups, str data_format) output : Tensor(x_grad), Tensor(scale_grad), Tensor(bias_grad) infer_meta : func : GeneralTernaryGradInferMeta @@ -1001,7 +1001,7 @@ kernel : func : group_norm_grad data_type : y_grad - composite : group_norm_grad(x, scale, bias, y, mean, variance, y_grad, epsilon, groups, data_layout, x_grad, scale_grad, bias_grad) + composite : group_norm_grad(x, scale, bias, y, mean, variance, y_grad, epsilon, groups, data_format, x_grad, scale_grad, bias_grad) optional: scale, bias inplace : (y_grad -> x_grad) @@ -1328,8 +1328,8 @@ func : lgamma_grad - backward_op : linear_interp_grad - forward : linear_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) -> Tensor(output) - args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) + forward : linear_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_format="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) -> Tensor(output) + args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_format, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) output : Tensor(x_grad) infer_meta : func : UnchangedInferMeta @@ -1617,8 +1617,8 @@ func : nanmedian_grad - backward_op : nearest_interp_grad - forward : nearest_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) -> Tensor(output) - args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) + forward : nearest_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_format="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) -> Tensor(output) + args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_format, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) output : Tensor(x_grad) infer_meta : func : UnchangedInferMeta @@ -2484,8 +2484,8 @@ func : triangular_solve_grad - backward_op : trilinear_interp_grad - forward : trilinear_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) -> Tensor(output) - args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int 
align_mode) + forward : trilinear_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_format="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) -> Tensor(output) + args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_format, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) output : Tensor(x_grad) infer_meta : func : UnchangedInferMeta diff --git a/paddle/phi/api/yaml/fused_ops.yaml b/paddle/phi/api/yaml/fused_ops.yaml index 235ddaaacc694..a78cd92b90840 100644 --- a/paddle/phi/api/yaml/fused_ops.yaml +++ b/paddle/phi/api/yaml/fused_ops.yaml @@ -45,7 +45,7 @@ support_dygraph_mode : true - op : bn_act_xpu - args : (Tensor x, Tensor mean, Tensor variance, Tensor scale, Tensor bias, float momentum, float epsilon, str data_layout, int act_type) + args : (Tensor x, Tensor mean, Tensor variance, Tensor scale, Tensor bias, float momentum, float epsilon, str data_format, int act_type) output : Tensor(out) infer_meta : func : BNActXPUInferMeta diff --git a/paddle/phi/api/yaml/legacy_backward.yaml b/paddle/phi/api/yaml/legacy_backward.yaml index 7bda4331420a5..214b4d3d48c34 100755 --- a/paddle/phi/api/yaml/legacy_backward.yaml +++ b/paddle/phi/api/yaml/legacy_backward.yaml @@ -77,8 +77,8 @@ inplace : (out_grad -> x_grad) - backward_op : batch_norm_double_grad - forward : batch_norm_grad (Tensor x, Tensor scale, Tensor bias, Tensor out_mean, Tensor out_variance, Tensor saved_mean, Tensor saved_variance, Tensor reserve_space, Tensor grad_out, float momentum, float epsilon, str data_layout, bool is_test, bool use_global_stats, bool trainable_statistics) -> Tensor(grad_x), Tensor(grad_scale), Tensor(grad_bias) - args : (Tensor x, Tensor scale, Tensor out_mean, Tensor out_variance, Tensor saved_mean, Tensor saved_variance, Tensor grad_out, Tensor grad_x_grad, Tensor grad_scale_grad, Tensor grad_bias_grad, float momentum, float epsilon, str data_layout, bool is_test, bool use_global_stats, bool trainable_statistics) + forward : batch_norm_grad (Tensor x, Tensor scale, Tensor bias, Tensor out_mean, Tensor out_variance, Tensor saved_mean, Tensor saved_variance, Tensor reserve_space, Tensor grad_out, float momentum, float epsilon, str data_format, bool is_test, bool use_global_stats, bool trainable_statistics) -> Tensor(grad_x), Tensor(grad_scale), Tensor(grad_bias) + args : (Tensor x, Tensor scale, Tensor out_mean, Tensor out_variance, Tensor saved_mean, Tensor saved_variance, Tensor grad_out, Tensor grad_x_grad, Tensor grad_scale_grad, Tensor grad_bias_grad, float momentum, float epsilon, str data_format, bool is_test, bool use_global_stats, bool trainable_statistics) output : Tensor(x_grad), Tensor(scale_grad), Tensor(grad_out_grad) infer_meta : func : GeneralTernaryGradInferMeta @@ -90,8 +90,8 @@ inplace : (grad_out -> grad_out_grad) - backward_op : batch_norm_grad - forward : batch_norm (Tensor x, Tensor mean, Tensor variance, Tensor scale, Tensor bias, bool is_test, float momentum, float epsilon, str data_layout, bool use_global_stats, bool trainable_statistics) -> Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space) - args : (Tensor x, Tensor scale, Tensor bias, Tensor mean_out, Tensor variance_out, Tensor saved_mean, Tensor saved_variance, Tensor reserve_space, Tensor out_grad, float momentum, float epsilon, str 
data_layout, bool is_test, bool use_global_stats, bool trainable_statistics) + forward : batch_norm (Tensor x, Tensor mean, Tensor variance, Tensor scale, Tensor bias, bool is_test, float momentum, float epsilon, str data_format, bool use_global_stats, bool trainable_statistics) -> Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space) + args : (Tensor x, Tensor scale, Tensor bias, Tensor mean_out, Tensor variance_out, Tensor saved_mean, Tensor saved_variance, Tensor reserve_space, Tensor out_grad, float momentum, float epsilon, str data_format, bool is_test, bool use_global_stats, bool trainable_statistics) output : Tensor(x_grad), Tensor(scale_grad), Tensor(bias_grad) infer_meta : func : GeneralTernaryGradInferMeta @@ -100,7 +100,7 @@ func : batch_norm_grad data_type : out_grad optional : scale, bias, mean_out, variance_out, reserve_space - composite: batch_norm_grad(x, scale, bias, mean_out, variance_out, saved_mean, saved_variance, reserve_space, out_grad, momentum, epsilon, data_layout, is_test, use_global_stats, trainable_statistics) + composite: batch_norm_grad(x, scale, bias, mean_out, variance_out, saved_mean, saved_variance, reserve_space, out_grad, momentum, epsilon, data_format, is_test, use_global_stats, trainable_statistics) backward : batch_norm_double_grad - backward_op : c_embedding_grad @@ -753,8 +753,8 @@ inplace : (out_grad -> x_grad) - backward_op : sync_batch_norm_grad - forward : sync_batch_norm_ (Tensor x, Tensor mean, Tensor variance, Tensor scale, Tensor bias, bool is_test, float momentum, float epsilon, str data_layout, bool use_global_stats, bool trainable_statistics) -> Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space) - args : (Tensor x, Tensor scale, Tensor bias, Tensor saved_mean, Tensor saved_variance, Tensor reserve_space, Tensor out_grad, float momentum, float epsilon, str data_layout, bool is_test, bool use_global_stats, bool trainable_statistics) + forward : sync_batch_norm_ (Tensor x, Tensor mean, Tensor variance, Tensor scale, Tensor bias, bool is_test, float momentum, float epsilon, str data_format, bool use_global_stats, bool trainable_statistics) -> Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space) + args : (Tensor x, Tensor scale, Tensor bias, Tensor saved_mean, Tensor saved_variance, Tensor reserve_space, Tensor out_grad, float momentum, float epsilon, str data_format, bool is_test, bool use_global_stats, bool trainable_statistics) output : Tensor(x_grad), Tensor(scale_grad), Tensor(bias_grad) infer_meta : func : GeneralTernaryGradInferMeta diff --git a/paddle/phi/api/yaml/legacy_ops.yaml b/paddle/phi/api/yaml/legacy_ops.yaml index dc582641b769e..ca2d8d9c0481d 100755 --- a/paddle/phi/api/yaml/legacy_ops.yaml +++ b/paddle/phi/api/yaml/legacy_ops.yaml @@ -114,7 +114,7 @@ backend : place > output - op : batch_norm - args : (Tensor x, Tensor mean, Tensor variance, Tensor scale, Tensor bias, bool is_test, float momentum, float epsilon, str data_layout, bool use_global_stats, bool trainable_statistics) + args : (Tensor x, Tensor mean, Tensor variance, Tensor scale, Tensor bias, bool is_test, float momentum, float epsilon, str data_format, bool use_global_stats, bool trainable_statistics) output : Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space) infer_meta: func : BatchNormInferMeta @@ 
-1094,7 +1094,7 @@ backward : swish_grad - op : sync_batch_norm_ - args : (Tensor x, Tensor mean, Tensor variance, Tensor scale, Tensor bias, bool is_test, float momentum, float epsilon, str data_layout, bool use_global_stats, bool trainable_statistics) + args : (Tensor x, Tensor mean, Tensor variance, Tensor scale, Tensor bias, bool is_test, float momentum, float epsilon, str data_format, bool use_global_stats, bool trainable_statistics) output : Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space) infer_meta : func : BatchNormInferMeta diff --git a/paddle/phi/api/yaml/ops.yaml b/paddle/phi/api/yaml/ops.yaml index 835046c1e7911..eb957f2ba7103 100644 --- a/paddle/phi/api/yaml/ops.yaml +++ b/paddle/phi/api/yaml/ops.yaml @@ -280,7 +280,7 @@ func : bernoulli - op : bicubic_interp - args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) + args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_format="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) output : Tensor(output) infer_meta : func : InterpolateInferMeta @@ -303,7 +303,7 @@ backward : bilinear_grad - op : bilinear_interp - args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) + args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_format="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) output : Tensor(output) infer_meta : func : InterpolateInferMeta @@ -1126,7 +1126,7 @@ backward : grid_sample_grad - op : group_norm - args : (Tensor x, Tensor scale, Tensor bias, float epsilon = 1e-5, int groups = -1, str data_layout = "NCHW") + args : (Tensor x, Tensor scale, Tensor bias, float epsilon = 1e-5, int groups = -1, str data_format = "NCHW") output : Tensor(y), Tensor(mean), Tensor(variance) infer_meta : func : GroupNormInferMeta @@ -1478,7 +1478,7 @@ backward : lgamma_grad - op : linear_interp - args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) + args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_format="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) output : Tensor(output) infer_meta : func : InterpolateInferMeta @@ -1880,7 +1880,7 @@ backward : nanmedian_grad - op : nearest_interp - args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) + args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_format="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) output : Tensor(output) infer_meta : func : 
InterpolateInferMeta @@ -2691,7 +2691,7 @@ backward : triangular_solve_grad - op : trilinear_interp - args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) + args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_format="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) output : Tensor(output) infer_meta : func : InterpolateInferMeta diff --git a/paddle/phi/api/yaml/sparse_backward.yaml b/paddle/phi/api/yaml/sparse_backward.yaml index 8a47be3e30fcd..3e614b942d301 100644 --- a/paddle/phi/api/yaml/sparse_backward.yaml +++ b/paddle/phi/api/yaml/sparse_backward.yaml @@ -101,8 +101,8 @@ atanh_csr_grad {sparse_csr, sparse_csr -> sparse_csr} - backward_op : batch_norm_grad - forward : batch_norm_ (Tensor x, Tensor mean, Tensor variance, Tensor scale, Tensor bias, bool is_test, float momentum, float epsilon, str data_layout, bool use_global_stats, bool trainable_statistics) -> Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space) - args : (Tensor x, Tensor scale, Tensor bias, Tensor mean_out, Tensor variance_out, Tensor saved_mean, Tensor saved_variance, Tensor reserve_space, Tensor out_grad, float momentum, float epsilon, str data_layout, bool is_test, bool use_global_stats, bool trainable_statistics) + forward : batch_norm_ (Tensor x, Tensor mean, Tensor variance, Tensor scale, Tensor bias, bool is_test, float momentum, float epsilon, str data_format, bool use_global_stats, bool trainable_statistics) -> Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space) + args : (Tensor x, Tensor scale, Tensor bias, Tensor mean_out, Tensor variance_out, Tensor saved_mean, Tensor saved_variance, Tensor reserve_space, Tensor out_grad, float momentum, float epsilon, str data_format, bool is_test, bool use_global_stats, bool trainable_statistics) output : Tensor(x_grad), Tensor(scale_grad), Tensor(bias_grad) infer_meta : func : GeneralTernaryGradInferMeta @@ -380,8 +380,8 @@ sum_csr_grad {sparse_csr, sparse_csr -> sparse_csr} - backward_op : sync_batch_norm_grad - forward : sync_batch_norm_(Tensor x, Tensor mean, Tensor variance, Tensor scale, Tensor bias, bool is_test, float momentum, float epsilon, str data_layout, bool use_global_stats, bool trainable_statistics) -> Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space) - args : (Tensor x, Tensor scale, Tensor bias, Tensor saved_mean, Tensor saved_variance, Tensor reserve_space, Tensor out_grad, float momentum, float epsilon, str data_layout, bool is_test, bool use_global_stats, bool trainable_statistics) + forward : sync_batch_norm_(Tensor x, Tensor mean, Tensor variance, Tensor scale, Tensor bias, bool is_test, float momentum, float epsilon, str data_format, bool use_global_stats, bool trainable_statistics) -> Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space) + args : (Tensor x, Tensor scale, Tensor bias, Tensor saved_mean, Tensor saved_variance, Tensor reserve_space, Tensor out_grad, float momentum, float epsilon, str data_format, bool is_test, bool use_global_stats, bool trainable_statistics) output : 
Tensor(x_grad), Tensor(scale_grad), Tensor(bias_grad) infer_meta : func : GeneralTernaryGradInferMeta diff --git a/paddle/phi/api/yaml/sparse_ops.yaml b/paddle/phi/api/yaml/sparse_ops.yaml index 5f10334c5b1c0..fdebffcc4f06c 100644 --- a/paddle/phi/api/yaml/sparse_ops.yaml +++ b/paddle/phi/api/yaml/sparse_ops.yaml @@ -88,7 +88,7 @@ backward : atanh_grad - op : batch_norm_ - args : (Tensor x, Tensor mean, Tensor variance, Tensor scale, Tensor bias, bool is_test, float momentum, float epsilon, str data_layout, bool use_global_stats, bool trainable_statistics) + args : (Tensor x, Tensor mean, Tensor variance, Tensor scale, Tensor bias, bool is_test, float momentum, float epsilon, str data_format, bool use_global_stats, bool trainable_statistics) output : Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space) infer_meta : func : BatchNormInferMeta @@ -347,7 +347,7 @@ backward : sum_grad - op : sync_batch_norm_ - args : (Tensor x, Tensor mean, Tensor variance, Tensor scale, Tensor bias, bool is_test, float momentum, float epsilon, str data_layout, bool use_global_stats, bool trainable_statistics) + args : (Tensor x, Tensor mean, Tensor variance, Tensor scale, Tensor bias, bool is_test, float momentum, float epsilon, str data_format, bool use_global_stats, bool trainable_statistics) output : Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space) infer_meta : func : BatchNormInferMeta From bd4557780e4fcc9cb422ce1e6d8fc8c0be90b69c Mon Sep 17 00:00:00 2001 From: Wang Huan Date: Wed, 10 Jan 2024 01:14:35 +0000 Subject: [PATCH 2/5] refine --- paddle/fluid/pir/dialect/operator/ir/ops.yaml | 3 +-- paddle/phi/api/yaml/op_compat.yaml | 20 +++++++++++++++++++ 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/pir/dialect/operator/ir/ops.yaml b/paddle/fluid/pir/dialect/operator/ir/ops.yaml index bdd4abe3e37c9..7a483330c5ead 100644 --- a/paddle/fluid/pir/dialect/operator/ir/ops.yaml +++ b/paddle/fluid/pir/dialect/operator/ir/ops.yaml @@ -1100,14 +1100,13 @@ optional : out - op : seed - args : (int seed, bool deterministic, str rng_name, Place place) + args : (int seed, bool deterministic, str rng_name, bool force_cpu) output : Tensor(out) infer_meta: func: SeedInferMeta param: [seed] kernel: func: seed - backend : place - op : send_v2 args : (Tensor x, int ring_id = 0, int peer = 0, bool use_calc_stream = false, bool dynamic_shape = false) diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml index 7071df37e4aa5..9ea7e0d43f3fb 100755 --- a/paddle/phi/api/yaml/op_compat.yaml +++ b/paddle/phi/api/yaml/op_compat.yaml @@ -342,6 +342,8 @@ saved_mean: SavedMean saved_variance: SavedVariance reserve_space: ReserveSpace + attrs: + data_format: data_layout extra : attrs : [bool use_mkldnn = false, bool fuse_with_relu = false] @@ -364,6 +366,8 @@ {x : X, out_size : OutSize, size_tensor : SizeTensor, scale_tensor : Scale} outputs : output : Out + attrs: + data_format: data_layout extra : attrs : [bool use_mkldnn = false] @@ -380,6 +384,8 @@ {x : X, out_size : OutSize, size_tensor : SizeTensor, scale_tensor : Scale} outputs : output : Out + attrs: + data_format: data_layout extra : attrs : [bool use_mkldnn = false] @@ -425,6 +431,10 @@ outputs : out : Out +- op : bn_act_xpu + attrs: + data_format: data_layout + - op : box_coder inputs : {prior_box : PriorBox , prior_box_var : PriorBoxVar, target_box: TargetBox} @@ -1534,6 +1544,8 @@ y : Y 
mean : Mean variance : Variance + attrs: + data_format: data_layout - op : gru backward : gru_grad @@ -1788,6 +1800,8 @@ {x : X, out_size : OutSize, size_tensor : SizeTensor, scale_tensor : Scale} outputs : output : Out + attrs: + data_format: data_layout extra : attrs : [bool use_mkldnn = false] @@ -2220,6 +2234,8 @@ {x : X, out_size : OutSize, size_tensor : SizeTensor, scale_tensor : Scale} outputs : output : Out + attrs: + data_format: data_layout extra : attrs : [bool use_mkldnn = false] @@ -3089,6 +3105,8 @@ outputs : {out : Y, mean_out : MeanOut, variance_out : VarianceOut, saved_mean : SavedMean, saved_variance : SavedVariance, reserve_space : ReserveSpace} backward : sync_batch_norm_grad + attrs: + data_format: data_layout extra : attrs : [bool use_mkldnn = false, bool fuse_with_relu = false] @@ -3195,6 +3213,8 @@ {x : X, out_size : OutSize, size_tensor : SizeTensor, scale_tensor : Scale} outputs : output : Out + attrs: + data_format: data_layout extra : attrs : [bool use_mkldnn = false] From 8a79090dca0130a99959212d1bbd8a178de6f6b7 Mon Sep 17 00:00:00 2001 From: Wang Huan Date: Wed, 10 Jan 2024 02:44:15 +0000 Subject: [PATCH 3/5] refine --- .../fluid/operators/generator/CMakeLists.txt | 3 +- .../operators/generator/generate_sparse_op.py | 40 ++++++++++++++++++- 2 files changed, 40 insertions(+), 3 deletions(-) diff --git a/paddle/fluid/operators/generator/CMakeLists.txt b/paddle/fluid/operators/generator/CMakeLists.txt index f13cac6eec80c..6e64717dc6129 100644 --- a/paddle/fluid/operators/generator/CMakeLists.txt +++ b/paddle/fluid/operators/generator/CMakeLists.txt @@ -280,7 +280,8 @@ execute_process( COMMAND ${PYTHON_EXECUTABLE} generate_sparse_op.py --ops_yaml_path ./parsed_ops/sparse_ops.parsed.yaml --backward_ops_yaml_path - ./parsed_ops/sparse_backward.parsed.yaml --output_op_path + ./parsed_ops/sparse_backward.parsed.yaml --op_compat_yaml_path + ${CMAKE_SOURCE_DIR}/paddle/phi/api/yaml/op_compat.yaml --output_op_path "${generated_sparse_ops_path}.tmp" --output_arg_map_path "${generated_sparse_argument_mapping_path}.tmp" RESULT_VARIABLE _result) diff --git a/paddle/fluid/operators/generator/generate_sparse_op.py b/paddle/fluid/operators/generator/generate_sparse_op.py index 9c92aa3bc3c94..d234f64086718 100644 --- a/paddle/fluid/operators/generator/generate_sparse_op.py +++ b/paddle/fluid/operators/generator/generate_sparse_op.py @@ -33,7 +33,15 @@ to_scalar_tensor_name, to_variable_names, ) -from generate_op import add_fluid_name, process_invoke_op +from generate_op import ( + add_compat_name, + add_fluid_name, + parse_drop_empty_grad, + parse_get_expected_kerneltype, + parse_keep_signature, + process_invoke_op, + to_phi_and_fluid_op_name_without_underline, +) from jinja2 import Environment, FileSystemLoader, StrictUndefined from parse_utils import to_named_dict from tests_utils import ( @@ -90,7 +98,13 @@ def restruct_io(op): SPARSE_OP_PREFIX = 'sparse_' -def main(op_yaml_path, backward_yaml_path, output_op_path, output_arg_map_path): +def main( + op_yaml_path, + backward_yaml_path, + op_compat_yaml_path, + output_op_path, + output_arg_map_path, +): with open(op_yaml_path, "rt") as f: ops = yaml.safe_load(f) ops = [restruct_io(op) for op in ops] @@ -101,6 +115,13 @@ def main(op_yaml_path, backward_yaml_path, output_op_path, output_arg_map_path): backward_ops = [restruct_io(op) for op in backward_ops] backward_op_dict = to_named_dict(backward_ops) + with open(op_compat_yaml_path, "rt") as f: + op_fluid_map_list = yaml.safe_load(f) + for op_args in op_fluid_map_list: + 
op_args["op"] = to_phi_and_fluid_op_name_without_underline( + op_args["op"] + ) + for op in ops: if op['name'][-1] == '_': op['name'] = op['name'][:-1] @@ -125,6 +146,17 @@ def main(op_yaml_path, backward_yaml_path, output_op_path, output_arg_map_path): param.strip() for param in bw_op['invoke']['args'].split(',') ] + # deal the drop_empty_grad of bw_op by op_compat.yaml + parse_drop_empty_grad(op_fluid_map_list, backward_op_dict) + + parse_get_expected_kerneltype( + op_fluid_map_list, forward_op_dict, backward_op_dict + ) + + parse_keep_signature(op_fluid_map_list, forward_op_dict, backward_op_dict) + + add_compat_name(op_fluid_map_list, forward_op_dict, backward_op_dict) + # prepare for invoke case process_invoke_op(forward_op_dict, backward_op_dict) for bw_op in backward_ops: @@ -181,6 +213,9 @@ def main(op_yaml_path, backward_yaml_path, output_op_path, output_arg_map_path): type=str, help="parsed backward sparse ops yaml file.", ) + parser.add_argument( + '--op_compat_yaml_path', type=str, help="ops args compat yaml file." + ) parser.add_argument( "--output_op_path", type=str, help="path to save generated operators." ) @@ -194,6 +229,7 @@ def main(op_yaml_path, backward_yaml_path, output_op_path, output_arg_map_path): main( args.ops_yaml_path, args.backward_ops_yaml_path, + args.op_compat_yaml_path, args.output_op_path, args.output_arg_map_path, ) From 82ee8b97a26158eef0a615f996b7513658491287 Mon Sep 17 00:00:00 2001 From: Wang Huan Date: Wed, 10 Jan 2024 02:54:23 +0000 Subject: [PATCH 4/5] refine --- .../fluid/operators/generator/CMakeLists.txt | 3 +- .../operators/generator/generate_sparse_op.py | 48 ++++--------------- 2 files changed, 11 insertions(+), 40 deletions(-) diff --git a/paddle/fluid/operators/generator/CMakeLists.txt b/paddle/fluid/operators/generator/CMakeLists.txt index 6e64717dc6129..f13cac6eec80c 100644 --- a/paddle/fluid/operators/generator/CMakeLists.txt +++ b/paddle/fluid/operators/generator/CMakeLists.txt @@ -280,8 +280,7 @@ execute_process( COMMAND ${PYTHON_EXECUTABLE} generate_sparse_op.py --ops_yaml_path ./parsed_ops/sparse_ops.parsed.yaml --backward_ops_yaml_path - ./parsed_ops/sparse_backward.parsed.yaml --op_compat_yaml_path - ${CMAKE_SOURCE_DIR}/paddle/phi/api/yaml/op_compat.yaml --output_op_path + ./parsed_ops/sparse_backward.parsed.yaml --output_op_path "${generated_sparse_ops_path}.tmp" --output_arg_map_path "${generated_sparse_argument_mapping_path}.tmp" RESULT_VARIABLE _result) diff --git a/paddle/fluid/operators/generator/generate_sparse_op.py b/paddle/fluid/operators/generator/generate_sparse_op.py index d234f64086718..87589316e8348 100644 --- a/paddle/fluid/operators/generator/generate_sparse_op.py +++ b/paddle/fluid/operators/generator/generate_sparse_op.py @@ -33,15 +33,7 @@ to_scalar_tensor_name, to_variable_names, ) -from generate_op import ( - add_compat_name, - add_fluid_name, - parse_drop_empty_grad, - parse_get_expected_kerneltype, - parse_keep_signature, - process_invoke_op, - to_phi_and_fluid_op_name_without_underline, -) +from generate_op import add_fluid_name, process_invoke_op from jinja2 import Environment, FileSystemLoader, StrictUndefined from parse_utils import to_named_dict from tests_utils import ( @@ -98,13 +90,7 @@ def restruct_io(op): SPARSE_OP_PREFIX = 'sparse_' -def main( - op_yaml_path, - backward_yaml_path, - op_compat_yaml_path, - output_op_path, - output_arg_map_path, -): +def main(op_yaml_path, backward_yaml_path, output_op_path, output_arg_map_path): with open(op_yaml_path, "rt") as f: ops = yaml.safe_load(f) ops = 
[restruct_io(op) for op in ops] @@ -115,13 +101,6 @@ def main( backward_ops = [restruct_io(op) for op in backward_ops] backward_op_dict = to_named_dict(backward_ops) - with open(op_compat_yaml_path, "rt") as f: - op_fluid_map_list = yaml.safe_load(f) - for op_args in op_fluid_map_list: - op_args["op"] = to_phi_and_fluid_op_name_without_underline( - op_args["op"] - ) - for op in ops: if op['name'][-1] == '_': op['name'] = op['name'][:-1] @@ -129,9 +108,17 @@ def main( op['name'] = op['op_name'] if op["backward"] is not None: op["backward"] = SPARSE_OP_PREFIX + op["backward"] + if op['name'] in [ + SPARSE_OP_PREFIX + "batch_norm", + SPARSE_OP_PREFIX + "sync_batch_norm", + ]: + for item in op["attrs"]: + if item["name"] == "data_format": + item["name"] = "data_layout" add_fluid_name(op["inputs"]) add_fluid_name(op["attrs"]) add_fluid_name(op["outputs"]) + for bw_op in backward_ops: bw_op['op_name'] = SPARSE_OP_PREFIX + bw_op['name'] bw_op['name'] = bw_op['op_name'] @@ -146,17 +133,6 @@ def main( param.strip() for param in bw_op['invoke']['args'].split(',') ] - # deal the drop_empty_grad of bw_op by op_compat.yaml - parse_drop_empty_grad(op_fluid_map_list, backward_op_dict) - - parse_get_expected_kerneltype( - op_fluid_map_list, forward_op_dict, backward_op_dict - ) - - parse_keep_signature(op_fluid_map_list, forward_op_dict, backward_op_dict) - - add_compat_name(op_fluid_map_list, forward_op_dict, backward_op_dict) - # prepare for invoke case process_invoke_op(forward_op_dict, backward_op_dict) for bw_op in backward_ops: @@ -213,9 +189,6 @@ def main( type=str, help="parsed backward sparse ops yaml file.", ) - parser.add_argument( - '--op_compat_yaml_path', type=str, help="ops args compat yaml file." - ) parser.add_argument( "--output_op_path", type=str, help="path to save generated operators." 
) @@ -229,7 +202,6 @@ def main( main( args.ops_yaml_path, args.backward_ops_yaml_path, - args.op_compat_yaml_path, args.output_op_path, args.output_arg_map_path, ) From ebc1c967586a633cf084c4cf988324d883db26ee Mon Sep 17 00:00:00 2001 From: Wang Huan Date: Wed, 10 Jan 2024 08:30:57 +0000 Subject: [PATCH 5/5] refine --- .../operators/generator/generate_sparse_op.py | 29 +++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/paddle/fluid/operators/generator/generate_sparse_op.py b/paddle/fluid/operators/generator/generate_sparse_op.py index 87589316e8348..46dcebc09e8f0 100644 --- a/paddle/fluid/operators/generator/generate_sparse_op.py +++ b/paddle/fluid/operators/generator/generate_sparse_op.py @@ -115,6 +115,14 @@ def main(op_yaml_path, backward_yaml_path, output_op_path, output_arg_map_path): for item in op["attrs"]: if item["name"] == "data_format": item["name"] = "data_layout" + value = op["attr_dict"].pop('data_format') + op["attr_dict"]['data_layout'] = value + for i in range(len(op["kernel"]["param"])): + if op["kernel"]["param"][i] == "data_format": + op["kernel"]["param"][i] = "data_layout" + for i in range(len(op["infer_meta"]["param"])): + if op["infer_meta"]["param"][i] == "data_format": + op["infer_meta"]["param"][i] = "data_layout" add_fluid_name(op["inputs"]) add_fluid_name(op["attrs"]) add_fluid_name(op["outputs"]) @@ -122,6 +130,27 @@ def main(op_yaml_path, backward_yaml_path, output_op_path, output_arg_map_path): for bw_op in backward_ops: bw_op['op_name'] = SPARSE_OP_PREFIX + bw_op['name'] bw_op['name'] = bw_op['op_name'] + + if bw_op['name'] in [ + SPARSE_OP_PREFIX + "batch_norm_grad", + SPARSE_OP_PREFIX + "sync_batch_norm_grad", + ]: + for item in bw_op["attrs"]: + if item["name"] == "data_format": + item["name"] = "data_layout" + for item in bw_op["forward"]["attrs"]: + if item["name"] == "data_format": + item["name"] = "data_layout" + item["fluid_name"] = "data_layout" + value = bw_op["attr_dict"].pop('data_format') + bw_op["attr_dict"]['data_layout'] = value + for i in range(len(bw_op["kernel"]["param"])): + if bw_op["kernel"]["param"][i] == "data_format": + bw_op["kernel"]["param"][i] = "data_layout" + for i in range(len(bw_op["infer_meta"]["param"])): + if bw_op["infer_meta"]["param"][i] == "data_format": + bw_op["infer_meta"]["param"][i] = "data_layout" + add_fluid_name(bw_op["inputs"]) add_fluid_name(bw_op["attrs"]) add_fluid_name(bw_op["outputs"])
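
---

Note on the final shape of the series: PATCH 3 tried to route the sparse
generator through op_compat.yaml, PATCH 4 reverts that wholesale, and PATCH 5
instead hardcodes the data_format -> data_layout rename for the two sparse
batch-norm ops, repeating the same rewrite once for the forward ops and once
for the backward ops. The duplicated logic reads as the standalone helper
below. This is a minimal sketch only, assuming the parsed-op dict shape
implied by the diff (an "attrs" list of {"name": ...} entries, an "attr_dict"
mapping, and "kernel"/"infer_meta" sections carrying a "param" list); the
helper name rename_attr and the sample dict are hypothetical, not part of the
generator.

def rename_attr(op, old="data_format", new="data_layout"):
    """Rename an attribute everywhere the generator later reads it:
    the attrs list, the attr_dict mapping, and the kernel/infer_meta
    parameter lists."""
    for item in op["attrs"]:
        if item["name"] == old:
            item["name"] = new
    if old in op["attr_dict"]:
        op["attr_dict"][new] = op["attr_dict"].pop(old)
    for section in ("kernel", "infer_meta"):
        params = op[section]["param"]
        for i, name in enumerate(params):
            if name == old:
                params[i] = new

# Example with a stripped-down parsed op, shaped as the diff suggests
# (the field values here are illustrative, not taken from the real
# parsed YAML):
op = {
    "attrs": [{"name": "data_format"}, {"name": "momentum"}],
    "attr_dict": {"data_format": "str", "momentum": "float"},
    "kernel": {"param": ["x", "data_format"]},
    "infer_meta": {"param": ["x", "data_format"]},
}
rename_attr(op)
assert op["attr_dict"] == {"momentum": "float", "data_layout": "str"}
assert op["kernel"]["param"] == ["x", "data_layout"]

The backward branch in PATCH 5 additionally rewrites bw_op["forward"]["attrs"]
(both "name" and "fluid_name"), which a refactor along these lines would fold
into the same helper. Keeping the rename local to the sparse generator avoids
pulling the full compat machinery (add_compat_name, parse_drop_empty_grad,
parse_get_expected_kerneltype, parse_keep_signature) into the sparse pipeline,
which is presumably why PATCH 4 backs out the PATCH 3 approach.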