【PIR Dist Op Reg No.19】 reg pull_box_sparse (PaddlePaddle#62982)
* fix
* fix
* fix
* fix
* fix
* fix
* add test
* add
* fix
* fix
* add out
* fix
* codestyle
* fix
* fix backward
* merge
enkilee authored and yinfan98 committed May 7, 2024
1 parent 6f35286 commit 0d57c4f
Showing 9 changed files with 133 additions and 0 deletions.
3 changes: 3 additions & 0 deletions paddle/fluid/ir_adaptor/translator/op_compat_gen.py
@@ -170,6 +170,9 @@ def insert_new_mutable_attributes(
    op_arg_name_mappings['push_sparse_v2'].update(
        {"out_grad_in": "Out@GRAD", "out_grad_out": "Out@GRAD"}
    )
    op_arg_name_mappings['push_box_sparse'].update(
        {"out_grad_in": "Out@GRAD", "out_grad_out": "Out@GRAD"}
    )
    op_arg_name_mappings['push_gpups_sparse'].update(
        {"out_grad": "Out@GRAD", "out_grad_grad": "Out@GRAD"}
    )
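As a rough illustration of what this mapping buys the translator (a minimal sketch, assuming op_arg_name_mappings is a plain new-name-to-legacy-name dict as the surrounding context suggests; the pre-seeded "ids"/"out" entries below are illustrative, not copied from the generator): both the PIR input out_grad_in and the PIR output out_grad_out of push_box_sparse resolve to the same legacy variable name Out@GRAD, because the legacy operator updates that gradient buffer in place.

# Minimal sketch (not the generator itself): the mapping added above, in isolation.
op_arg_name_mappings = {"push_box_sparse": {"ids": "Ids", "out": "Out"}}  # illustrative seed
op_arg_name_mappings["push_box_sparse"].update(
    {"out_grad_in": "Out@GRAD", "out_grad_out": "Out@GRAD"}
)
# Both the backward op's input and output gradients point at the legacy Out@GRAD slot.
assert op_arg_name_mappings["push_box_sparse"]["out_grad_in"] == "Out@GRAD"
assert op_arg_name_mappings["push_box_sparse"]["out_grad_out"] == "Out@GRAD"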
4 changes: 4 additions & 0 deletions paddle/fluid/pir/dialect/op_generator/ops_api_gen.py
@@ -206,6 +206,10 @@
    'push_dense',
    'limit_by_capacity',
    'global_scatter',
    'pull_box_sparse',
    'pull_box_sparse_',
    'push_box_sparse',
    'push_box_sparse_',
]


9 changes: 9 additions & 0 deletions paddle/fluid/pir/dialect/operator/ir/ops.yaml
@@ -1351,6 +1351,15 @@
    func : prune_gate_by_capacity
    data_type : gate_idx

- op : pull_box_sparse
  args : (Tensor w, Tensor[] ids, bool is_sparse = false, bool is_distributed = false, int size = 1)
  output : Tensor[](out){ids.size()}
  infer_meta :
    func : PullBoxSparseInferMeta
  kernel :
    func : pull_box_sparse
    data_type : ids

- op : pull_gpups_sparse
  args : (Tensor w, Tensor[] ids, int[] size={}, bool is_sparse=false, bool is_distributed=false)
  output : Tensor[](out){ids.size()}
12 changes: 12 additions & 0 deletions paddle/fluid/pir/dialect/operator/ir/ops_backward.yaml
@@ -659,6 +659,18 @@
    func : prod_grad
  composite: prod_grad(x, out, out_grad, dims, keep_dim, reduce_all, x_grad)

- backward_op : push_box_sparse
  forward : pull_box_sparse (Tensor w, Tensor[] ids, bool is_sparse = false, bool is_distributed = false, int size = 1) -> Tensor[](out){ids.size()}
  args : (Tensor[] ids, Tensor[] out_grad_in, bool is_sparse = false, bool is_distributed = false, int size = 1)
  output : Tensor[](out_grad_out){out_grad_in.size()}
  infer_meta :
    func : UnchangedMultiInferMeta
    param : [out_grad_in]
  kernel :
    func : push_box_sparse
    data_type : out_grad_in
  inplace : (out_grad_in -> out_grad_out)

- backward_op : rank_attention_grad
  forward : rank_attention (Tensor x, Tensor rank_offset, Tensor rank_param, int max_rank = 3, int max_size = 0) -> Tensor(input_help), Tensor(out), Tensor(ins_rank)
  args : (Tensor x, Tensor rank_offset, Tensor rank_param, Tensor input_help, Tensor ins_rank, Tensor out_grad, int max_rank = 3, int max_size = 0)
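Read together, UnchangedMultiInferMeta over out_grad_in and the inplace pair (out_grad_in -> out_grad_out) say that push_box_sparse neither reshapes nor retypes the incoming gradients; it pushes them back to the embedding table and reuses the same buffers as its outputs. A small stand-alone sketch of that invariant (illustrative Python with a hypothetical MetaTensor stand-in, not Paddle's infer-meta machinery):

# Illustrative invariant only; the real UnchangedMultiInferMeta lives in Paddle's C++.
from dataclasses import dataclass

@dataclass
class FakeMetaTensor:  # hypothetical stand-in for phi::MetaTensor
    shape: tuple
    dtype: str

def push_box_sparse_infer_meta(out_grad_in):
    # Each output gradient keeps exactly the shape/dtype of its input gradient.
    return [FakeMetaTensor(t.shape, t.dtype) for t in out_grad_in]

grads = [FakeMetaTensor((8, 11), "float32"), FakeMetaTensor((4, 11), "float32")]
outs = push_box_sparse_infer_meta(grads)
assert [(o.shape, o.dtype) for o in outs] == [(g.shape, g.dtype) for g in grads]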
2 changes: 2 additions & 0 deletions paddle/fluid/pir/dialect/operator/utils/utils.cc
@@ -105,6 +105,8 @@ const std::unordered_set<std::string> LegacyOpList = {
    CReduceMinOp::name(),
    CReduceProdOp::name(),
    CScatterOp::name(),
    PullBoxSparseOp::name(),
    PushBoxSparseOp::name(),
    PushSparseV2Op::name(),
    PartialSendOp::name(),
    PartialRecvOp::name()};
18 changes: 18 additions & 0 deletions paddle/phi/api/yaml/op_compat.yaml
@@ -2691,6 +2691,16 @@
  outputs :
    out : Out

- op : pull_box_sparse
  inputs :
    { w : W, ids: Ids}
  outputs :
    out : Out
  attrs :
    sparse : is_sparse
  extra :
    attrs : [bool is_sparse = false, bool is_distributed = false, int size = 1]

- op : pull_gpups_sparse
  backward : push_gpups_sparse
  inputs :
@@ -2706,6 +2716,14 @@
  extra :
    attrs : [int embedding_dim = 11, int table_id = 0, str accessor_class = "", str ctr_label_name = "", int padding_id = 0, bool scale_sparse_grad = true, 'str[] input_names = {}', bool is_distributed = true]

- op : push_box_sparse
  inputs :
    ids: Ids
  outputs :
    out : Out
  attrs :
    sparse : is_sparse

- op : push_dense
  inputs :
    ids : Ids
27 changes: 27 additions & 0 deletions paddle/phi/infermeta/binary.cc
@@ -2998,6 +2998,33 @@ void PruneGateByCapacityInferMeta(const MetaTensor& gate_idx,
  new_gate_idx->set_dtype(gate_idx.dtype());
}

void PullBoxSparseInferMeta(const MetaTensor& w,
                            const std::vector<const MetaTensor*>& ids,
                            bool is_sparse,
                            bool is_distributed,
                            int size,
                            std::vector<MetaTensor*> out) {
  auto hidden_size = static_cast<int64_t>(size);
  const size_t n_ids = ids.size();
  for (size_t i = 0; i < n_ids; ++i) {
    MetaTensor* output = out[i];
    auto ids_dims = ids[i]->dims();
    int ids_rank = ids_dims.size();
    PADDLE_ENFORCE_EQ(ids_dims[ids_rank - 1],
                      1UL,
                      phi::errors::InvalidArgument(
                          "Shape error in %lu id, the last dimension of the "
                          "'Ids' tensor must be 1.",
                          i));
    auto out_dim =
        common::vectorize(common::slice_ddim(ids_dims, 0, ids_rank - 1));
    out_dim.push_back(hidden_size);
    output->set_dims(common::make_ddim(out_dim));
    output->share_lod(*ids[i]);
    output->set_dtype(w.dtype());
  }
}

void RepeatInterleaveWithTensorIndexInferMeta(const MetaTensor& x,
                                              const MetaTensor& repeats,
                                              int dim,
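The shape rule in PullBoxSparseInferMeta is compact: every ids[i] must end in a dimension of 1; the matching output keeps the leading dimensions of ids[i], appends size as the embedding width, and takes its dtype from w. A pure-Python sketch of the same rule (an illustration of the logic above, not the actual implementation):

# Pure-Python sketch of the shape rule above (illustration only).
def pull_box_sparse_out_shapes(ids_shapes, size):
    out_shapes = []
    for i, ids_dims in enumerate(ids_shapes):
        if ids_dims[-1] != 1:
            raise ValueError(
                f"Shape error in {i} id: the last dimension of 'Ids' must be 1."
            )
        # Drop the trailing 1, append the embedding width `size`.
        out_shapes.append(tuple(ids_dims[:-1]) + (size,))
    return out_shapes

# Two id tensors of shape (8, 1) and (4, 1) with size=11 give (8, 11) and (4, 11).
assert pull_box_sparse_out_shapes([(8, 1), (4, 1)], size=11) == [(8, 11), (4, 11)]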
7 changes: 7 additions & 0 deletions paddle/phi/infermeta/binary.h
@@ -479,6 +479,13 @@ void PReluInferMeta(const MetaTensor& x,
                    MetaTensor* out,
                    MetaConfig config = MetaConfig());

void PullBoxSparseInferMeta(const MetaTensor& w,
                            const std::vector<const MetaTensor*>& ids,
                            bool is_sparse,
                            bool is_distributed,
                            int size,
                            std::vector<MetaTensor*> out);

void PullGpupsSparseInferMeta(const MetaTensor& w,
                              const std::vector<const MetaTensor*>& ids,
                              const std::vector<int>& size,
51 changes: 51 additions & 0 deletions test/ir/pir/translator/test_pull_box_sparse_translator.py
@@ -0,0 +1,51 @@
# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import test_op_translator

import paddle
from paddle.base.layer_helper import LayerHelper


class TestPullBoxSparseOpTranslator(
    test_op_translator.TestOpWithBackwardTranslator
):
    def append_op(self):
        self.forward_op_type = "pull_box_sparse"
        self.backward_op_type = "push_box_sparse"
        ids = paddle.ones(shape=(1, 1), dtype='float32')
        w = paddle.ones(shape=(1, 1), dtype='float32')
        out = paddle.ones(shape=(1, 1), dtype='float32')
        attrs = {
            'is_sparse': False,
            'is_distributed': False,
            'size': 1,
        }
        forward_helper = LayerHelper(self.forward_op_type)
        forward_helper.append_op(
            type=self.forward_op_type,
            inputs={"W": w, "Ids": [ids]},
            outputs={"Out": [out]},
            attrs=attrs,
        )
        return out

    def test_translator(self):
        self.check()


if __name__ == "__main__":
    unittest.main()
