[MXNET-382] Shape and Size Operator #10889
Changes from 7 commits
`@@ -388,6 +388,39 @@ void CastCompute(const nnvm::NodeAttrs& attrs,`

```cpp
  });
}

template<typename xpu>
void ShapeCompute(const nnvm::NodeAttrs& attrs,
                  const OpContext& ctx,
                  const std::vector<TBlob>& inputs,
                  const std::vector<OpReqType>& req,
                  const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  const TBlob& in_data = inputs[0];
  const TBlob& out_data = outputs[0];
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  const TShape& in_shape = in_data.shape_;
  MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, {
    mxnet_op::Kernel<mshadow_op::identity_with_cast, xpu>::Launch(
        s, in_data.ndim(), out_data.dptr<int64_t>(), in_shape.data());
  });
}
```

**Review comment** (on the `Kernel::Launch` call): How come this is not captured by CI?

**Reply:** It did; the CI failed for the GPU tests. I need to fix it.
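For readers unfamiliar with the `mxnet_op::Kernel` pattern used above: `Launch` invokes the functor's static `Map` once per index, forwarding the trailing arguments. A minimal sketch of what an `identity_with_cast` functor could look like under that contract — this is an assumption for illustration, not the definition from this PR:

```cpp
// Sketch (assumed): copy element i from in to out, casting to the output
// type. ShapeCompute uses this to copy the dim_t shape entries into the
// int64 output tensor, one entry per launched index.
struct identity_with_cast {
  template<typename OType, typename IType>
  MSHADOW_XINLINE static void Map(int i, OType* out, IType* in) {
    out[i] = static_cast<OType>(in[i]);
  }
};
```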
```cpp
template<typename xpu>
void SizeCompute(const nnvm::NodeAttrs& attrs,
                 const OpContext& ctx,
                 const std::vector<TBlob>& inputs,
                 const std::vector<OpReqType>& req,
                 const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  const TBlob& in_data = inputs[0];
  const TBlob& out_data = outputs[0];
  out_data.dptr<int64_t>()[0] = in_data.Size();
}
```

**Review comment** (on the assignment line): `out_data` holds a pointer to GPU memory; you need to explicitly use a kernel launch to set the value.

**Reply:** If I use a kernel launch then I will need a data buffer pointing to `in_data.Size()`. How would I get that? Because `in_data.Size()` is of type …

**Review comment:** You know the output size is only 1, so you can just use 1 for that.
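Following the reviewers' suggestion, here is a minimal sketch of how the scalar write could go through a one-element kernel launch so it is valid for GPU output buffers as well. The `size_kernel` functor name is hypothetical, not from this diff:

```cpp
// Hypothetical functor: writes the precomputed size into out[0].
struct size_kernel {
  MSHADOW_XINLINE static void Map(int i, int64_t* out, const int64_t size) {
    out[i] = size;  // launched with count 1, so i is always 0
  }
};

template<typename xpu>
void SizeCompute(const nnvm::NodeAttrs& attrs,
                 const OpContext& ctx,
                 const std::vector<TBlob>& inputs,
                 const std::vector<OpReqType>& req,
                 const std::vector<TBlob>& outputs) {
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  // The size is computed on the host; only the single-element store runs
  // on the device, so the same code works for CPU and GPU memory.
  mxnet_op::Kernel<size_kernel, xpu>::Launch(
      s, 1, outputs[0].dptr<int64_t>(),
      static_cast<int64_t>(inputs[0].Size()));
}
```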
**Review comment** (on the blank lines after `SizeCompute`): Please get rid of one of the blank lines here; C++ uses only one blank line between functions.
```cpp
struct HardSigmoidParam : public dmlc::Parameter<HardSigmoidParam> {
  real_t alpha;
  real_t beta;
```
`@@ -399,6 +399,71 @@ NNVM_REGISTER_OP(reshape_like)`

```cpp
.add_argument("lhs", "NDArray-or-Symbol", "First input.")
.add_argument("rhs", "NDArray-or-Symbol", "Second input.");

NNVM_REGISTER_OP(shape_nd)
.describe(R"code(Returns a 1D int64 array containing the shape of data.

Example::

  shape_nd([[1,2,3,4], [5,6,7,8]]) = [2,4]

)code" ADD_FILELINE)
.set_num_inputs(1)
.set_num_outputs(1)
.set_attr<nnvm::FIgnoreInputs>("FIgnoreInputs",
  [](const NodeAttrs& attrs) { return std::vector<uint32_t>(1, 1); })
.set_attr<FCompute>("FCompute<cpu>", ShapeCompute<cpu>)
.set_attr<nnvm::FInferShape>("FInferShape",
  [](const nnvm::NodeAttrs& attrs,
     std::vector<TShape> *in_attrs,
     std::vector<TShape> *out_attrs) {
    CHECK_EQ(in_attrs->size(), 1U);
    CHECK_EQ(out_attrs->size(), 1U);
    TShape target_shape(1);
    target_shape[0] = in_attrs->at(0).ndim();
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, target_shape);
    return !shape_is_none(out_attrs->at(0));
  })
.set_attr<nnvm::FInferType>("FInferType",
  [](const nnvm::NodeAttrs& attrs,
     std::vector<int>* in_attrs,
     std::vector<int>* out_attrs) {
    CHECK_EQ(in_attrs->size(), 1U);
    CHECK_EQ(out_attrs->size(), 1U);
    TYPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::kInt64);
    return out_attrs->at(0) != -1;
  })
.add_argument("data", "NDArray-or-Symbol", "Input Array.");
```

**Review comment** (on the operator name `shape_nd`): shape_array?

**Review comment** (on the `FInferType` registration): Not registering FGradient?

**Reply:** The shape operator is not differentiable. Check the conversation here: #10789 (comment)
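For context on the FGradient exchange: MXNet ops that are valid in a graph but have no meaningful gradient typically register a zero-gradient function rather than omitting FGradient entirely, so a backward pass through the node does not fail. A sketch of that registration, assuming the stock `MakeZeroGradNodes` helper applies here (this line is not part of the diff above):

```cpp
// Assumed addition for illustration: emit zero gradients for the input
// instead of erroring when backward reaches this non-differentiable node.
NNVM_REGISTER_OP(shape_nd)
.set_attr<nnvm::FGradient>("FGradient", MakeZeroGradNodes);
```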
```cpp
NNVM_REGISTER_OP(size_nd)
.describe(R"code(Returns a 1D int64 array containing the size of data.

Example::

  size_nd([[1,2,3,4], [5,6,7,8]]) = [8]

)code" ADD_FILELINE)
.set_num_inputs(1)
.set_num_outputs(1)
.set_attr<FCompute>("FCompute<cpu>", SizeCompute<cpu>)
.set_attr<nnvm::FInferShape>("FInferShape",
  [](const nnvm::NodeAttrs& attrs,
     std::vector<TShape> *in_attrs,
     std::vector<TShape> *out_attrs) {
    CHECK_EQ(in_attrs->size(), 1U);
    CHECK_EQ(out_attrs->size(), 1U);
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, 1U);
    return !shape_is_none(out_attrs->at(0));
  })
.set_attr<nnvm::FInferType>("FInferType",
  [](const nnvm::NodeAttrs& attrs,
     std::vector<int>* in_attrs,
     std::vector<int>* out_attrs) {
    CHECK_EQ(in_attrs->size(), 1U);
    CHECK_EQ(out_attrs->size(), 1U);
    TYPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::kInt64);
    return out_attrs->at(0) != -1;
  })
.add_argument("data", "NDArray-or-Symbol", "Input Array.");

DMLC_REGISTER_PARAMETER(CastParam);
NNVM_REGISTER_OP(Cast)
```
**Review comment** (on the diff as a whole): You don't need to make the shape and size functions templates over the device type. CPU and GPU FCompute functions are defined in the .cc and .cu files respectively and don't share anything.
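A sketch of what that suggestion would look like. The GPU-side names (`ShapeComputeGPU`, `SizeComputeGPU`) are hypothetical stand-ins, not from this diff; the point is that the .cu file attaches its own FCompute to the ops already registered in the .cc file:

```cpp
// In the corresponding .cu file (hypothetical function names): register
// GPU implementations separately instead of instantiating a shared
// template with <gpu>.
NNVM_REGISTER_OP(shape_nd)
.set_attr<FCompute>("FCompute<gpu>", ShapeComputeGPU);

NNVM_REGISTER_OP(size_nd)
.set_attr<FCompute>("FCompute<gpu>", SizeComputeGPU);
```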