mkldnn directory cleanup #47779

Merged: 26 commits, Nov 15, 2022
21 changes: 11 additions & 10 deletions paddle/fluid/framework/data_layout_transform.cc
@@ -101,15 +101,16 @@ void* GetDataFromTensor(const phi::DenseTensor& tensor,
                         dnnl::memory::data_type type) {
   switch (type) {
     case dnnl::memory::data_type::f32:
-      return platform::to_void_cast(tensor.data<float>());
+      return phi::funcs::to_void_cast(tensor.data<float>());
     case dnnl::memory::data_type::s8:
-      return platform::to_void_cast(tensor.data<int8_t>());
+      return phi::funcs::to_void_cast(tensor.data<int8_t>());
     case dnnl::memory::data_type::u8:
-      return platform::to_void_cast(tensor.data<unsigned char>());
+      return phi::funcs::to_void_cast(tensor.data<unsigned char>());
     case dnnl::memory::data_type::s32:
-      return platform::to_void_cast(tensor.data<int32_t>());
+      return phi::funcs::to_void_cast(tensor.data<int32_t>());
     case dnnl::memory::data_type::bf16:
-      return platform::to_void_cast(tensor.data<paddle::platform::bfloat16>());
+      return phi::funcs::to_void_cast(
+          tensor.data<paddle::platform::bfloat16>());
     default:
       PADDLE_THROW(
           platform::errors::InvalidArgument("Wrong mkldnn type provided."));
@@ -125,7 +126,7 @@ void TransDataLayoutFromMKLDNN(const OpKernelType& kernel_type_for_var,
   auto place = expected_kernel_type.place_;

   PADDLE_ENFORCE(
-      in_layout == DataLayout::kMKLDNN && out_layout != DataLayout::kMKLDNN,
+      in_layout == DataLayout::ONEDNN && out_layout != DataLayout::ONEDNN,
       platform::errors::InvalidArgument(
           "TransDataLayoutFromMKLDNN only supports transform from MKLDNN to "
           "non-MKLDNN"));
@@ -165,7 +166,7 @@ void innerTransDataLayoutFromMKLDNN(DataLayout in_layout,
           DataTypeToString(framework::TransToProtoVarType(in.dtype()))));

   auto out_format =
-      platform::MKLDNNFormatForSize(in_tz.size(), ToMKLDNNFormat(out_layout));
+      phi::funcs::OneDNNFormatForSize(in_tz.size(), ToOneDNNFormat(out_layout));
   dnnl::memory::desc out_mem_desc(out_tz, in_type, out_format);

   // output tensor has the same dims as input. Reorder don't change dims
@@ -177,8 +178,8 @@ void innerTransDataLayoutFromMKLDNN(DataLayout in_layout,
   if (in.initialized() && ((in.mem_desc() != out->mem_desc()) || always_copy)) {
     void* in_data = GetDataFromTensor(in, in_type);

-    platform::ReorderMKLDNNHandler handler(
-        in_tz, framework::TransToProtoVarType(in.dtype()), in_type, cpu_engine);
+    phi::funcs::ReorderOneDNNHandler handler(
+        in_tz, in.dtype(), in_type, cpu_engine);

     auto reorder_src_memory_p =
         handler.AcquireSrcMemory(in.mem_desc(), in_data);
@@ -199,7 +200,7 @@ void innerTransDataLayoutFromMKLDNN(DataLayout in_layout,
   }
   // For exepected NHWC data format we need to reshape the Output tensor
   // As MKL-DNN description was in NCHW and paddle is expecting NHWC
-  platform::MatchShapeToLayout(out, in_layout, out_layout);
+  phi::funcs::MatchShapeToLayout(out, in_layout, out_layout);

   out->set_layout(DataLayout::kNCHW);
 }
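The GetDataFromTensor hunks are a pure namespace move (platform:: to phi::funcs::). The underlying pattern, a runtime data-type tag dispatched through a switch to a typed accessor whose result is erased back to void* for oneDNN, is easiest to see in isolation. A minimal sketch with stand-in types, not Paddle's real classes:

```cpp
#include <cstdint>
#include <stdexcept>
#include <vector>

// Illustrative stand-ins -- not Paddle's real types.
enum class DType { f32, s8, u8, s32 };

// Mirrors the role of phi::funcs::to_void_cast: strip constness and type
// so the pointer can be handed to the C-style oneDNN reorder API.
template <typename T>
void* to_void_cast(const T* ptr) {
  return const_cast<void*>(static_cast<const void*>(ptr));
}

struct Tensor {
  std::vector<float> buf;  // pretend type-erased storage for the sketch
  template <typename T>
  const T* data() const {
    return reinterpret_cast<const T*>(buf.data());
  }
};

// The runtime tag picks the typed accessor; every branch funnels into the
// same void*, so the caller stays type-agnostic.
void* GetDataFromTensor(const Tensor& t, DType type) {
  switch (type) {
    case DType::f32: return to_void_cast(t.data<float>());
    case DType::s8:  return to_void_cast(t.data<int8_t>());
    case DType::u8:  return to_void_cast(t.data<uint8_t>());
    case DType::s32: return to_void_cast(t.data<int32_t>());
    default: throw std::invalid_argument("Wrong oneDNN type provided.");
  }
}
```

The const_cast is the point of the helper: it lets a const tensor feed oneDNN's non-const C API without each call site repeating the double cast.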
48 changes: 16 additions & 32 deletions paddle/fluid/framework/data_layout_transform.h
@@ -52,51 +52,35 @@ struct CastDataLayout {
 };

 #ifdef PADDLE_WITH_MKLDNN
-using MKLDNNDataType = dnnl::memory::data_type;
+using OneDNNDataType = dnnl::memory::data_type;

-inline MKLDNNMemoryFormat ToMKLDNNFormat(const DataLayout& layout) {
+inline OneDNNMemoryFormat ToOneDNNFormat(const DataLayout& layout) {
   switch (layout) {
     case DataLayout::kNHWC:
-      return MKLDNNMemoryFormat::nhwc;
+      return OneDNNMemoryFormat::nhwc;
     case DataLayout::kNCHW:
-      return MKLDNNMemoryFormat::nchw;
+      return OneDNNMemoryFormat::nchw;
     case DataLayout::kNCDHW:
-      return MKLDNNMemoryFormat::ncdhw;
+      return OneDNNMemoryFormat::ncdhw;
     case DataLayout::kNDHWC:
-      return MKLDNNMemoryFormat::ndhwc;
+      return OneDNNMemoryFormat::ndhwc;
     default:
       PADDLE_THROW(platform::errors::InvalidArgument(
-          "Fail to convert layout %s to MKLDNN format.",
+          "Fail to convert layout %s to oneDNN format.",
          phi::DataLayoutToString(layout)));
   }
 }

-inline DataLayout ToPaddleLayout(const MKLDNNMemoryFormat& format) {
-  switch (format) {
-    case MKLDNNMemoryFormat::nhwc:
-      return DataLayout::kNHWC;
-    case MKLDNNMemoryFormat::nchw:
-      return DataLayout::kNCHW;
-    case MKLDNNMemoryFormat::ncdhw:
-      return DataLayout::kNCDHW;
-    case MKLDNNMemoryFormat::ndhwc:
-      return DataLayout::kNDHWC;
-    default:
-      PADDLE_THROW(platform::errors::InvalidArgument(
-          "Fail to convert MKLDNN format to paddle layout."));
-  }
-}
-
-inline MKLDNNDataType ToMKLDNNDataType(proto::VarType::Type type) {
-  static std::unordered_map<int, MKLDNNDataType> dict{
-      {DataTypeTrait<float>::DataType(), MKLDNNDataType::f32},
-      {DataTypeTrait<int8_t>::DataType(), MKLDNNDataType::s8},
-      {DataTypeTrait<uint8_t>::DataType(), MKLDNNDataType::u8},
-      {DataTypeTrait<int32_t>::DataType(), MKLDNNDataType::s32},
-      {DataTypeTrait<platform::bfloat16>::DataType(), MKLDNNDataType::bf16}};
+inline OneDNNDataType ToMKLDNNDataType(proto::VarType::Type type) {
+  static std::unordered_map<int, OneDNNDataType> dict{
+      {DataTypeTrait<float>::DataType(), OneDNNDataType::f32},
+      {DataTypeTrait<int8_t>::DataType(), OneDNNDataType::s8},
+      {DataTypeTrait<uint8_t>::DataType(), OneDNNDataType::u8},
+      {DataTypeTrait<int32_t>::DataType(), OneDNNDataType::s32},
+      {DataTypeTrait<platform::bfloat16>::DataType(), OneDNNDataType::bf16}};
   auto iter = dict.find(static_cast<int>(type));
   if (iter != dict.end()) return iter->second;
-  return MKLDNNDataType::undef;
+  return OneDNNDataType::undef;
 }

 void innerTransDataLayoutFromMKLDNN(DataLayout in_layout,
@@ -111,7 +95,7 @@ void TransDataLayoutFromMKLDNN(const OpKernelType& kernel_type_for_var,
                                const phi::DenseTensor& in,
                                phi::DenseTensor* out);

-void* GetDataFromTensor(const phi::DenseTensor& tensor, MKLDNNDataType type);
+void* GetDataFromTensor(const phi::DenseTensor& tensor, OneDNNDataType type);

 #endif
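Note that the header renames only the alias while keeping the old function name ToMKLDNNDataType, and that ToPaddleLayout is deleted outright rather than renamed, presumably superseded by a phi::funcs equivalent elsewhere in this cleanup. The lookup-with-fallback idiom it uses is worth having in compact form; a self-contained sketch with illustrative enums (not Paddle's real type ids):

```cpp
#include <unordered_map>

// Stand-ins for the framework enums; names are illustrative only.
enum class OneDNNDataType { undef, f32, s8, u8, s32, bf16 };
enum class VarType { FP32, INT8, UINT8, INT32, BF16, STRING };

// Same shape as ToMKLDNNDataType: a static map built once, plus an
// explicit `undef` result for types oneDNN cannot represent.
OneDNNDataType ToOneDNNDataType(VarType type) {
  static const std::unordered_map<int, OneDNNDataType> dict{
      {static_cast<int>(VarType::FP32), OneDNNDataType::f32},
      {static_cast<int>(VarType::INT8), OneDNNDataType::s8},
      {static_cast<int>(VarType::UINT8), OneDNNDataType::u8},
      {static_cast<int>(VarType::INT32), OneDNNDataType::s32},
      {static_cast<int>(VarType::BF16), OneDNNDataType::bf16}};
  auto iter = dict.find(static_cast<int>(type));
  return iter != dict.end() ? iter->second : OneDNNDataType::undef;
}
```

Returning `undef` instead of throwing lets callers probe whether a dtype is oneDNN-representable without exception handling.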
7 changes: 3 additions & 4 deletions paddle/fluid/framework/data_layout_transform_test.cc
@@ -54,9 +54,8 @@ TEST(DataTransformBf16, GetDataFromTensorDNNL) {

   void* in_data =
       paddle::framework::GetDataFromTensor(in, dnnl::memory::data_type::bf16);
-  EXPECT_EQ(
-      in_data,
-      paddle::platform::to_void_cast(in.data<paddle::platform::bfloat16>()));
+  EXPECT_EQ(in_data,
+            phi::funcs::to_void_cast(in.data<paddle::platform::bfloat16>()));
 }

 TEST(DataTransformInt32, GetDataFromTensorDNNL) {
@@ -66,6 +65,6 @@ TEST(DataTransformInt32, GetDataFromTensorDNNL) {

   void* in_data =
       paddle::framework::GetDataFromTensor(in, dnnl::memory::data_type::s32);
-  EXPECT_EQ(in_data, paddle::platform::to_void_cast(in.data<int32_t>()));
+  EXPECT_EQ(in_data, phi::funcs::to_void_cast(in.data<int32_t>()));
 }
 #endif
14 changes: 7 additions & 7 deletions paddle/fluid/framework/data_transform.cc
@@ -49,24 +49,24 @@ void TransformData(const OpKernelType &expected_kernel_type,
   // do layout transform
   if (NeedTransformLayout(lout, lin)) {
 #ifdef PADDLE_WITH_MKLDNN
-    if (lin == DataLayout::kMKLDNN || lout == DataLayout::kMKLDNN) {
+    if (lin == DataLayout::ONEDNN || lout == DataLayout::ONEDNN) {
       PADDLE_ENFORCE_EQ(
-          !(lin == DataLayout::kMKLDNN && lout == DataLayout::kMKLDNN),
+          !(lin == DataLayout::ONEDNN && lout == DataLayout::ONEDNN),
           true,
           platform::errors::PreconditionNotMet(
-              "No layout transform needed between two MKLDNN OPKernels."));
+              "No layout transform needed between two oneDNN OPKernels."));

-      if (lin != DataLayout::kMKLDNN && lout == DataLayout::kMKLDNN) {
+      if (lin != DataLayout::ONEDNN && lout == DataLayout::ONEDNN) {
         // Case1 - transform from Non-MKLDNN OPKernel to MKLDNN OPKernel
         // Just set layout/format. No real transform occur

-        auto out_format = platform::MKLDNNFormatForSize(in.dims().size(),
-                                                        ToMKLDNNFormat(lin));
+        auto out_format = phi::funcs::OneDNNFormatForSize(in.dims().size(),
+                                                          ToOneDNNFormat(lin));
         out.ShareDataWith(input_tensor);
         // For NHWC data we need reshape of tensors as MKL-DNN
         // is expecting NHWC dims description order
         if (lin == DataLayout::kNHWC || lin == DataLayout::kNDHWC) {
-          platform::MatchShapeToLayout(&out, lin, lout);
+          phi::funcs::MatchShapeToLayout(&out, lin, lout);
           // We register only NHWC assuming that model is consistent e.g. either
           // NHWC or NCHW
           paddle::platform::MKLDNNDeviceContext::tls()
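The semantic point in this hunk lives in its comments: entering a oneDNN kernel from an NHWC model performs no data movement at all. ShareDataWith aliases the buffer, and MatchShapeToLayout only re-describes the dims in the NCHW order oneDNN descriptors expect. A minimal sketch of that re-description (assumed semantics, 4-D NHWC case only):

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Sketch of MatchShapeToLayout for the NHWC -> oneDNN direction: no bytes
// move, only the dims metadata is re-ordered into the NCHW description
// that oneDNN memory descriptors expect.
std::vector<int64_t> NhwcDimsAsNchw(const std::vector<int64_t>& dims) {
  assert(dims.size() == 4 && "sketch handles the 4-D NHWC case only");
  return {dims[0], dims[3], dims[1], dims[2]};  // {N,H,W,C} -> {N,C,H,W}
}
```

For example, {8, 224, 224, 3} becomes {8, 3, 224, 224} while the underlying buffer stays put.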
paddle/fluid/framework/ir/mkldnn/conv_activation_mkldnn_fuse_pass.cc
@@ -25,7 +25,7 @@ namespace ir {
 using string::PrettyLogDetail;

 void ConvActivationMkldnnFusePass::ApplyImpl(Graph* graph) const {
-  auto act_types = paddle::platform::GetSupportedActivations();
+  auto act_types = phi::funcs::GetSupportedActivations();
   std::vector<std::string> conv_types = {"conv2d"};

   for (auto& act_type : act_types) {
@@ -64,7 +64,7 @@ void ConvActivationMkldnnFusePass::FuseConvAct(Graph* graph,
   OpDesc* conv_op = conv->Op();
   OpDesc* act_op = activation->Op();

-  auto attr_map = paddle::platform::GetAttributeMap(act_type);
+  auto attr_map = phi::funcs::GetAttributeMap(act_type);
   for (const auto& attrs : attr_map) {
     if (act_op->HasAttr(attrs.first)) {
       conv_op->SetAttr(attrs.second, act_op->GetAttr(attrs.first));
@@ -145,7 +145,7 @@ void ConvActivationMkldnnFusePass::FuseConvConcatAct(
   OpDesc* conv_op = node->inputs[0]->Op();
   OpDesc* act_op = activation_op->Op();

-  auto attr_map = paddle::platform::GetAttributeMap(act_type);
+  auto attr_map = phi::funcs::GetAttributeMap(act_type);
   for (const auto& attrs : attr_map) {
     if (act_op->HasAttr(attrs.first)) {
       conv_op->SetAttr(attrs.second, act_op->GetAttr(attrs.first));
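This attribute-copy loop recurs verbatim in the elementwise, FC, matmul, and softplus passes below: GetAttributeMap(act_type) yields pairs mapping an activation attribute name to the name the fused op expects, and matching attributes are carried over. A compact sketch of the pattern, using a toy OpDesc with float-valued attrs (the real OpDesc is variant-typed):

```cpp
#include <string>
#include <unordered_map>

// Toy OpDesc with just enough surface for the pattern; the real class
// lives in paddle/fluid/framework/op_desc.h and stores variant attrs.
struct OpDesc {
  std::unordered_map<std::string, float> attrs;
  bool HasAttr(const std::string& n) const { return attrs.count(n) > 0; }
  float GetAttr(const std::string& n) const { return attrs.at(n); }
  void SetAttr(const std::string& n, float v) { attrs[n] = v; }
};

// attr_map: activation-attr name -> fused-op attr name. Attributes the
// activation actually carries are copied onto the op absorbing it.
void CopyFusedAttrs(
    const std::unordered_map<std::string, std::string>& attr_map,
    const OpDesc& act_op, OpDesc* fused_op) {
  for (const auto& attrs : attr_map) {
    if (act_op.HasAttr(attrs.first)) {
      fused_op->SetAttr(attrs.second, act_op.GetAttr(attrs.first));
    }
  }
}
```

Renaming during the copy is what lets, say, an activation's "beta" land as the fused op's "fuse_beta" without the fused kernel knowing which activation it came from.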
4 changes: 2 additions & 2 deletions paddle/fluid/framework/ir/mkldnn/elt_act_mkldnn_fuse_pass.cc
@@ -27,7 +27,7 @@ namespace ir {
 using string::PrettyLogDetail;

 void ElementwiseActivationOneDNNPass::ApplyImpl(Graph *graph) const {
-  auto act_types = paddle::platform::GetSupportedActivations();
+  auto act_types = phi::funcs::GetSupportedActivations();
   std::vector<std::string> elt_types = {
       "elementwise_add", "elementwise_sub", "elementwise_mul"};

@@ -76,7 +76,7 @@ void ElementwiseActivationOneDNNPass::FuseElementwiseAct(
   }

   auto *activation_op = activation->Op();
-  auto attr_map = paddle::platform::GetAttributeMap(act_type);
+  auto attr_map = phi::funcs::GetAttributeMap(act_type);
   for (const auto &attr : attr_map) {
     if (activation_op->HasAttr(attr.first)) {
       elementwise_op->SetAttr(attr.second,
4 changes: 2 additions & 2 deletions paddle/fluid/framework/ir/mkldnn/fc_act_mkldnn_fuse_pass.cc
@@ -25,7 +25,7 @@ namespace ir {
 using string::PrettyLogDetail;

 void FuseFCActOneDNNPass::ApplyImpl(Graph *graph) const {
-  auto act_types = paddle::platform::GetSupportedActivations();
+  auto act_types = phi::funcs::GetSupportedActivations();

   for (auto act_type : act_types) FuseFCAct(graph, act_type);
 }
@@ -61,7 +61,7 @@ void FuseFCActOneDNNPass::FuseFCAct(Graph *graph,
                         "is used."));
   }

-  auto attr_map = paddle::platform::GetAttributeMap(act_type);
+  auto attr_map = phi::funcs::GetAttributeMap(act_type);
   for (const auto &attr : attr_map) {
     if (act_op->HasAttr(attr.first)) {
       fc_op->SetAttr(attr.second, act_op->GetAttr(attr.first));
4 changes: 2 additions & 2 deletions paddle/fluid/framework/ir/mkldnn/interpolate_mkldnn_pass.cc
@@ -31,7 +31,7 @@

 class Graph;

-void InterpolateMKLDNNPass::ApplyImpl(ir::Graph* graph) const {
+void InterpolateOneDNNPass::ApplyImpl(ir::Graph* graph) const {
   PADDLE_ENFORCE_NOT_NULL(graph,
                           platform::errors::InvalidArgument(
                               "Pointer to graph argument should not be NULL."));
@@ -70,4 +70,4 @@ void InterpolateMKLDNNPass::ApplyImpl(ir::Graph* graph) const {
 }  // namespace paddle

 REGISTER_PASS(interpolate_mkldnn_pass,
-              paddle::framework::ir::InterpolateMKLDNNPass);
+              paddle::framework::ir::InterpolateOneDNNPass);
4 changes: 2 additions & 2 deletions paddle/fluid/framework/ir/mkldnn/interpolate_mkldnn_pass.h
@@ -28,9 +28,9 @@ namespace ir {
 */
 class Graph;

-class InterpolateMKLDNNPass : public FusePassBase {
+class InterpolateOneDNNPass : public FusePassBase {
  public:
-  virtual ~InterpolateMKLDNNPass() {}
+  virtual ~InterpolateOneDNNPass() {}

 protected:
  void ApplyImpl(ir::Graph* graph) const override;
paddle/fluid/framework/ir/mkldnn/matmul_activation_mkldnn_fuse_pass.cc
@@ -25,7 +25,7 @@ namespace ir {
 using string::PrettyLogDetail;

 void MatmulActivationMkldnnFusePass::ApplyImpl(Graph* graph) const {
-  auto act_types = paddle::platform::GetSupportedActivations();
+  auto act_types = phi::funcs::GetSupportedActivations();
   auto matmul_types = {"matmul", "matmul_v2"};

   for (const auto& matmul_type : matmul_types)
@@ -64,7 +64,7 @@ void MatmulActivationMkldnnFusePass::FuseMatmulAct(
   OpDesc* matmul_op = matmul->Op();
   OpDesc* act_op = activation->Op();

-  auto attr_map = paddle::platform::GetAttributeMap(act_type);
+  auto attr_map = phi::funcs::GetAttributeMap(act_type);
   for (const auto& attrs : attr_map) {
     if (act_op->HasAttr(attrs.first)) {
       matmul_op->SetAttr(attrs.second, act_op->GetAttr(attrs.first));
paddle/fluid/framework/ir/mkldnn/softplus_activation_mkldnn_fuse_pass.cc
@@ -27,7 +27,7 @@ namespace ir {
 using string::PrettyLogDetail;

 void SoftplusActivationOneDNNPass::ApplyImpl(Graph *graph) const {
-  auto act_types = paddle::platform::GetSupportedActivations();
+  auto act_types = phi::funcs::GetSupportedActivations();

   // Currently softplus can't be fused with hard_sigmoid
   act_types.erase(
@@ -75,7 +75,7 @@ void SoftplusActivationOneDNNPass::FuseSoftplusActivation(
   }

   auto *activation_op = activation->Op();
-  auto attr_map = paddle::platform::GetAttributeMap(act_type);
+  auto attr_map = phi::funcs::GetAttributeMap(act_type);
   for (const auto &attr : attr_map) {
     if (activation_op->HasAttr(attr.first)) {
       softplus_op->SetAttr(attr.second, activation_op->GetAttr(attr.first));
paddle/fluid/framework/new_executor/data_transfer.cc
@@ -230,7 +230,7 @@ std::shared_ptr<OperatorBase> TransferLayout(const std::string& var_name,
 #ifdef PADDLE_WITH_MKLDNN

   // NOTE(zhiqiu): hot fix, follow the same logic in DataCopy() in fetch_op.cc
-  if (in_layout == phi::DataLayout::kMKLDNN &&
+  if (in_layout == phi::DataLayout::ONEDNN &&
       var_name == framework::GradVarName("Filter") && is_fetch_v2) {
     VLOG(4) << "Match special case(Filter && fetch_v2) " << var_name;
     out_layout = phi::DataLayout::kNCHW;
@@ -484,9 +484,9 @@ void ApplyDataTransform(const OpKernelType& expected_kernel_key,
       // MKL-DNN shape of Var may differ from kNHWC Var
       // In such situation corressponding resized Var
       // has to be created and registered
-      if ((tensor_in->layout() == DataLayout::kMKLDNN) &&
+      if ((tensor_in->layout() == DataLayout::ONEDNN) &&
           (var->IsType<phi::DenseTensor>() == true) &&
-          (expected_kernel_key.data_layout_ != DataLayout::kMKLDNN) &&
+          (expected_kernel_key.data_layout_ != DataLayout::ONEDNN) &&
          (paddle::platform::MKLDNNDeviceContext::tls()
               .get_cur_paddle_data_layout() == DataLayout::kNHWC)) {
        VLOG(7) << "Created reshaped dummy input based on MKL-DNN "
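The "hot fix" guard only triggers for the fetched gradient of a convolution Filter under fetch_v2, forcing it from the oneDNN layout back to NCHW before it leaves the framework. A sketch of the guard's moving parts, assuming Paddle's conventional "@GRAD" gradient-variable suffix (the helper below is illustrative, not this PR's code):

```cpp
#include <string>

// Paddle derives gradient variable names by appending a suffix;
// "@GRAD" is assumed here from the framework's usual convention.
std::string GradVarName(const std::string& var_name) {
  return var_name + "@GRAD";
}

// Mirrors the condition in TransferLayout: only the fetched gradient of
// the conv filter gets forced back to NCHW.
bool IsFilterGradFetchSpecialCase(const std::string& var_name,
                                  bool is_fetch_v2) {
  return is_fetch_v2 && var_name == GradVarName("Filter");
}
```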
6 changes: 3 additions & 3 deletions paddle/fluid/framework/new_executor/new_executor_defs.cc
@@ -244,7 +244,7 @@ void InterpretercoreInferShapeContext::ShareAllLoD(
     auto* out_tensor = out_var->GetMutable<phi::DenseTensor>();
     out_tensor->set_lod(in_tensor.lod());
 #ifdef PADDLE_WITH_MKLDNN
-    if (in_tensor.layout() != DataLayout::kMKLDNN)
+    if (in_tensor.layout() != DataLayout::ONEDNN)
 #endif
     out_tensor->set_layout(in_tensor.layout());
   }
@@ -309,7 +309,7 @@ void InterpretercoreInferShapeContext::ShareLoD(const std::string& in,
   // This is to avoid kMKLDNN is populated wrongly into a non-MKLDNN
   // OPKernel. In all MKLDNN OPkernel, set_layout(kMKLDNN) should be called
   // in Compute()
-  if (in_tensor.layout() != DataLayout::kMKLDNN)
+  if (in_tensor.layout() != DataLayout::ONEDNN)
 #endif
   out_tensor->set_layout(in_tensor.layout());
 }
@@ -338,7 +338,7 @@ bool InterpretercoreInferShapeContext::IsRunMKLDNNKernel() const {
     auto& op_with_kernel = dynamic_cast<const OperatorWithKernel&>(op_);
     return ((op_with_kernel.kernel_type()) &&
             (op_with_kernel.kernel_type()->data_layout_ ==
-             phi::DataLayout::kMKLDNN));
+             phi::DataLayout::ONEDNN));
   } catch (std::bad_cast& exp) {
     return false;
   }
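All three hunks enforce the same invariant: LoD always propagates between tensors, but a ONEDNN layout must never leak into a tensor owned by a non-oneDNN kernel, since only oneDNN kernels are allowed to set that layout (inside Compute(), per the comment). A stand-in sketch of the guarded propagation:

```cpp
// Stand-ins for the framework types; only the guard logic matters here.
enum class DataLayout { kNCHW, kNHWC, ONEDNN };

struct Tensor {
  DataLayout layout;
  int lod;  // stand-in for the real LoD structure
};

// LoD is shared unconditionally; the layout is shared only when it is not
// the oneDNN-internal one, mirroring the #ifdef'd guard in ShareLoD.
void ShareLoDAndLayout(const Tensor& in, Tensor* out) {
  out->lod = in.lod;
  if (in.layout != DataLayout::ONEDNN) {
    out->layout = in.layout;
  }
}
```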
4 changes: 2 additions & 2 deletions paddle/fluid/framework/op_kernel_type.h
@@ -102,8 +102,8 @@ inline bool NeedTransformLayout(const DataLayout& l, const DataLayout& r) {
       (l != DataLayout::kAnyLayout && r != DataLayout::kAnyLayout && l != r);
 #ifdef PADDLE_WITH_MKLDNN
   // Layout transform needed for either non-MKLDNN to MKLDNN or vice versa
-  ret |= (l != DataLayout::kMKLDNN && r == DataLayout::kMKLDNN);
-  ret |= (l == DataLayout::kMKLDNN && r != DataLayout::kMKLDNN);
+  ret |= (l != DataLayout::ONEDNN && r == DataLayout::ONEDNN);
+  ret |= (l == DataLayout::ONEDNN && r != DataLayout::ONEDNN);
 #endif
   return ret;
 }
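Restating the renamed predicate as a self-contained function makes the two-part rule explicit: differing concrete layouts always need a transform, and crossing the oneDNN boundary in either direction does too, even when the other side is kAnyLayout. A sketch with a stand-in enum for phi::DataLayout:

```cpp
// Stand-in for phi::DataLayout; values trimmed to what the rule needs.
enum class DataLayout { kAnyLayout, kNHWC, kNCHW, ONEDNN };

bool NeedTransformLayout(DataLayout l, DataLayout r) {
  // Two different concrete layouts always require a transform...
  bool ret =
      (l != DataLayout::kAnyLayout && r != DataLayout::kAnyLayout && l != r);
  // ...and entering or leaving the oneDNN layout always does, even when
  // the other side is kAnyLayout, which the first clause would exempt.
  ret |= (l != DataLayout::ONEDNN && r == DataLayout::ONEDNN);
  ret |= (l == DataLayout::ONEDNN && r != DataLayout::ONEDNN);
  return ret;
}
```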