add popart_canonicalization_pass (PaddlePaddle#13)
gglin001 authored Aug 3, 2021
1 parent d19a32d commit a03acc3
Showing 12 changed files with 497 additions and 29 deletions.
7 changes: 7 additions & 0 deletions paddle/fluid/framework/ipu/CMakeLists.txt
@@ -1,2 +1,9 @@
set(POPART_CANONICALIZATION_HANDLERS_SRC
"popart_canonicalization/other_ops.cpp"
)
# TODO(alleng) build static library
cc_library(popart_canonicalization_utils SHARED SRCS popart_canonicalization_utils.cc
${POPART_CANONICALIZATION_HANDLERS_SRC} DEPS framework_proto enforce)

cc_library(ipu_utils SRCS ipu_utils.cc DEPS memory framework_proto popart)
cc_library(ipu_backend SRCS ipu_backend.cc DEPS popart graph framework_proto enforce ipu_utils)
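Note: popart_canonicalization_utils.cc itself is not part of this excerpt. For orientation, a minimal sketch of the registration plumbing it plausibly provides for the REGISTER_HANDLER macro used in other_ops.cpp below — names and layout here are assumptions, not the commit's code:

// Hypothetical sketch only -- not the commit's code. A static registry from
// Paddle op type to handler function, filled at load time by REGISTER_HANDLER.
#include <string>
#include <unordered_map>

namespace paddle {
namespace framework {
namespace ir {
class Graph;  // real definitions: paddle/fluid/framework/ir/graph.h, node.h
class Node;
}  // namespace ir

using SymbolHandler = ir::Node *(*)(ir::Graph *, ir::Node *);

inline std::unordered_map<std::string, SymbolHandler> &SymbolHandlers() {
  static std::unordered_map<std::string, SymbolHandler> handlers;
  return handlers;
}

struct RegisterHandler {
  RegisterHandler(const std::string &type, SymbolHandler handler) {
    SymbolHandlers().emplace(type, handler);
  }
};

// REGISTER_HANDLER(conv2d, conv2d_handler) expands to a static registrar
// whose constructor runs before main() and fills the registry.
#define REGISTER_HANDLER(op_type, handler) \
  static RegisterHandler registrar_##op_type(#op_type, handler)

}  // namespace framework
}  // namespace paddle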
112 changes: 87 additions & 25 deletions paddle/fluid/framework/ipu/ipu_backend.cc
@@ -15,6 +15,8 @@ limitations under the License. */
#include "paddle/fluid/framework/ipu/ipu_backend.h"

#include <algorithm>
#include <vector>

#include <popart/builder.hpp>
#include <popart/dataflow.hpp>
#include <popart/devicemanager.hpp>
@@ -25,12 +27,12 @@ limitations under the License. */
#include <popart/stepio.hpp>
#include <popart/tensor.hpp>
#include <popart/tensorinfo.hpp>

#include "paddle/fluid/framework/feed_fetch_type.h"
#include "paddle/fluid/framework/framework.pb.h"
#include "paddle/fluid/framework/ipu/ipu_utils.h"
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/ir/graph_helper.h"
#include "paddle/fluid/framework/ir/node.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/platform/enforce.h"
@@ -68,26 +70,7 @@ void IpuBackend::Compile(ir::Graph* graph,
}
}

for (const ir::Node* n : graph->Nodes()) {
if (n->IsOp()) {
auto* op_desc = n->Op();
if (op_desc->Type() == "elementwise_add") {
if (inputs_.size() != 2) {
PADDLE_THROW(platform::errors::InvalidArgument("Invalid inputs."));
}
VLOG(1) << "found elementwise_add op";
popart::TensorId lhs = inputs_[0];
popart::TensorId rhs = inputs_[1];
VLOG(1) << "popart add lhs tensor id = " << lhs;
VLOG(1) << "popart add rhs tensor id = " << rhs;
popart::TensorId result = builder_->aiOnnxOpset11().add({lhs, rhs});
VLOG(1) << "popart add result tensor id = " << result;
tensors_.emplace(fetch_list[0], result);
} else {
PADDLE_THROW(platform::errors::Unimplemented("Unimplemented."));
}
}
}
LowerBody(graph);

VLOG(1) << "-- fetch_list --";
for (const auto& fetch_name : fetch_list) {
@@ -105,7 +88,9 @@
builder_->addOutputTensor(tensor->second);
outputs_.push_back(tensor->second);
}
}

void IpuBackend::Prepare() {
VLOG(1) << "Save Model to file paddle_model.onnx ...\n";
builder_->saveModelProto("paddle_model.onnx");

@@ -137,10 +122,11 @@

void IpuBackend::Run(const std::vector<const Tensor*>& inputs,
std::vector<Tensor*>& outputs) {
Prepare();

std::map<popart::TensorId, popart::IArray&> popart_inputs;
std::map<popart::TensorId, popart::NDArrayWrapper<float>> input_wrappers;

// Prepare input tensor
for (size_t i = 0; i < inputs.size(); i++) {
auto tensor_id = inputs_[i];
const Tensor* tensor = inputs[i];
@@ -151,7 +137,6 @@ void IpuBackend::Run(const std::vector<const Tensor*>& inputs,
input_wrappers.emplace(tensor_id, std::move(data));
popart_inputs.emplace(tensor_id, input_wrappers.at(tensor_id));
}

// Prepare output tensor
std::map<popart::TensorId, popart::IArray&> popart_anchors;
std::map<popart::TensorId, popart::NDArrayWrapper<float>> anchor_wrappers;
@@ -173,5 +158,82 @@ void IpuBackend::Run(const std::vector<const Tensor*>& inputs,
VLOG(1) << "Running...done";
}

std::vector<std::string> IpuBackend::GetOpInputs(const OpDesc* op) {
auto op_inputs = op->Input("__inputs__");
std::vector<std::string> inputs;
for (const auto& in : op_inputs) {
// Use the lowered popart tensor id if this variable already has one;
// otherwise keep the Paddle variable name (e.g. a graph input).
if (tensors_.find(in) != tensors_.end()) {
inputs.push_back(tensors_[in]);
} else {
inputs.push_back(in);
}
}
return inputs;
}
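A self-contained toy of that resolution rule (popart::TensorId is an alias for std::string, so plain strings stand in for tensors_ here; the variable names are made up):

#include <iostream>
#include <map>
#include <string>
#include <vector>

int main() {
  // Stand-in for IpuBackend::tensors_: Paddle var name -> popart tensor id.
  std::map<std::string, std::string> tensors = {{"conv_out", "Conv:0"}};

  // Stand-in for an op's "__inputs__" list.
  std::vector<std::string> op_inputs = {"conv_out", "feed_x"};
  for (const auto &in : op_inputs) {
    auto it = tensors.find(in);
    // Prints "conv_out -> Conv:0" then "feed_x -> feed_x".
    std::cout << in << " -> " << (it != tensors.end() ? it->second : in)
              << "\n";
  }
  return 0;
}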

void IpuBackend::LowerBody(const ir::Graph* graph) {
// Visit ops in topological order so every op's inputs are lowered before
// the op itself is lowered.
auto nodes = TopologySortOperations(*graph);
for (const auto* node : nodes) {
auto* op = node->Op();
auto op_type = op->Type();
if (op_type == "RandomUniform") {
auto outputs = op->Output("__outputs__");
auto shape = BOOST_GET_CONST(std::vector<int64_t>, op->GetAttr("shape"));
auto dtype = BOOST_GET_CONST(int, op->GetAttr("dtype"));
auto high = BOOST_GET_CONST(float, op->GetAttr("high"));
auto low = BOOST_GET_CONST(float, op->GetAttr("low"));
popart::TensorId result =
builder_->aiOnnxOpset11().randomuniform(shape, dtype, high, low);
tensors_.emplace(outputs[0], result);
} else if (op_type == "RandomNormal") {
auto outputs = op->Output("__outputs__");
auto shape = BOOST_GET_CONST(std::vector<int64_t>, op->GetAttr("shape"));
auto dtype = BOOST_GET_CONST(int, op->GetAttr("dtype"));
auto mean = BOOST_GET_CONST(float, op->GetAttr("mean"));
auto scale = BOOST_GET_CONST(float, op->GetAttr("scale"));
popart::TensorId result =
builder_->aiOnnxOpset11().randomnormal(shape, dtype, mean, scale);
tensors_.emplace(outputs[0], result);
} else if (op_type == "ConstantOfShape") {
// TODO(alleng) use RandomUniform for now
auto outputs = op->Output("__outputs__");
auto shape = BOOST_GET_CONST(std::vector<int64_t>, op->GetAttr("shape"));
auto dtype = BOOST_GET_CONST(int, op->GetAttr("dtype"));
auto high = 1.0f;
auto low = 0.0f;
popart::TensorId result =
builder_->aiOnnxOpset11().randomuniform(shape, dtype, high, low);
tensors_.emplace(outputs[0], result);
} else if (op_type == "Add") {
auto inputs = GetOpInputs(op);
auto outputs = op->Output("__outputs__");
popart::TensorId result = builder_->aiOnnxOpset11().add(inputs);
tensors_.emplace(outputs[0], result);
} else if (op_type == "Conv") {
auto inputs = GetOpInputs(op);
auto outputs = op->Output("__outputs__");
auto dilations =
BOOST_GET_CONST(std::vector<int64_t>, op->GetAttr("dilations"));
auto group = BOOST_GET_CONST(int64_t, op->GetAttr("group"));
auto pads = BOOST_GET_CONST(std::vector<int64_t>, op->GetAttr("pads"));
auto strides =
BOOST_GET_CONST(std::vector<int64_t>, op->GetAttr("strides"));
popart::TensorId result = builder_->aiOnnxOpset11().conv(
inputs, dilations, group, {}, pads, strides);
tensors_.emplace(outputs[0], result);
} else if (op_type == "ReduceMean") {
auto inputs = GetOpInputs(op);
auto outputs = op->Output("__outputs__");
auto axes = BOOST_GET_CONST(std::vector<int64_t>, op->GetAttr("axes"));
auto keepdims = BOOST_GET_CONST(int64_t, op->GetAttr("keepdims"));
popart::TensorId result =
builder_->aiOnnxOpset11().reducemean(inputs, axes, keepdims);
tensors_.emplace(outputs[0], result);
} else {
PADDLE_THROW(platform::errors::Unimplemented("Unimplemented."));
}
}
}

} // namespace framework
} // namespace paddle
10 changes: 6 additions & 4 deletions paddle/fluid/framework/ipu/ipu_backend.h
@@ -66,7 +66,7 @@ class IpuBackend {
optimizer_.attrs[attr] = value;
}

std::vector<int64_t> GetTensorShape(const std::string& var_name) {
std::vector<int64_t> GetTensorShape(const std::string &var_name) {
return builder_->getTensorShape(tensors_[var_name]);
}

@@ -78,9 +78,11 @@
}

private:
// std::map<std::string, popart::TensorId> inputs_;
// std::map<std::string, popart::TensorId> outputs_;
// std::map<std::string, popart::TensorId> tensors_;
void Prepare();
void LowerBody(const ir::Graph *graph);
std::vector<std::string> GetOpInputs(const OpDesc *op);

private:
Optimizer optimizer_;

std::vector<popart::TensorId> inputs_;
195 changes: 195 additions & 0 deletions paddle/fluid/framework/ipu/popart_canonicalization/other_ops.cpp
@@ -0,0 +1,195 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/ipu/popart_canonicalization_utils.h"
#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace framework {
namespace {

ir::Node *conv2d_handler(ir::Graph *graph, ir::Node *node) {
auto *op = node->Op();
auto op_desc = std::make_unique<framework::OpDesc>();
op_desc->SetType("Conv");

std::vector<std::string> inputs;
inputs.push_back(op->Input("Input").front());
inputs.push_back(op->Input("Filter").front());
if (op->HasInput("Bias")) {
if (!op->Input("Bias").empty()) {
inputs.push_back(op->Input("Bias").front());
}
}
op_desc->SetInput("__inputs__", inputs);
std::vector<std::string> outputs;
outputs.push_back(op->Output("Output").front());
op_desc->SetOutput("__outputs__", outputs);

auto dilations_ = BOOST_GET_CONST(std::vector<int>, op->GetAttr("dilations"));
auto dilations = std::vector<int64_t>{dilations_.begin(), dilations_.end()};
auto group_ = BOOST_GET_CONST(int, op->GetAttr("groups"));
auto group = int64_t{group_};
// auto paddings_ = BOOST_GET_CONST(std::vector<int>, op->GetAttr("paddings"));
// auto pads = std::vector<int64_t>{paddings_.begin(), paddings_.end()};
auto pads = std::vector<int64_t>{1, 1, 1, 1};
auto stride_ = BOOST_GET_CONST(std::vector<int>, op->GetAttr("strides"));
auto stride = std::vector<int64_t>{stride_.begin(), stride_.end()};
op_desc->SetAttr("dilations", dilations);
op_desc->SetAttr("group", group);
op_desc->SetAttr("pads", pads);
op_desc->SetAttr("strides", stride);

op_desc->Flush();
return graph->CreateOpNode(op_desc.get());
}

ir::Node *elementwise_add_handler(ir::Graph *graph, ir::Node *node) {
auto *op = node->Op();
auto op_desc = std::make_unique<framework::OpDesc>();
op_desc->SetType("Add");

std::vector<std::string> inputs;
inputs.push_back(op->Input("X").front());
inputs.push_back(op->Input("Y").front());
op_desc->SetInput("__inputs__", inputs);
std::vector<std::string> outputs;
outputs.push_back(op->Output("Out").front());
op_desc->SetOutput("__outputs__", outputs);

op_desc->Flush();
return graph->CreateOpNode(op_desc.get());
}

ir::Node *reduce_mean_handler(ir::Graph *graph, ir::Node *node) {
auto *op = node->Op();
auto op_desc = std::make_unique<framework::OpDesc>();
op_desc->SetType("ReduceMean");

std::vector<std::string> inputs;
inputs.push_back(op->Input("X").front());
op_desc->SetInput("__inputs__", inputs);
std::vector<std::string> outputs;
outputs.push_back(op->Output("Out").front());
op_desc->SetOutput("__outputs__", outputs);

auto axes_ = BOOST_GET_CONST(std::vector<int>, op->GetAttr("dim"));
auto axes = std::vector<int64_t>{axes_.begin(), axes_.end()};
op_desc->SetAttr("axes", axes);
auto keepdims_ = BOOST_GET_CONST(bool, op->GetAttr("keep_dim"));
auto keepdims = int64_t{keepdims_};
op_desc->SetAttr("keepdims", keepdims);

op_desc->Flush();
return graph->CreateOpNode(op_desc.get());
}

ir::Node *uniform_random_handler(ir::Graph *graph, ir::Node *node) {
auto *op = node->Op();
auto op_desc = std::make_unique<framework::OpDesc>();
op_desc->SetType("RandomUniform");

std::vector<std::string> outputs;
outputs.push_back(op->Output("Out").front());
op_desc->SetOutput("__outputs__", outputs);

auto shape = BOOST_GET_CONST(std::vector<int64_t>, op->GetAttr("shape"));
op_desc->SetAttr("shape", shape);
// auto dtype = BOOST_GET_CONST(int, op->GetAttr("dtype"));
op_desc->SetAttr("dtype", 1);
// TODO: convert the Paddle dtype attr to its ONNX equivalent instead of
// hard-coding ONNX FLOAT (1). Paddle's proto type codes for reference:
/*
enum Type {
// Pod Types
BOOL = 0;
INT16 = 1;
INT32 = 2;
INT64 = 3;
FP16 = 4;
FP32 = 5;
FP64 = 6;
// Tensor<size_t> is used in C++.
SIZE_T = 19;
UINT8 = 20;
INT8 = 21;
BF16 = 22;
COMPLEX64 = 23;
COMPLEX128 = 24;
...
*/
auto max = BOOST_GET_CONST(float, op->GetAttr("max"));
op_desc->SetAttr("high", max);
auto min = BOOST_GET_CONST(float, op->GetAttr("min"));
op_desc->SetAttr("low", min);
// TODO: forward the seed attribute
op_desc->Flush();
return graph->CreateOpNode(op_desc.get());
}
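The handlers pin dtype to ONNX FLOAT (1) for now. One plausible shape for the missing conversion, pairing the Paddle proto codes listed above with ONNX TensorProto::DataType codes — an illustration, not part of this commit:

#include <map>
#include <stdexcept>

// Illustrative only: Paddle proto::VarType code -> ONNX TensorProto code.
inline int PaddleDtypeToOnnxDtype(int paddle_dtype) {
  static const std::map<int, int> kMapping = {
      {0, 9},   // BOOL  -> ONNX BOOL
      {1, 5},   // INT16 -> ONNX INT16
      {2, 6},   // INT32 -> ONNX INT32
      {3, 7},   // INT64 -> ONNX INT64
      {4, 10},  // FP16  -> ONNX FLOAT16
      {5, 1},   // FP32  -> ONNX FLOAT
      {6, 11},  // FP64  -> ONNX DOUBLE
  };
  auto it = kMapping.find(paddle_dtype);
  if (it == kMapping.end()) {
    throw std::runtime_error("unsupported dtype for IPU lowering");
  }
  return it->second;
}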

ir::Node *gaussian_random_handler(ir::Graph *graph, ir::Node *node) {
auto *op = node->Op();
auto op_desc = std::make_unique<framework::OpDesc>();
op_desc->SetType("RandomNormal");

std::vector<std::string> outputs;
outputs.push_back(op->Output("Out").front());
op_desc->SetOutput("__outputs__", outputs);

auto shape = BOOST_GET_CONST(std::vector<int64_t>, op->GetAttr("shape"));
op_desc->SetAttr("shape", shape);
// auto dtype = BOOST_GET_CONST(int, op->GetAttr("dtype"));
op_desc->SetAttr("dtype", 1);

auto mean = BOOST_GET_CONST(float, op->GetAttr("mean"));
op_desc->SetAttr("mean", mean);
auto std = BOOST_GET_CONST(float, op->GetAttr("std"));
op_desc->SetAttr("scale", std);
// TODO: forward the seed attribute

op_desc->Flush();
return graph->CreateOpNode(op_desc.get());
}

ir::Node *fill_constant_handler(ir::Graph *graph, ir::Node *node) {
auto *op = node->Op();
auto op_desc = std::make_unique<framework::OpDesc>();
op_desc->SetType("ConstantOfShape");

std::vector<std::string> outputs;
outputs.push_back(op->Output("Out").front());
op_desc->SetOutput("__outputs__", outputs);

auto shape = BOOST_GET_CONST(std::vector<int64_t>, op->GetAttr("shape"));
op_desc->SetAttr("shape", shape);
// auto dtype = BOOST_GET_CONST(int, op->GetAttr("dtype"));
op_desc->SetAttr("dtype", 1);

auto value = BOOST_GET_CONST(float, op->GetAttr("value"));
op_desc->SetAttr("value", value);

op_desc->Flush();
return graph->CreateOpNode(op_desc.get());
}

REGISTER_HANDLER(conv2d, conv2d_handler);
REGISTER_HANDLER(elementwise_add, elementwise_add_handler);
REGISTER_HANDLER(reduce_mean, reduce_mean_handler);
REGISTER_HANDLER(uniform_random, uniform_random_handler);
REGISTER_HANDLER(gaussian_random, gaussian_random_handler);
REGISTER_HANDLER(fill_constant, fill_constant_handler);

} // namespace
} // namespace framework
} // namespace paddle
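Extending the pass means adding one more handler of the same shape plus a matching branch in LowerBody. A hypothetical example — relu is not covered by this commit — that would slot into the anonymous namespace above:

// Hypothetical handler mirroring the pattern above; not part of this commit.
// Assumes the same includes and namespaces as other_ops.cpp.
ir::Node *relu_handler(ir::Graph *graph, ir::Node *node) {
  auto *op = node->Op();
  auto op_desc = std::make_unique<framework::OpDesc>();
  op_desc->SetType("Relu");
  // Single input/output, no attributes to canonicalize.
  op_desc->SetInput("__inputs__", {op->Input("X").front()});
  op_desc->SetOutput("__outputs__", {op->Output("Out").front()});
  op_desc->Flush();
  return graph->CreateOpNode(op_desc.get());
}

REGISTER_HANDLER(relu, relu_handler);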