Skip to content

Commit

Permalink
fix expand op (PaddlePaddle#169)
Browse files Browse the repository at this point in the history
* fix expand op

* prepare once
  • Loading branch information
gglin001 authored Sep 18, 2021
1 parent aa85ec2 commit b19499e
Show file tree
Hide file tree
Showing 8 changed files with 35 additions and 32 deletions.
15 changes: 8 additions & 7 deletions paddle/fluid/framework/ipu/ipu_backend.cc
Original file line number Diff line number Diff line change
Expand Up @@ -59,31 +59,32 @@ void IpuBackend::Compile(ir::Graph* graph,
compiler_->LowerWeights(graph, scope_);
compiler_->LowerBody(graph);
compiler_->InitOutputs(fetch_list);
executor_->SetOutputsShape(compiler_->GetOutputsShape());
executor_->SetOutputTensorId(compiler_->GetOutputTensors());
executor_->SetWeights(compiler_->GetWeights());
VLOG(10) << "leave IpuBackend::Compile";
}

// Executes the compiled graph: feeds `inputs`, asks the executor to run the
// session, and writes the fetched results into `outputs`.
// NOTE(review): this span is diff residue from a commit page with the +/-
// markers stripped — both the old guarded prepare block and the new bare
// Prepare() call appear below; only one existed in each revision. As written
// the duplication is harmless because Prepare() is internally guarded by
// is_prepared_ (see IpuBackend::Prepare), so the second call is a no-op.
void IpuBackend::Run(const std::vector<const Tensor*>& inputs,
const std::vector<Tensor*>& outputs) {
if (!is_prepared_) {
Prepare();
is_prepared_ = true;
}

Prepare();
auto inputs_id = compiler_->GetInputs();    // feed-list tensor ids
auto outputs_id = compiler_->GetOutputs();  // fetch-list tensor ids
executor_->Run(inputs_id, inputs, outputs_id, outputs);
}

// Builds the executor's popart session exactly once; later calls are no-ops.
// The is_prepared_ flag is set BEFORE the session is built, preserving the
// original "prepare once" semantics: a re-entrant or repeated call never
// rebuilds, even if the first attempt threw partway through.
void IpuBackend::Prepare() {
  if (is_prepared_) {
    return;
  }
  is_prepared_ = true;  // set up-front so any subsequent call bails out early
  auto proto = compiler_->GetModelProto();  // serialized model proto string
  auto tensors = compiler_->GetTensors();   // name -> popart::TensorId map
  auto outputs = compiler_->GetOutputs();   // fetch-list tensor ids
  executor_->Prepare(proto, tensors, outputs, device_);
}

void IpuBackend::SetScope(Scope& scope) {
void IpuBackend::SetScope(const Scope& scope) {
scope_ = &scope;
executor_->SetScope(&scope);
}
Expand Down
10 changes: 6 additions & 4 deletions paddle/fluid/framework/ipu/ipu_backend.h
Original file line number Diff line number Diff line change
Expand Up @@ -53,16 +53,19 @@ class IpuBackend {
void Compile(ir::Graph *graph, const std::vector<std::string> &feed_list,
const std::vector<std::string> &fetch_list);

// need doc
void Prepare();

// what run does include:
// 1. construct forward onnx graph
// 2. graph-level optimization
// 3. autodiff
void Run(const std::vector<const Tensor *> &inputs,
const std::vector<Tensor *> &outputs);

Executor &GetExecutor() { return *executor_; };
Executor &GetExecutor() { return *executor_; }

void SetScope(Scope &scope);
void SetScope(const Scope &scope);
void SetIpuStrategy(const IpuStrategy &strategy);
const IpuStrategy *GetIpuStrategy();

Expand All @@ -74,7 +77,6 @@ class IpuBackend {
bool DeviceIsAttached();

private:
void Prepare();
int UpperIpuNum();

private:
Expand All @@ -84,7 +86,7 @@ class IpuBackend {
bool is_prepared_ = false;

// not own
Scope *scope_ = nullptr;
const Scope *scope_ = nullptr;
const IpuStrategy *ipu_strategy_ = nullptr;
};

Expand Down
10 changes: 5 additions & 5 deletions paddle/fluid/framework/ipu/ipu_compiler.cc
Original file line number Diff line number Diff line change
Expand Up @@ -367,13 +367,13 @@ std::vector<int64_t> Compiler::GetTensorShape(const std::string& name) {
return builder_->getTensorShape(tensors_[name]);
}

std::map<std::string, std::vector<int64_t>> Compiler::GetOutputsShape() {
std::map<std::string, std::vector<int64_t>> outputs_shape;
std::map<std::string, std::string> Compiler::GetOutputTensors() {
std::map<std::string, std::string> outputs;
for (const auto& fetch_name : fetch_list_) {
auto shape = GetTensorShape(fetch_name);
outputs_shape[fetch_name] = shape;
auto tensorid = tensors_[fetch_name];
outputs[fetch_name] = tensorid;
}
return outputs_shape;
return outputs;
}

std::vector<popart::TensorId>& Compiler::GetWeights() { return weights_; }
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/framework/ipu/ipu_compiler.h
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ class Compiler {
std::vector<popart::TensorId> GetOutputs() { return outputs_; }
std::map<std::string, popart::TensorId> GetTensors() { return tensors_; }
std::vector<int64_t> GetTensorShape(const std::string &name);
std::map<std::string, std::vector<int64_t>> GetOutputsShape();
std::map<std::string, std::string> GetOutputTensors();
std::vector<popart::TensorId> &GetWeights();

std::string GetModelProto();
Expand Down
14 changes: 7 additions & 7 deletions paddle/fluid/framework/ipu/ipu_executor.cc
Original file line number Diff line number Diff line change
Expand Up @@ -175,7 +175,7 @@ void Executor::SetWeightsIO() {
beta1_acc *= beta1;
step += 1;
}
data_ptr[0] = (float)step;
data_ptr[0] = static_cast<float>(step);
}
auto tensor_info = session_->getInfo(popart_var_name);
weights_io_.insert(popart_var_name, {data_ptr, tensor_info});
Expand All @@ -191,15 +191,15 @@ void Executor::SetIpuStrategy(const IpuStrategy &strategy) {
ipu_strategy_ = &strategy;
}

void Executor::SetOutputsShape(
const std::map<std::string, std::vector<int64_t>> &info) {
for (const auto &pair : info) {
outputs_shape_[pair.first] = pair.second;
}
void Executor::SetOutputTensorId(
const std::map<std::string, std::string> &outputs) {
outputs_ = outputs;
}

std::vector<int64_t> Executor::GetOutputShape(const std::string &fetch_name) {
auto output_shape = outputs_shape_.at(fetch_name);
auto tensor_id = outputs_[fetch_name];
auto fetch_info = session_->getInfo(tensor_id);
auto output_shape = fetch_info.shape();
if (ipu_strategy_->batches_per_step > 1) {
output_shape.insert(output_shape.begin(), ipu_strategy_->batches_per_step);
}
Expand Down
12 changes: 6 additions & 6 deletions paddle/fluid/framework/ipu/ipu_executor.h
Original file line number Diff line number Diff line change
Expand Up @@ -56,13 +56,13 @@ class Executor {
void WeightsToPaddle();

// Scope
void SetScope(Scope *scope) { scope_ = scope; }
void SetScope(const Scope *scope) { scope_ = scope; }

// Strategy
void SetIpuStrategy(const IpuStrategy &strategy);

// Outputs
void SetOutputsShape(const std::map<std::string, std::vector<int64_t>> &info);
void SetOutputTensorId(const std::map<std::string, std::string> &outputs);
std::vector<int64_t> GetOutputShape(const std::string &fetch_name);

private:
Expand All @@ -73,13 +73,13 @@ class Executor {
std::unique_ptr<popart::Session> session_;

private:
Scope *scope_ = nullptr;
const Scope *scope_ = nullptr;
const IpuStrategy *ipu_strategy_ = nullptr;
popart::WeightsIO weights_io_;
std::vector<popart::TensorId> weights_;
std::map<std::string, std::vector<int64_t>> outputs_shape_;
std::map<std::string, std::string> outputs_;
};

} // namespace paddle
} // namespace framework
} // namespace ipu
} // namespace framework
} // namespace paddle
2 changes: 2 additions & 0 deletions paddle/fluid/operators/ipu_runtime_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,8 @@ class IpuRuntimeKernel : public framework::OpKernel<T> {
ctx.device_context());
ipu_backend->AttachDevice(ipu_ctx.DeviceId());
}
VLOG(4) << "IpuBackend prepare session";
ipu_backend->Prepare();
VLOG(4) << "IpuRuntime Kernel, begin to run graph";
auto inputs = ctx.MultiInput<framework::Tensor>("FeedList");
auto outputs = ctx.MultiOutput<framework::Tensor>("FetchList");
Expand Down
2 changes: 0 additions & 2 deletions python/paddle/fluid/tests/unittests/ipu/test_expand_op_ipu.py
Original file line number Diff line number Diff line change
Expand Up @@ -97,8 +97,6 @@ def test_base(self):
self.assertTrue(res0.shape == res1.shape)


@unittest.skip("repeats is int32 in paddle, but int64 in popart")
# TODO(alleng) add a pass for this case
class TestCase1(TestBase):
def set_feed(self):
self.feed = {"x": np.random.uniform(size=[2, 2]).astype('float32')}
Expand Down

0 comments on commit b19499e

Please sign in to comment.