Implement basic Load() and modify example based on updated inference design #7690

Merged · 18 commits · Jan 30, 2018
25 changes: 25 additions & 0 deletions paddle/framework/program_desc.cc
@@ -18,6 +18,9 @@ limitations under the License. */
namespace paddle {
namespace framework {

const std::string kFeedOpType = "feed";
const std::string kFetchOpType = "fetch";

BlockDesc *ProgramDesc::AppendBlock(const BlockDesc &parent) {
auto *b = desc_.add_blocks();
b->set_parent_idx(parent.ID());
@@ -64,5 +67,27 @@ ProgramDesc::ProgramDesc(const std::string &binary_str) {
}
}

const std::vector<std::string> ProgramDesc::GetFeedVarNames() {
BlockDesc *global_block = blocks_[0].get();
std::vector<std::string> feed_var_names;
for (auto *op : global_block->AllOps()) {
if (op->Type() == "feed") {
[Review comment, Contributor] feed -> kFeedOpType; and let's rename feed_var_names to feed_target_names if there is no better candidate.

feed_var_names.insert(feed_var_names.begin(), op->Output("Out")[0]);
}
}
return feed_var_names;
}

const std::vector<std::string> ProgramDesc::GetFetchVarNames() {
BlockDesc *global_block = blocks_[0].get();
std::vector<std::string> fetch_var_names;
for (auto *op : global_block->AllOps()) {
if (op->Type() == "fetch") {
[Review comment, Contributor] fetch -> kFetchOpType; and let's rename fetch_var_names to fetch_target_names if there is no better candidate.

fetch_var_names.push_back(op->Input("X")[0]);
}
}
return fetch_var_names;
}

} // namespace framework
} // namespace paddle
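
For reference, a sketch of what GetFeedVarNames() might look like with the review suggestions above applied (the kFeedOpType constant and the feed_target_names spelling); this is illustrative only, not the merged code:

const std::vector<std::string> ProgramDesc::GetFeedVarNames() {
  BlockDesc *global_block = blocks_[0].get();
  std::vector<std::string> feed_target_names;
  for (auto *op : global_block->AllOps()) {
    // Compare against the named constant rather than the "feed" literal.
    if (op->Type() == kFeedOpType) {
      // Insert at the front, keeping the ordering behavior of the diff above.
      feed_target_names.insert(feed_target_names.begin(), op->Output("Out")[0]);
    }
  }
  return feed_target_names;
}

GetFetchVarNames() would change symmetrically: compare against kFetchOpType and push_back op->Input("X")[0] into fetch_target_names.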
4 changes: 4 additions & 0 deletions paddle/framework/program_desc.h
@@ -45,6 +45,10 @@ class ProgramDesc {

proto::ProgramDesc *Proto();

const std::vector<std::string> GetFeedVarNames();

[Review comment, Contributor] remove this blank line.

const std::vector<std::string> GetFetchVarNames();

private:
proto::ProgramDesc desc_;

6 changes: 3 additions & 3 deletions paddle/inference/CMakeLists.txt
@@ -1,14 +1,14 @@
set(FLUID_CORE_MODULES proto_desc paddle_memory executor prune init)

cc_library(paddle_fluid_api
- SRCS inference.cc
+ SRCS io.cc
DEPS ${FLUID_CORE_MODULES} ${GLOB_OP_LIB})

# Merge all modules into a single static library
cc_library(paddle_fluid DEPS paddle_fluid_api ${FLUID_CORE_MODULES} ${GLOB_OP_LIB})

# Create shared library
- add_library(paddle_fluid_shared SHARED inference.cc)
+ add_library(paddle_fluid_shared SHARED io.cc)

target_circle_link_libraries(paddle_fluid_shared
ARCHIVE_START
@@ -20,7 +20,7 @@ SET_TARGET_PROPERTIES(paddle_fluid_shared PROPERTIES OUTPUT_NAME paddle_fluid)

# install library & headers
if(NOT WITH_C_API AND WITH_FLUID)
- install(FILES inference.h DESTINATION include/paddle/inference)
+ install(FILES io.h DESTINATION include/paddle/inference)
install(TARGETS paddle_fluid_shared DESTINATION lib)
endif()

50 changes: 44 additions & 6 deletions paddle/inference/example.cc
@@ -1,4 +1,4 @@
- /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+ /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,7 +15,9 @@ limitations under the License. */
#include <time.h>
#include <iostream>
#include "gflags/gflags.h"
#include "paddle/inference/inference.h"
#include "paddle/framework/init.h"
#include "paddle/framework/lod_tensor.h"
#include "paddle/inference/io.h"

DEFINE_string(dirname, "", "Directory of the inference model.");

@@ -28,12 +30,27 @@ int main(int argc, char** argv) {
exit(1);
}

// 1. Define place, executor, scope
auto place = paddle::platform::CPUPlace();
paddle::framework::InitDevices();
auto* executor = new paddle::framework::Executor(place);
auto* scope = new paddle::framework::Scope();

std::cout << "FLAGS_dirname: " << FLAGS_dirname << std::endl;
std::string dirname = FLAGS_dirname;

- paddle::InferenceEngine* engine = new paddle::InferenceEngine();
- engine->LoadInferenceModel(dirname);
+ // 2. Initialize the inference program
+ auto* inference_program = paddle::inference::Load(*executor, *scope, dirname);

// 3. Optional: perform optimization on the inference_program

// 4. Get the feed_var_names and fetch_var_names
const std::vector<std::string>& feed_var_names =
inference_program->GetFeedVarNames();
const std::vector<std::string>& fetch_var_names =
inference_program->GetFetchVarNames();

// 5. Generate input
paddle::framework::LoDTensor input;
srand(time(0));
float* input_ptr =
@@ -45,8 +62,26 @@ int main(int argc, char** argv) {
std::vector<paddle::framework::LoDTensor> feeds;
feeds.push_back(input);
std::vector<paddle::framework::LoDTensor> fetchs;
- engine->Execute(feeds, fetchs);

// Set up maps for feed and fetch targets
std::map<std::string, const paddle::framework::LoDTensor*> feed_targets;
std::map<std::string, paddle::framework::LoDTensor*> fetch_targets;

// set_feed_variable
for (size_t i = 0; i < feed_var_names.size(); ++i) {
feed_targets[feed_var_names[i]] = &feeds[i];
}

// get_fetch_variable
fetchs.resize(fetch_var_names.size());
for (size_t i = 0; i < fetch_var_names.size(); ++i) {
fetch_targets[fetch_var_names[i]] = &fetchs[i];
}

// Run the inference program
executor->Run(*inference_program, scope, feed_targets, fetch_targets);

// Get outputs
for (size_t i = 0; i < fetchs.size(); ++i) {
auto dims_i = fetchs[i].dims();
std::cout << "dims_i:";
@@ -62,6 +97,9 @@ int main(int argc, char** argv) {
std::cout << std::endl;
}

- delete engine;
+ delete inference_program;
+ delete scope;
+ delete executor;

return 0;
}
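
The new io.h/io.cc introduced by this PR are not shown in this view. Judging only from how example.cc calls it, a basic Load() plausibly has roughly the shape sketched below; the "__model__" file name and the parameter-loading step are assumptions, not something the diff confirms:

#include <fstream>
#include <iterator>
#include <string>

#include "paddle/framework/executor.h"
#include "paddle/framework/program_desc.h"
#include "paddle/framework/scope.h"

namespace paddle {
namespace inference {

framework::ProgramDesc* Load(framework::Executor& executor,
                             framework::Scope& scope,
                             const std::string& dirname) {
  // Read the serialized program (the file name "__model__" is an assumption).
  std::ifstream model_file(dirname + "/__model__", std::ios::binary);
  std::string program_desc_str((std::istreambuf_iterator<char>(model_file)),
                               std::istreambuf_iterator<char>());
  // Deserialize via the ProgramDesc(const std::string &binary_str)
  // constructor shown in program_desc.cc above.
  auto* program = new framework::ProgramDesc(program_desc_str);
  // A real implementation would also load the persistable parameters under
  // dirname into `scope` (e.g. by running load ops on `executor`); that
  // step is elided in this sketch.
  return program;
}

}  // namespace inference
}  // namespace paddle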
187 changes: 0 additions & 187 deletions paddle/inference/inference.cc

This file was deleted.

48 changes: 0 additions & 48 deletions paddle/inference/inference.h

This file was deleted.
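
Taken together, these deletions retire the old InferenceEngine wrapper in favor of the free function paddle::inference::Load() plus the existing Executor. The migration, as the example.cc diff above shows:

// Before (paddle/inference/inference.h, now deleted):
paddle::InferenceEngine* engine = new paddle::InferenceEngine();
engine->LoadInferenceModel(dirname);
engine->Execute(feeds, fetchs);

// After (paddle/inference/io.h):
auto* inference_program = paddle::inference::Load(*executor, *scope, dirname);
executor->Run(*inference_program, scope, feed_targets, fetch_targets);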
