demo/mobilenet inference #11510
@@ -14,3 +14,38 @@

```cmake
#
inference_api_test(simple_on_word2vec ARGS test_word2vec)

set(mobilenet_url "xxx")
set(DEMO_INSTALL_DIR "${THIRD_PARTY_PATH}/inference_demo")

function(inference_download_test_demo TARGET)
  if (NOT WITH_TESTING)
    return()
  endif()
  set(options "")
  set(oneValueArgs URL)
  set(multiValueArgs SRCS)
  cmake_parse_arguments(tests "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})

  set(test_dir "${DEMO_INSTALL_DIR}/${TARGET}")
  message(STATUS "inference demo ${test_dir}")

  if(NOT EXISTS "${test_dir}")
    message(STATUS "Download ${TARGET} model from ${tests_URL}")
    execute_process(COMMAND bash -c "mkdir -p ${test_dir}")
    execute_process(COMMAND bash -c "cd ${test_dir}; wget -q ${tests_URL}")
    execute_process(COMMAND bash -c "cd ${test_dir}; tar xzf *.tar.gz")
  endif()

  cc_test(${TARGET} SRCS "${tests_SRCS}"
          DEPS paddle_inference_api paddle_fluid
          ARGS --data="${test_dir}/data.txt"
               --modeldir="${test_dir}/model"
```
**Review comment:** The quotes on lines 43 and 44 must not be added; after removing them, the demo runs.

**Reply:** ok
```cmake
               --fraction_of_gpu_memory_to_use=0.5)
```
**Review comment:** BTW, this is a GPU flag; maybe it should not be placed here as a common flag, since I build with CPU only and it fails with:
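One way this could be addressed is to pass the GPU memory flag only when the build has GPU support enabled. The sketch below is only an illustration and assumes the project's existing `WITH_GPU` CMake option governs that; it is not part of this PR.

```cmake
# Sketch: only append the GPU memory flag when building with GPU support.
# WITH_GPU is assumed to be the project's existing CMake switch.
set(demo_args --data=${test_dir}/data.txt
              --modeldir=${test_dir}/model)
if(WITH_GPU)
  list(APPEND demo_args --fraction_of_gpu_memory_to_use=0.5)
endif()
cc_test(${TARGET} SRCS "${tests_SRCS}"
        DEPS paddle_inference_api paddle_fluid
        ARGS ${demo_args})
```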
```cmake
endfunction()

inference_download_test_demo(mobilenet_inference_demo
    SRCS mobilenet.cc
    URL http://paddlemodels.bj.bcebos.com/inference-vis-demos%2Fmobilenet.tar.gz)
```
**Review comment:** Maybe you want to use
@@ -0,0 +1,19 @@

# Inference Demos

## MobileNet

Input data format:

- Each line contains a single record.
- Each record's format is (see the example after this list)

```
<space-separated floats as data>\t<space-separated ints as shape>
```
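For example, a record for a single 224x224 RGB image might look like the following. This is a hypothetical illustration: the float values are made up, and the shape depends on the model's actual input.

```
0.017 0.025 0.003 ... (1*3*224*224 floats in total)\t1 3 224 224
```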
Follow the C++ code in `mobilenet.cc`.

To execute the demo, simply run

```sh
./mobilenet_inference_demo --modeldir <model> --data <datafile>
```
@@ -0,0 +1,112 @@

```cpp
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

/*
 * This file contains a demo for mobilenet.
 * TODO(Superjomn) add some links of the actual models.
 */

#include <gflags/gflags.h>
#include <glog/logging.h>  // use glog instead of PADDLE_ENFORCE to avoid importing other paddle header files.
#include <gtest/gtest.h>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
#include "paddle/contrib/inference/demo/utils.h"
#include "paddle/contrib/inference/paddle_inference_api.h"

namespace paddle {
namespace demo {

DEFINE_string(modeldir, "", "Directory of the inference model.");
DEFINE_string(
    data,
    "",
    "path of data; each line is a record, format is "
    "'<space-separated floats as data>\t<space-separated ints as shape>'");

struct Record {
  std::vector<float> data;
  std::vector<int32_t> shape;
};

void split(const std::string& str, char sep, std::vector<std::string>* pieces);

Record ProcessALine(const std::string& line) {
  LOG(INFO) << "process a line";
  std::vector<std::string> columns;
  split(line, '\t', &columns);
  CHECK_EQ(columns.size(), 2UL)
      << "data format error, should be <data>\t<shape>";

  Record record;
  std::vector<std::string> data_strs;
  split(columns[0], ' ', &data_strs);
  for (auto& d : data_strs) {
    record.data.push_back(std::stof(d));
  }

  std::vector<std::string> shape_strs;
  split(columns[1], ' ', &shape_strs);
  for (auto& s : shape_strs) {
    record.shape.push_back(std::stoi(s));
  }
  LOG(INFO) << "data size " << record.data.size();
  LOG(INFO) << "data shape " << record.shape.size();
  return record;
}

/*
 * Use the native fluid engine to run inference on the MobileNet model.
 */
void Main(bool use_gpu) {
  NativeConfig config;
  // config.model_dir = FLAGS_modeldir;
```
**Review comment:** Can remove the comment.
```cpp
  config.param_file = FLAGS_modeldir + "/__params__";
  config.prog_file = FLAGS_modeldir + "/__model__";
  config.use_gpu = use_gpu;
  config.fraction_of_gpu_memory = 0.15;
```
**Review comment:** This should be
```cpp
  config.device = 0;

  LOG(INFO) << "init predictor";
  auto predictor =
      CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);

  LOG(INFO) << "begin to process data";
  // Just a single batch of data.
  std::string line;
  std::ifstream file(FLAGS_data);
  std::getline(file, line);
  auto record = ProcessALine(line);
  file.close();

  // Inference.
  PaddleTensor input{
      .name = "xx",
      .shape = record.shape,
      .data = PaddleBuf(record.data.data(), record.data.size() * sizeof(float)),
      .dtype = PaddleDType::FLOAT32};

  LOG(INFO) << "run executor";
  std::vector<PaddleTensor> output;
  predictor->Run({input}, &output);

  LOG(INFO) << "output.size " << output.size();
  auto& tensor = output.front();
  LOG(INFO) << "output: " << SummaryTensor(tensor);
}

TEST(demo, mobilenet) { Main(false /*use_gpu*/); }

}  // namespace demo
}  // namespace paddle
```
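As a possible extension of the demo (a sketch only, not part of this PR), the top-1 class could be read from the output tensor. It assumes the model's single output is a FLOAT32 probability vector, as the MobileNet demo suggests; `Top1Class` is a hypothetical helper name.

```cpp
// Sketch: read the top-1 class index from the demo's FLOAT32 output tensor.
// Assumes paddle_inference_api.h is already included, as in mobilenet.cc.
#include <algorithm>  // for std::max_element

static int Top1Class(const paddle::PaddleTensor& tensor) {
  const float* probs = static_cast<const float*>(tensor.data.data());
  int num = static_cast<int>(tensor.data.length() / sizeof(float));
  const float* best = std::max_element(probs, probs + num);
  return static_cast<int>(best - probs);  // index of the most probable class
}
```

Inside `Main()`, this could be logged alongside the existing summary, e.g. `LOG(INFO) << "top-1 class: " << Top1Class(tensor);`.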
@@ -0,0 +1,68 @@

```cpp
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <algorithm>  // std::min
#include <sstream>    // std::stringstream
#include <string>
#include <vector>

#include "paddle/contrib/inference/paddle_inference_api.h"

namespace paddle {
namespace demo {

static void split(const std::string& str,
                  char sep,
                  std::vector<std::string>* pieces) {
  pieces->clear();
  if (str.empty()) {
    return;
  }
  size_t pos = 0;
  size_t next = str.find(sep, pos);
  while (next != std::string::npos) {
    pieces->push_back(str.substr(pos, next - pos));
    pos = next + 1;
    next = str.find(sep, pos);
  }
  if (!str.substr(pos).empty()) {
    pieces->push_back(str.substr(pos));
  }
}

/*
 * Get a summary of a PaddleTensor content.
 */
static std::string SummaryTensor(const PaddleTensor& tensor) {
  std::stringstream ss;
  int num_elems = tensor.data.length() / PaddleDtypeSize(tensor.dtype);

  ss << "data[:10]\t";
  switch (tensor.dtype) {
    case PaddleDType::INT64: {
      for (int i = 0; i < std::min(num_elems, 10); i++) {
        ss << static_cast<int64_t*>(tensor.data.data())[i] << " ";
      }
      break;
    }
    case PaddleDType::FLOAT32:
      for (int i = 0; i < std::min(num_elems, 10); i++) {
        ss << static_cast<float*>(tensor.data.data())[i] << " ";
      }
      break;
  }
  return ss.str();
}

}  // namespace demo
}  // namespace paddle
```
**Review comment:** What's this for? I did not see any usage. And remove the magic name "xxx".