Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add logic to NNAPI EP to exclude pre-processing involving dynamic shapes when partitioning #10452

Merged
merged 7 commits into from
Feb 2, 2022
Merged
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -527,14 +527,6 @@ bool IsInputSupported(const NodeArg& input, const std::string& parent_name) {
return false;
}

for (const auto& dim : shape_proto->dim()) {
// For now we do not support dynamic shape
if (!dim.has_dim_value()) {
LOGS_DEFAULT(WARNING) << "Dynamic shape is not supported for now, for input:" << input_name;
return false;
}
}

return true;
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -153,26 +153,35 @@ bool BaseOpSupportChecker::IsOpSupported(const InitializedTensorSet& initializer

bool BaseOpSupportChecker::HasSupportedInputs(const NodeUnit& node_unit) const {
// We do not support unknown(null) input shape
auto has_shape = [](const NodeArg& node_arg, const std::string& name, const std::string op_type) {
if (!node_arg.Shape()) {
auto has_supported_shape = [](const NodeArg& node_arg, const std::string& name, const std::string op_type) {
const auto* shape_proto = node_arg.Shape();
if (!shape_proto) {
LOGS_DEFAULT(VERBOSE) << "Node [" << name << "] type [" << op_type
<< "] Input [" << node_arg.Name() << "] has no shape";
return false;
}

// We do not support dynamic shape input for now
for (const auto& dim : shape_proto->dim()) {
if (!dim.has_dim_value()) {
LOGS_DEFAULT(VERBOSE) << "Dynamic shape is not supported for now, for input:" << node_arg.Name();
return false;
}
}
return true;
};

for (const auto& input : node_unit.Inputs()) {
if (!has_shape(input.node_arg, node_unit.Name(), node_unit.OpType()))
if (!has_supported_shape(input.node_arg, node_unit.Name(), node_unit.OpType()))
return false;

if (input.quant_param.has_value()) {
if (!has_shape(input.quant_param->scale, node_unit.Name(), node_unit.OpType()))
if (!has_supported_shape(input.quant_param->scale, node_unit.Name(), node_unit.OpType()))
return false;

// zero point is optional
if (input.quant_param->zero_point &&
!has_shape(*input.quant_param->zero_point, node_unit.Name(), node_unit.OpType()))
!has_supported_shape(*input.quant_param->zero_point, node_unit.Name(), node_unit.OpType()))
return false;
}
}
Expand Down Expand Up @@ -1647,7 +1656,7 @@ bool FlattenOpSupportChecker::IsOpSupportedImpl(const InitializedTensorSet& /* i
GetFlattenOutputShape(node_unit, input_shape, dim_1, dim_2);

if (dim_1 == 0 && dim_2 == 0) {
LOGS_DEFAULT(VERBOSE) << "The dynamical input shape " << Shape2String(input_shape)
LOGS_DEFAULT(VERBOSE) << "The dynamic input shape " << Shape2String(input_shape)
<< " is not supported";
return false;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -110,13 +110,6 @@ NnapiExecutionProvider::GetCapability(const onnxruntime::GraphViewer& graph_view
return result;
}

// Disable NNAPI if the graph has any unsupported inputs
for (const auto* input : graph_viewer.GetInputs()) {
if (!nnapi::IsInputSupported(*input, "graph")) {
return result;
}
}

// Get all the NodeUnits in the graph_viewer
std::vector<std::unique_ptr<NodeUnit>> node_unit_holder;
std::unordered_map<const Node*, const NodeUnit*> node_unit_map;
Expand Down
32 changes: 32 additions & 0 deletions onnxruntime/test/providers/nnapi/nnapi_basic_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,38 @@ TEST(NnapiExecutionProviderTest, ReshapeFlattenTest) {
#endif
}

// Since the NNAPI EP does not support dynamic shape input, and we have switched from immediately rejecting
// the whole graph in the NNAPI EP when it has a dynamic input to checking support at the individual
// operator level, we have a separate test here.
// Please see BaseOpBuilder::HasSupportedInputs in <repo_root>/onnxruntime/core/providers/nnapi/nnapi_builtin/builders/op_support_checker.cc
TEST(NnapiExecutionProviderTest, DynamicGraphInputTest) {
const ORTCHAR_T* model_file_name = ORT_TSTR("testdata/nnapi_dynamic_graph_input_test.onnx");

#if defined(__ANDROID__)
std::vector<int64_t> dims_mul_x = {1, 1, 4, 4};
std::vector<float> values_mul_x = {1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f};
OrtValue ml_value_x;
CreateMLValue<float>(TestNnapiExecutionProvider()->GetAllocator(0, OrtMemTypeDefault), dims_mul_x, values_mul_x,
&ml_value_x);

NameMLValMap feeds;
feeds.insert(std::make_pair("X", ml_value_x));

RunAndVerifyOutputsWithEP(model_file_name, "NnapiExecutionProviderTest.DynamicGraphInputTest",
std::make_unique<NnapiExecutionProvider>(0),
feeds);
#else
// test load only
SessionOptions so;
InferenceSessionWrapper session_object{so, GetEnvironment()};
ASSERT_STATUS_OK(session_object.RegisterExecutionProvider(std::make_unique<NnapiExecutionProvider>(0)));
ASSERT_STATUS_OK(session_object.Load(model_file_name));
ASSERT_STATUS_OK(session_object.Initialize());
ASSERT_GT(CountAssignedNodes(session_object.GetGraph(), kNnapiExecutionProvider), 0)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

For this particular graph, the number of assigned nodes (the Add only) should be exactly 1, so ASSERT_EQ can be used instead of ASSERT_GT.

<< "Some nodes should have been taken by the NNAPI EP";
#endif
}

// This is to test the uint8 handling of operators without "QLinear" such as Concat and Transpose
// NNAPI will require scale and zero point for inputs of all quantized operations
// For these operators without "Qlinear", there is no information about the scale and zero point, we can
Expand Down
47 changes: 47 additions & 0 deletions onnxruntime/test/testdata/nnapi_dynamic_graph_input_test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
import onnx
from onnx import helper
from onnx import TensorProto


# Since the NNAPI EP does not support dynamic shape input, and we have switched from immediately rejecting
# the whole graph in the NNAPI EP when it has a dynamic input to checking support at the individual operator level.
# Please see BaseOpBuilder::HasSupportedInputs in <repo_root>/onnxruntime/core/providers/nnapi/nnapi_builtin/builders/op_support_checker.cc
# We have a separate test here using a graph with a dynamic input whose shape becomes fixed after a Resize
def GenerateModel(model_name):
    """Build and save a small Resize -> Add model whose graph input has
    symbolic (dynamic) spatial dimensions.

    The Resize output shape is fixed by the 'Resize_1_sizes' initializer,
    so the downstream Add sees a fully static shape even though the graph
    input itself is dynamic.
    """
    # Resize consumes the dynamic-shaped input; roi/scales are omitted ("")
    # and the explicit 'sizes' input pins the output to 1x1x3x3.
    resize_node = helper.make_node(
        "Resize", ["X", "", "", "Resize_1_sizes"], ["Resize_1_output"],
        "resize_1", mode="cubic")
    add_node = helper.make_node(
        "Add", ["Resize_1_output", "Add_2_input"], ["Y"], "add")

    resize_sizes = helper.make_tensor(
        'Resize_1_sizes', TensorProto.INT64, [4], [1, 1, 3, 3])
    add_operand = helper.make_tensor(
        'Add_2_input', TensorProto.FLOAT, [1, 1, 3, 3],
        [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0])

    # dim_param strings ("1"/"N") make the input shape symbolic, i.e. dynamic.
    graph_input = helper.make_tensor_value_info(
        'X', TensorProto.FLOAT, ["1", "1", "N", "N"])
    graph_output = helper.make_tensor_value_info(
        'Y', TensorProto.FLOAT, [1, 1, 3, 3])

    graph = helper.make_graph(
        [resize_node, add_node],
        "NNAPI_Dynamic_Graph_Input_Test",
        [graph_input],
        [graph_output],
        [resize_sizes, add_operand],
    )

    onnx.save(helper.make_model(graph), model_name)


if __name__ == "__main__":
    # Script entry point: generate the test model next to the script.
    GenerateModel("nnapi_dynamic_graph_input_test.onnx")
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This model applies to CoreML as well; could we rename it to something like `dynamic_graph_input_test.onnx` or `ep_dynamic_graph_input_test.onnx`?