diff --git a/onnxruntime/core/providers/nnapi/nnapi_builtin/builders/helper.cc b/onnxruntime/core/providers/nnapi/nnapi_builtin/builders/helper.cc index ee220b2a7ee45..1017d339a623f 100644 --- a/onnxruntime/core/providers/nnapi/nnapi_builtin/builders/helper.cc +++ b/onnxruntime/core/providers/nnapi/nnapi_builtin/builders/helper.cc @@ -519,27 +519,6 @@ bool IsNodeSupportedInGroup(const NodeUnit& node_unit, const GraphViewer& graph_ return true; } -bool IsInputSupported(const NodeArg& input, const std::string& parent_name) { - const auto& input_name = input.Name(); - const auto* shape_proto = input.Shape(); - // We do not support input with no shape - if (!shape_proto) { - LOGS_DEFAULT(VERBOSE) << "Input [" << input_name << "] of [" << parent_name - << "] has no shape"; - return false; - } - - for (const auto& dim : shape_proto->dim()) { - // For now we do not support dynamic shape - if (!dim.has_dim_value()) { - LOGS_DEFAULT(WARNING) << "Dynamic shape is not supported for now, for input:" << input_name; - return false; - } - } - - return true; -} - std::string Shape2String(const std::vector& shape) { std::ostringstream os; os << "[ "; diff --git a/onnxruntime/core/providers/nnapi/nnapi_builtin/builders/helper.h b/onnxruntime/core/providers/nnapi/nnapi_builtin/builders/helper.h index c5b3e1106d966..c861968869542 100644 --- a/onnxruntime/core/providers/nnapi/nnapi_builtin/builders/helper.h +++ b/onnxruntime/core/providers/nnapi/nnapi_builtin/builders/helper.h @@ -147,9 +147,6 @@ bool IsNodeSupportedInGroup(const NodeUnit& node_unit, const GraphViewer& graph_ const OpSupportCheckParams& params, const std::unordered_set& node_outputs_in_group); -// If a graph input is supported by NNAPI -bool IsInputSupported(const NodeArg& input, const std::string& parent_name); - // If an NNAPI partition node group is valid bool IsValidSupportedNodeGroup(const std::vector& supported_node_group); diff --git a/onnxruntime/core/providers/nnapi/nnapi_builtin/builders/op_support_checker.cc 
b/onnxruntime/core/providers/nnapi/nnapi_builtin/builders/op_support_checker.cc index a8b299e6d1ba8..5c132f42958c5 100644 --- a/onnxruntime/core/providers/nnapi/nnapi_builtin/builders/op_support_checker.cc +++ b/onnxruntime/core/providers/nnapi/nnapi_builtin/builders/op_support_checker.cc @@ -153,26 +153,35 @@ bool BaseOpSupportChecker::IsOpSupported(const InitializedTensorSet& initializer bool BaseOpSupportChecker::HasSupportedInputs(const NodeUnit& node_unit) const { // We do not support unknown(null) input shape - auto has_shape = [](const NodeArg& node_arg, const std::string& name, const std::string op_type) { - if (!node_arg.Shape()) { + auto has_supported_shape = [](const NodeArg& node_arg, const std::string& name, const std::string op_type) { + const auto* shape_proto = node_arg.Shape(); + if (!shape_proto) { LOGS_DEFAULT(VERBOSE) << "Node [" << name << "] type [" << op_type << "] Input [" << node_arg.Name() << "] has no shape"; return false; } + + // We do not support dynamic shape input for now + for (const auto& dim : shape_proto->dim()) { + if (!dim.has_dim_value()) { + LOGS_DEFAULT(VERBOSE) << "Dynamic shape is not supported for now, for input:" << node_arg.Name(); + return false; + } + } return true; }; for (const auto& input : node_unit.Inputs()) { - if (!has_shape(input.node_arg, node_unit.Name(), node_unit.OpType())) + if (!has_supported_shape(input.node_arg, node_unit.Name(), node_unit.OpType())) return false; if (input.quant_param.has_value()) { - if (!has_shape(input.quant_param->scale, node_unit.Name(), node_unit.OpType())) + if (!has_supported_shape(input.quant_param->scale, node_unit.Name(), node_unit.OpType())) return false; // zero point is optional if (input.quant_param->zero_point && - !has_shape(*input.quant_param->zero_point, node_unit.Name(), node_unit.OpType())) + !has_supported_shape(*input.quant_param->zero_point, node_unit.Name(), node_unit.OpType())) return false; } } @@ -1681,7 +1690,7 @@ bool 
FlattenOpSupportChecker::IsOpSupportedImpl(const InitializedTensorSet& /* i GetFlattenOutputShape(node_unit, input_shape, dim_1, dim_2); if (dim_1 == 0 && dim_2 == 0) { - LOGS_DEFAULT(VERBOSE) << "The dynamical input shape " << Shape2String(input_shape) + LOGS_DEFAULT(VERBOSE) << "The dynamic input shape " << Shape2String(input_shape) << " is not supported"; return false; } diff --git a/onnxruntime/core/providers/nnapi/nnapi_builtin/nnapi_execution_provider.cc b/onnxruntime/core/providers/nnapi/nnapi_builtin/nnapi_execution_provider.cc index 32fffec7395b3..4ff0b41a51773 100644 --- a/onnxruntime/core/providers/nnapi/nnapi_builtin/nnapi_execution_provider.cc +++ b/onnxruntime/core/providers/nnapi/nnapi_builtin/nnapi_execution_provider.cc @@ -110,13 +110,6 @@ NnapiExecutionProvider::GetCapability(const onnxruntime::GraphViewer& graph_view return result; } - // Disable NNAPI if the graph has any unsupported inputs - for (const auto* input : graph_viewer.GetInputs()) { - if (!nnapi::IsInputSupported(*input, "graph")) { - return result; - } - } - // Get all the NodeUnits in the graph_viewer std::vector> node_unit_holder; std::unordered_map node_unit_map; diff --git a/onnxruntime/test/providers/nnapi/nnapi_basic_test.cc b/onnxruntime/test/providers/nnapi/nnapi_basic_test.cc index d0cd2bb786653..c59efaae76e92 100644 --- a/onnxruntime/test/providers/nnapi/nnapi_basic_test.cc +++ b/onnxruntime/test/providers/nnapi/nnapi_basic_test.cc @@ -76,6 +76,38 @@ TEST(NnapiExecutionProviderTest, ReshapeFlattenTest) { #endif } +// Since NNAPI EP does not support dynamic shape input and we now switch from the approach of immediately rejecting +// the whole graph in NNAPI EP if it has a dynamic input to checking at the individual operator support check level, we have a +// separate test here.
+// Please see BaseOpSupportChecker::HasSupportedInputs in /onnxruntime/core/providers/nnapi/nnapi_builtin/builders/op_support_checker.cc +TEST(NnapiExecutionProviderTest, DynamicGraphInputTest) { + const ORTCHAR_T* model_file_name = ORT_TSTR("testdata/ep_dynamic_graph_input_test.onnx"); + +#if defined(__ANDROID__) + std::vector dims_mul_x = {1, 1, 4, 4}; + std::vector values_mul_x = {1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f}; + OrtValue ml_value_x; + CreateMLValue(TestNnapiExecutionProvider()->GetAllocator(0, OrtMemTypeDefault), dims_mul_x, values_mul_x, + &ml_value_x); + + NameMLValMap feeds; + feeds.insert(std::make_pair("X", ml_value_x)); + + RunAndVerifyOutputsWithEP(model_file_name, "NnapiExecutionProviderTest.DynamicGraphInputTest", + std::make_unique(0), + feeds); +#else + // test load only + SessionOptions so; + InferenceSessionWrapper session_object{so, GetEnvironment()}; + ASSERT_STATUS_OK(session_object.RegisterExecutionProvider(std::make_unique(0))); + ASSERT_STATUS_OK(session_object.Load(model_file_name)); + ASSERT_STATUS_OK(session_object.Initialize()); + ASSERT_EQ(CountAssignedNodes(session_object.GetGraph(), kNnapiExecutionProvider), 1) + << "Exactly one node (Add) should have been taken by the NNAPI EP"; +#endif +} + // This is to test the uint8 handling of operators without "QLinear" such as Concat and Transpose // NNAPI will require scale and zero point for inputs of all quantized operations // For these operators without "Qlinear", there is no information about the scale and zero point, we can diff --git a/onnxruntime/test/testdata/ep_dynamic_graph_input_test.onnx b/onnxruntime/test/testdata/ep_dynamic_graph_input_test.onnx new file mode 100644 index 0000000000000..67f0d39f526e4 Binary files /dev/null and b/onnxruntime/test/testdata/ep_dynamic_graph_input_test.onnx differ diff --git a/onnxruntime/test/testdata/ep_dynamic_graph_input_test.py b/onnxruntime/test/testdata/ep_dynamic_graph_input_test.py
new file mode 100644 index 0000000000000..d04f8a8884d3d --- /dev/null +++ b/onnxruntime/test/testdata/ep_dynamic_graph_input_test.py @@ -0,0 +1,47 @@ +import onnx +from onnx import helper +from onnx import TensorProto + + +# Since NNAPI EP does not support dynamic shape input and we now switch from the approach of immediately rejecting +# the whole graph in NNAPI EP if it has a dynamic input to checking the dynamic shape at individual operator support check level, +# we have a separate test here using a graph with dynamic input that becomes fixed after a Resize +# Please see BaseOpSupportChecker::HasSupportedInputs in /onnxruntime/core/providers/nnapi/nnapi_builtin/builders/op_support_checker.cc +def GenerateModel(model_name): + nodes = [ + helper.make_node("Resize", ["X", "", "", "Resize_1_sizes"], [ + "Resize_1_output"], "resize_1", mode="cubic"), + helper.make_node( + "Add", ["Resize_1_output", "Add_2_input"], ["Y"], "add"), + ] + + initializers = [ + helper.make_tensor('Resize_1_sizes', TensorProto.INT64, [ + 4], [1, 1, 3, 3]), + helper.make_tensor('Add_2_input', TensorProto.FLOAT, [1, 1, 3, 3], [ + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]) + ] + + inputs = [ + helper.make_tensor_value_info( + 'X', TensorProto.FLOAT, ["1", "1", "N", "N"]), # used dim_param here + ] + + outputs = [ + helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 1, 3, 3]), + ] + + graph = helper.make_graph( + nodes, + "EP_Dynamic_Graph_Input_Test", + inputs, + outputs, + initializers + ) + + model = helper.make_model(graph) + onnx.save(model, model_name) + + +if __name__ == "__main__": + GenerateModel('ep_dynamic_graph_input_test.onnx')