diff --git a/.azure-pipelines/OneBranchPipeline-Official.yml b/.azure-pipelines/OneBranchPipeline-Official.yml index b05a291..6e9cabf 100644 --- a/.azure-pipelines/OneBranchPipeline-Official.yml +++ b/.azure-pipelines/OneBranchPipeline-Official.yml @@ -33,7 +33,7 @@ extends: break: true # always break the build on policheck issues. You can disable it by setting to 'false' codeql: python: - enabled: true + enabled: false stages: - stage: firststage @@ -57,7 +57,7 @@ extends: pip install -r requirements.txt pip install onnxruntime pip install onnxmltools - pip install onnx==1.12.0 + pip install onnx pip install pytest pip install -e . displayName: 'Install dependencies' diff --git a/.azure-pipelines/OneBranchPipeline-PullRequest.yml b/.azure-pipelines/OneBranchPipeline-PullRequest.yml index 0a84e99..6eaf586 100644 --- a/.azure-pipelines/OneBranchPipeline-PullRequest.yml +++ b/.azure-pipelines/OneBranchPipeline-PullRequest.yml @@ -34,7 +34,7 @@ extends: break: true # always break the build on policheck issues. codeql: python: - enabled: true + enabled: false stages: - stage: firststage @@ -55,7 +55,7 @@ extends: pip install -r requirements.txt pip install onnxruntime pip install onnxmltools - pip install onnx==1.12.0 + pip install onnx pip install pytest pip install -e . displayName: 'Install dependencies' diff --git a/MANIFEST.in b/MANIFEST.in index 1aba38f..096dd50 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1 +1,2 @@ include LICENSE +include requirements.txt diff --git a/onnxconverter_common/__init__.py b/onnxconverter_common/__init__.py index 4581691..146bf5d 100644 --- a/onnxconverter_common/__init__.py +++ b/onnxconverter_common/__init__.py @@ -8,7 +8,7 @@ This framework performs optimization for ONNX models and includes common utilities for ONNX converters. 
""" -__version__ = "1.13.0" +__version__ = "1.14.0" __author__ = "Microsoft" __producer__ = "OnnxMLTools" __producer_version__ = __version__ diff --git a/onnxconverter_common/data_types.py b/onnxconverter_common/data_types.py index bc5debe..8b1ca1c 100644 --- a/onnxconverter_common/data_types.py +++ b/onnxconverter_common/data_types.py @@ -248,9 +248,9 @@ def find_type_conversion(source_type, target_type): """ Find the operator name for converting source_type into target_type """ - if type(source_type) == type(target_type): + if isinstance(source_type, target_type): return 'identity' - if type(target_type) == FloatTensorType: + if isinstance(target_type, FloatTensorType): return 'imageToFloatTensor' raise ValueError('Unsupported type conversion from %s to %s' % ( source_type, target_type)) diff --git a/onnxconverter_common/float16.py b/onnxconverter_common/float16.py index 02af8d0..a63572f 100644 --- a/onnxconverter_common/float16.py +++ b/onnxconverter_common/float16.py @@ -157,6 +157,8 @@ def convert_float_to_float16(model, min_positive_val=1e-7, max_finite_val=1e4, queue = [] value_info_list = [] node_list = [] + # key = node, value = graph, used to distinguish global with sub-graph + node_dict = {} # type inference on input model if func_infer_shape is not None: model = func_infer_shape(model) @@ -222,6 +224,7 @@ def convert_float_to_float16(model, min_positive_val=1e-7, max_finite_val=1e4, # so it will not be converted to float16 if n.op_type in op_block_list or n.name in node_block_list: node_list.append(n) + node_dict[n.name] = q else: if n.op_type == 'Cast': for attr in n.attribute: @@ -264,7 +267,8 @@ def convert_float_to_float16(model, min_positive_val=1e-7, max_finite_val=1e4, for value_info in value_info_list: if input == value_info.name: # create new value_info for current node's new input name - new_value_info = model.graph.value_info.add() + graph = node_dict[node.name] # get the correct graph instead of the global graph + new_value_info = 
graph.value_info.add() new_value_info.CopyFrom(value_info) output_name = node.name + '_input_cast_' + str(i) new_value_info.name = output_name @@ -272,7 +276,7 @@ def convert_float_to_float16(model, min_positive_val=1e-7, max_finite_val=1e4, # add Cast node (from tensor(float16) to tensor(float) before current node node_name = node.name + '_input_cast' + str(i) new_node = [helper.make_node('Cast', [input], [output_name], to=1, name=node_name)] - model.graph.node.extend(new_node) + graph.node.extend(new_node) # change current node's input name node.input[i] = output_name break @@ -283,7 +287,8 @@ def convert_float_to_float16(model, min_positive_val=1e-7, max_finite_val=1e4, for value_info in value_info_list: if output == value_info.name: # create new value_info for current node's new output - new_value_info = model.graph.value_info.add() + graph = node_dict[node.name] # get the correct graph instead of the global graph + new_value_info = graph.value_info.add() new_value_info.CopyFrom(value_info) input_name = node.name + '_output_cast_' + str(i) new_value_info.name = input_name @@ -291,11 +296,12 @@ def convert_float_to_float16(model, min_positive_val=1e-7, max_finite_val=1e4, # add Cast node (from tensor(float) to tensor(float16) after current node node_name = node.name + '_output_cast' + str(i) new_node = [helper.make_node('Cast', [input_name], [output], to=10, name=node_name)] - model.graph.node.extend(new_node) + graph.node.extend(new_node) # change current node's input name node.output[i] = input_name break + sort_topology(model.graph) return model @@ -332,3 +338,53 @@ def convert_float_to_float16_model_path(model_path, min_positive_val=1e-7, max_f if not disable_shape_infer: model = onnx.load(model_path) return convert_float_to_float16(model, min_positive_val, max_finite_val, keep_io_types, disable_shape_infer) + + +def sort_graph_node(graph_proto): + # find the "first" node in Nodes that its input is not any node's output + def 
find_first_node(output2node_dict): + for node in org_nodes: + is_not_first_node = any(item in output2node_dict for item in node.input) + if not is_not_first_node: + return node + return None + + # remove the node from output2node_dict using output as key + def remove_first_node_from_dict2(first_node): + for output in first_node.output: + if output in output2node_dict: + del output2node_dict[output] + + org_nodes = graph_proto.node + # create a dict to store output as key and node as value + output2node_dict = {} + for node in org_nodes: + for output in node.output: + output2node_dict[output] = node + + # save the final node after sorted + sorted_node = [] + # traverse the Nodes to find the first node + while (len(output2node_dict) > 0): + first_node = find_first_node(output2node_dict) + sorted_node.append(first_node) + remove_first_node_from_dict2(first_node) + # del node from original nodes list to avoid duplicate traverse + org_nodes.remove(first_node) + + for new_node in sorted_node: + graph_proto.node.extend([new_node]) + + +# The input graph should be model.graph +# Recursively sort the topology for each sub-graph +def sort_topology(graph_proto): + assert (isinstance(graph_proto, onnx_proto.GraphProto)) + sort_graph_node(graph_proto) # sort global graph + for node in graph_proto.node: + for attr in node.attribute: + if isinstance(attr.g, onnx_proto.GraphProto) and len(attr.g.node) > 0: + sort_topology(attr.g) # sort sub-graph + for g in attr.graphs: + if isinstance(g, onnx_proto.GraphProto): + sort_topology(g) # sort sub-graph diff --git a/onnxconverter_common/perfstats.py b/onnxconverter_common/perfstats.py index 11d405f..62361aa 100644 --- a/onnxconverter_common/perfstats.py +++ b/onnxconverter_common/perfstats.py @@ -114,7 +114,7 @@ def compute_op_type_entries(raw_entries): def read_raw_entries(profile_path): with open(profile_path, "r") as f: data = json.load(f) - if type(data) == dict: + if isinstance(data, dict): data = data['traceEvents'] entries = [] for 
item in data: diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..bc22188 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,30 @@ +[build-system] +requires = ["setuptools>=61.0.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "onnxconverter-common" +dynamic = ["version"] +description="ONNX Converter and Optimization Tools" +authors = [{ name = "Microsoft Corporation", email = "onnx@microsoft.com" }] +urls = { "Repository" = "https://github.com/microsoft/onnxconverter-common" } +readme = "README.md" +requires-python = ">=3.8" +license = { file = "LICENSE" } +classifiers = [ + "Development Status :: 5 - Production/Stable", + "Environment :: Console", + "Intended Audience :: Developers", + "Operating System :: MacOS :: MacOS X", + "Operating System :: Microsoft :: Windows", + "Programming Language :: Python", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "License :: OSI Approved :: MIT License", +] +dependencies = ["numpy", "onnx", "packaging", "protobuf==3.20.2"] + +[tool.setuptools.dynamic] +version = {attr = "onnxconverter_common.__version__"} diff --git a/tests/data/test_subgraph.onnx b/tests/data/test_subgraph.onnx new file mode 100644 index 0000000..66076ac Binary files /dev/null and b/tests/data/test_subgraph.onnx differ diff --git a/tests/test_float16.py b/tests/test_float16.py index c0b1f64..ca77907 100644 --- a/tests/test_float16.py +++ b/tests/test_float16.py @@ -108,6 +108,22 @@ def test_convert_to_float16_with_truncated(self): np_array = np.array([1e-10, -2.0, 15, -1e-9, 65536.1, -100000]) convert_np_to_float16(np_array) + @unittest.skipIf(pv.Version(onnx.__version__) == pv.Version('1.9.0'), "ONNX 1.9 has different Optype behavior for Max operator") + def test_convert_to_float16_with_subgraph(self): + model32_name = "test_subgraph.onnx" + working_path = 
os.path.abspath(os.path.dirname(__file__)) + data_path = os.path.join(working_path, 'data') + model_path = os.path.join(data_path, model32_name) + onnx_model32 = onnxmltools.utils.load_model(model_path) + x = np.array([1.0], dtype=np.float32) + y = np.array([2.0], dtype=np.float32) + output_32 = _ort_inference(onnx_model32, {"x":x, "y":y}) + + onnx_model16 = convert_float_to_float16(onnx_model32, keep_io_types=True) + output_16 = _ort_inference(onnx_model16, {"x":x, "y":y}) + self.assertTrue(np.allclose(output_16, output_32, atol=1e-2)) + + if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(ONNXFloat16Test)