Skip to content

Commit

Permalink
fix subgraph bug (#260)
Browse files Browse the repository at this point in the history
* add functions to resort

Signed-off-by: xiaowuhu <[email protected]>

* change code and add test case

Signed-off-by: xiaowuhu <[email protected]>

* Update float16.py

Signed-off-by: xiaowuhu <[email protected]>

* Delete float16_old.py

Signed-off-by: xiaowuhu <[email protected]>

* Update float16.py

Signed-off-by: xiaowuhu <[email protected]>

* fix flake 8

Signed-off-by: xiaowuhu <[email protected]>

* Update perfstats.py

Signed-off-by: xiaowuhu <[email protected]>

* Update test_float16.py

Signed-off-by: xiaowuhu <[email protected]>

* Update __init__.py

Signed-off-by: xiaowuhu <[email protected]>

* Create pyproject.toml

* add more info

* update

Signed-off-by: xiaowuhu <[email protected]>

* Delete setup.py

* Update float16.py

Signed-off-by: xiaowuhu <[email protected]>

* disable python

Signed-off-by: xiaowuhu <[email protected]>

* Revert "Delete setup.py"

This reverts commit 2da0727.

---------

Signed-off-by: xiaowuhu <[email protected]>
  • Loading branch information
xiaowuhu authored Aug 21, 2023
1 parent 8c81a0e commit 3873b29
Show file tree
Hide file tree
Showing 10 changed files with 115 additions and 12 deletions.
4 changes: 2 additions & 2 deletions .azure-pipelines/OneBranchPipeline-Official.yml
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ extends:
break: true # always break the build on policheck issues. You can disable it by setting to 'false'
codeql:
python:
enabled: true
enabled: false

stages:
- stage: firststage
Expand All @@ -57,7 +57,7 @@ extends:
pip install -r requirements.txt
pip install onnxruntime
pip install onnxmltools
pip install onnx==1.12.0
pip install onnx
pip install pytest
pip install -e .
displayName: 'Install dependencies'
Expand Down
4 changes: 2 additions & 2 deletions .azure-pipelines/OneBranchPipeline-PullRequest.yml
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ extends:
break: true # always break the build on policheck issues.
codeql:
python:
enabled: true
enabled: false

stages:
- stage: firststage
Expand All @@ -55,7 +55,7 @@ extends:
pip install -r requirements.txt
pip install onnxruntime
pip install onnxmltools
pip install onnx==1.12.0
pip install onnx
pip install pytest
pip install -e .
displayName: 'Install dependencies'
Expand Down
1 change: 1 addition & 0 deletions MANIFEST.in
Original file line number Diff line number Diff line change
@@ -1 +1,2 @@
include LICENSE
include requirements.txt
2 changes: 1 addition & 1 deletion onnxconverter_common/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
This framework performs optimization for ONNX models and
includes common utilities for ONNX converters.
"""
__version__ = "1.13.0"
__version__ = "1.14.0"
__author__ = "Microsoft"
__producer__ = "OnnxMLTools"
__producer_version__ = __version__
Expand Down
4 changes: 2 additions & 2 deletions onnxconverter_common/data_types.py
Original file line number Diff line number Diff line change
def find_type_conversion(source_type, target_type):
    """
    Find the operator name for converting source_type into target_type.

    Both arguments are *instances* of type classes (e.g. FloatTensorType(...)),
    not the classes themselves.

    :param source_type: type instance describing the source.
    :param target_type: type instance describing the desired target.
    :return: 'identity' when the two types already match, or
             'imageToFloatTensor' when converting to a float tensor.
    :raises ValueError: when no conversion is known.
    """
    # Compare the classes of the two instances.  Note that
    # isinstance(source_type, target_type) would raise TypeError here,
    # because isinstance() requires a type (not an instance) as its
    # second argument.
    if isinstance(source_type, type(target_type)):
        return 'identity'
    if isinstance(target_type, FloatTensorType):
        return 'imageToFloatTensor'
    raise ValueError('Unsupported type conversion from %s to %s' % (
        source_type, target_type))
Expand Down
64 changes: 60 additions & 4 deletions onnxconverter_common/float16.py
Original file line number Diff line number Diff line change
Expand Up @@ -157,6 +157,8 @@ def convert_float_to_float16(model, min_positive_val=1e-7, max_finite_val=1e4,
queue = []
value_info_list = []
node_list = []
# key = node, value = graph, used to distinguish global with sub-graph
node_dict = {}
# type inference on input model
if func_infer_shape is not None:
model = func_infer_shape(model)
Expand Down Expand Up @@ -222,6 +224,7 @@ def convert_float_to_float16(model, min_positive_val=1e-7, max_finite_val=1e4,
# so it will not be converted to float16
if n.op_type in op_block_list or n.name in node_block_list:
node_list.append(n)
node_dict[n.name] = q
else:
if n.op_type == 'Cast':
for attr in n.attribute:
Expand Down Expand Up @@ -264,15 +267,16 @@ def convert_float_to_float16(model, min_positive_val=1e-7, max_finite_val=1e4,
for value_info in value_info_list:
if input == value_info.name:
# create new value_info for current node's new input name
new_value_info = model.graph.value_info.add()
graph = node_dict[node.name] # get the correct graph instead of the global graph
new_value_info = graph.value_info.add()
new_value_info.CopyFrom(value_info)
output_name = node.name + '_input_cast_' + str(i)
new_value_info.name = output_name
new_value_info.type.tensor_type.elem_type = onnx_proto.TensorProto.FLOAT
# add Cast node (from tensor(float16) to tensor(float) before current node
node_name = node.name + '_input_cast' + str(i)
new_node = [helper.make_node('Cast', [input], [output_name], to=1, name=node_name)]
model.graph.node.extend(new_node)
graph.node.extend(new_node)
# change current node's input name
node.input[i] = output_name
break
Expand All @@ -283,19 +287,21 @@ def convert_float_to_float16(model, min_positive_val=1e-7, max_finite_val=1e4,
for value_info in value_info_list:
if output == value_info.name:
# create new value_info for current node's new output
new_value_info = model.graph.value_info.add()
graph = node_dict[node.name] # get the correct graph instead of the global graph
new_value_info = graph.value_info.add()
new_value_info.CopyFrom(value_info)
input_name = node.name + '_output_cast_' + str(i)
new_value_info.name = input_name
new_value_info.type.tensor_type.elem_type = onnx_proto.TensorProto.FLOAT
# add Cast node (from tensor(float) to tensor(float16) after current node
node_name = node.name + '_output_cast' + str(i)
new_node = [helper.make_node('Cast', [input_name], [output], to=10, name=node_name)]
model.graph.node.extend(new_node)
graph.node.extend(new_node)
# change current node's input name
node.output[i] = input_name
break

sort_topology(model.graph)
return model


Expand Down Expand Up @@ -332,3 +338,53 @@ def convert_float_to_float16_model_path(model_path, min_positive_val=1e-7, max_f
if not disable_shape_infer:
model = onnx.load(model_path)
return convert_float_to_float16(model, min_positive_val, max_finite_val, keep_io_types, disable_shape_infer)


def sort_graph_node(graph_proto):
    """Topologically sort the nodes of *graph_proto* in place.

    A node is "ready" when none of its inputs is produced by a node that is
    still unsorted; graph inputs and initializers are never node outputs, so
    they do not block a node.  Repeatedly pick the first ready node (keeping
    the original relative order among ready nodes), until all nodes are
    placed.

    :param graph_proto: an onnx GraphProto (or any object with a mutable
        ``node`` sequence whose items expose ``input``/``output`` lists).
    :raises RuntimeError: if no ready node can be found, i.e. the remaining
        nodes form a cycle and no topological order exists.
    """
    # find the "first" node in Nodes that its input is not any node's output
    def find_first_node(output2node_dict):
        for node in org_nodes:
            is_not_first_node = any(item in output2node_dict for item in node.input)
            if not is_not_first_node:
                return node
        return None

    # remove the node from output2node_dict using output as key
    def remove_first_node_from_dict2(first_node):
        for output in first_node.output:
            if output in output2node_dict:
                del output2node_dict[output]

    org_nodes = graph_proto.node
    # create a dict to store output as key and node as value
    output2node_dict = {}
    for node in org_nodes:
        for output in node.output:
            output2node_dict[output] = node

    # save the final node after sorted
    sorted_node = []
    # traverse the Nodes to find the first node
    while len(output2node_dict) > 0:
        first_node = find_first_node(output2node_dict)
        if first_node is None:
            # Every remaining node consumes another remaining node's output:
            # the graph contains a cycle and cannot be topologically sorted.
            # (Without this check the old code crashed with an opaque
            # AttributeError on first_node.output.)
            raise RuntimeError('sort_graph_node: the graph contains a cycle, '
                               'topological sort is impossible')
        sorted_node.append(first_node)
        remove_first_node_from_dict2(first_node)
        # del node from original nodes list to avoid duplicate traverse
        org_nodes.remove(first_node)

    # append the nodes back in sorted order
    for new_node in sorted_node:
        graph_proto.node.extend([new_node])


def sort_topology(graph_proto):
    """Topologically sort *graph_proto* (normally ``model.graph``) and,
    recursively, every sub-graph held in its nodes' attributes
    (e.g. the bodies of If/Loop/Scan nodes).
    """
    assert isinstance(graph_proto, onnx_proto.GraphProto)
    # Sort this graph's own nodes first.
    sort_graph_node(graph_proto)
    # Then descend into every graph-valued attribute.
    for op in graph_proto.node:
        for attribute in op.attribute:
            # Single-graph attribute (attr.g).  Protobuf always exposes a
            # default instance here, so only recurse when it has nodes.
            single = attribute.g
            if isinstance(single, onnx_proto.GraphProto) and len(single.node) > 0:
                sort_topology(single)
            # Repeated-graph attribute (attr.graphs).
            for nested in attribute.graphs:
                if isinstance(nested, onnx_proto.GraphProto):
                    sort_topology(nested)
2 changes: 1 addition & 1 deletion onnxconverter_common/perfstats.py
Original file line number Diff line number Diff line change
Expand Up @@ -114,7 +114,7 @@ def compute_op_type_entries(raw_entries):
def read_raw_entries(profile_path):
with open(profile_path, "r") as f:
data = json.load(f)
if type(data) == dict:
if isinstance(data, dict):
data = data['traceEvents']
entries = []
for item in data:
Expand Down
30 changes: 30 additions & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
[build-system]
requires = ["setuptools>=61.0.0"]
build-backend = "setuptools.build_meta"

[project]
name = "onnxconverter-common"
dynamic = ["version"]
description="ONNX Converter and Optimization Tools"
authors = [{ name = "Microsoft Corporation", email = "[email protected]" }]
urls = { "Repository" = "https://github.com/microsoft/onnxconverter-common" }
readme = "README.md"
requires-python = ">=3.8"
license = { file = "LICENSE" }
classifiers = [
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"License :: OSI Approved :: MIT License",
]
dependencies = ["numpy", "onnx", "packaging", "protobuf==3.20.2"]

[tool.setuptools.dynamic]
version = {attr = "onnxconverter_common.__version__"}
Binary file added tests/data/test_subgraph.onnx
Binary file not shown.
16 changes: 16 additions & 0 deletions tests/test_float16.py
Original file line number Diff line number Diff line change
Expand Up @@ -108,6 +108,22 @@ def test_convert_to_float16_with_truncated(self):
np_array = np.array([1e-10, -2.0, 15, -1e-9, 65536.1, -100000])
convert_np_to_float16(np_array)

@unittest.skipIf(pv.Version(onnx.__version__) == pv.Version('1.9.0'), "ONNX 1.9 has different Optype behavior for Max operator")
def test_convert_to_float16_with_subgraph(self):
    """Converting a model that contains sub-graphs to fp16 must still
    produce a runnable model with (approximately) the same outputs.
    """
    data_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'data')
    model_path = os.path.join(data_dir, "test_subgraph.onnx")
    onnx_model32 = onnxmltools.utils.load_model(model_path)
    feeds = {"x": np.array([1.0], dtype=np.float32),
             "y": np.array([2.0], dtype=np.float32)}
    output_32 = _ort_inference(onnx_model32, feeds)

    onnx_model16 = convert_float_to_float16(onnx_model32, keep_io_types=True)
    output_16 = _ort_inference(onnx_model16, feeds)
    # fp16 has ~3 decimal digits of precision; a loose tolerance suffices.
    self.assertTrue(np.allclose(output_16, output_32, atol=1e-2))



if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(ONNXFloat16Test)
Expand Down

0 comments on commit 3873b29

Please sign in to comment.