From 38670fa963fdfc90a54cfd9a2263880bb14bc4cf Mon Sep 17 00:00:00 2001 From: David Date: Wed, 16 Dec 2020 22:14:09 -0800 Subject: [PATCH 1/2] Update to opset 13 --- onnxconverter_common/__init__.py | 2 +- onnxconverter_common/onnx_ex.py | 6 +++-- onnxconverter_common/onnx_ops.py | 42 +++++++++++++++++++++++++------- 3 files changed, 38 insertions(+), 12 deletions(-) diff --git a/onnxconverter_common/__init__.py b/onnxconverter_common/__init__.py index 9109cab..00eb404 100644 --- a/onnxconverter_common/__init__.py +++ b/onnxconverter_common/__init__.py @@ -8,7 +8,7 @@ This framework performs optimization for ONNX models and includes common utilities for ONNX converters. """ -__version__ = "1.7.0" +__version__ = "1.8.0" __author__ = "Microsoft" __producer__ = "OnnxMLTools" __producer_version__ = __version__ diff --git a/onnxconverter_common/onnx_ex.py b/onnxconverter_common/onnx_ex.py index 547295c..2dc2938 100644 --- a/onnxconverter_common/onnx_ex.py +++ b/onnxconverter_common/onnx_ex.py @@ -8,10 +8,12 @@ from . import utils from .metadata_props import add_metadata_props -DEFAULT_OPSET_NUMBER = 12 # The maximum opset supported by the converter in the code branch. +DEFAULT_OPSET_NUMBER = 13 # The maximum opset supported by the converter in the code branch. 
+# From https://github.com/onnx/onnx/blob/master/docs/Versioning.md OPSET_TO_IR_VERSION = { 1: 3, 2: 3, 3: 3, 4: 3, 5: 3, 6: 3, - 7: 3, 8: 4, 9: 4, 10: 5, 11: 6, 12: 7 + 7: 3, 8: 3, 9: 4, 10: 5, 11: 6, 12: 7, + 13: 7 } diff --git a/onnxconverter_common/onnx_ops.py b/onnxconverter_common/onnx_ops.py index 7924b8e..ead1fe0 100644 --- a/onnxconverter_common/onnx_ops.py +++ b/onnxconverter_common/onnx_ops.py @@ -860,8 +860,10 @@ def apply_selu(scope, input_name, output_name, container, operator_name=None, al _apply_unary_operation(scope, 'Selu', input_name, output_name, container, operator_name, alpha=alpha, gamma=gamma) -def apply_softmax(scope, input_name, output_name, container, operator_name=None, axis=1): +def apply_softmax(scope, input_name, output_name, container, operator_name=None, axis=None): name = _create_name_or_use_existing_one(scope, 'Softmax', operator_name) + if axis is None: + axis = 1 if container.target_opset < 13 else -1 container.add_node('Softmax', input_name, output_name, name=name, axis=axis) @@ -969,12 +971,23 @@ def apply_split(scope, input_name, output_names, container, operator_name=None, op_version = 1 elif container.target_opset < 11: op_version = 2 - else: + elif container.target_opset < 13: op_version = 11 + else: + op_version = 13 attrs = {'name': name} if split is not None: - attrs['split'] = split + if container.target_opset < 13: + attrs['split'] = split + else: + if isinstance(split, str): + input_name = input_name + [split] + else: + split_name = scope.get_unique_variable_name(name + '_split') + container.add_initializer(split_name, onnx_proto.TensorProto.INT64, [len(split)], split) + input_name = [input_name, split_name] + if axis is not None: attrs['axis'] = axis @@ -988,17 +1001,28 @@ def apply_sqrt(scope, input_name, output_name, container, operator_name=None): def _apply_squeeze_unsqueeze(scope, input_name, output_name, container, squeeze_str, operator_name=None, axes=None, rank=0): name = 
_create_name_or_use_existing_one(scope, squeeze_str, operator_name) - if container.target_opset < 11: - op_version = 1 - axes = [axis if axis >= 0 else axis + rank + 1 for axis in axes] + if container.target_opset < 13: + if container.target_opset < 11: + op_version = 1 + axes = [axis if axis >= 0 else axis + rank + 1 for axis in axes] + else: + op_version = 11 + container.add_node(squeeze_str, input_name, output_name, name=name, op_version=op_version, axes=axes) else: - op_version = 11 - container.add_node(squeeze_str, input_name, output_name, name=name, op_version=op_version, axes=axes) + op_version = 13 + if isinstance(axes, str): + container.add_node(squeeze_str, input_name + [axes], output_name, op_version=op_version, name=name) + elif len(axes) == 0: + container.add_node(squeeze_str, input_name, output_name, op_version=op_version, name=name) + else: + axes_name = scope.get_unique_variable_name(name + '_axes') + container.add_initializer(axes_name, onnx_proto.TensorProto.INT64, [len(axes)], axes) + container.add_node(squeeze_str, [input_name, axes_name], output_name, op_version=op_version, name=name) def apply_squeeze(scope, input_name, output_name, container, operator_name=None, axes=None, rank=0): if axes is None: - axes = [0] + axes = [] _apply_squeeze_unsqueeze(scope, input_name, output_name, container, 'Squeeze', operator_name, axes, rank) From 90f962123e44d4795d27936a370745b8862d3bd1 Mon Sep 17 00:00:00 2001 From: David Date: Wed, 16 Dec 2020 22:14:09 -0800 Subject: [PATCH 2/2] Update to opset 13 --- onnxconverter_common/__init__.py | 2 +- onnxconverter_common/onnx_ex.py | 6 ++- onnxconverter_common/onnx_ops.py | 63 +++++++++++++++++++++++++++----- 3 files changed, 59 insertions(+), 12 deletions(-) diff --git a/onnxconverter_common/__init__.py b/onnxconverter_common/__init__.py index 9109cab..00eb404 100644 --- a/onnxconverter_common/__init__.py +++ b/onnxconverter_common/__init__.py @@ -8,7 +8,7 @@ This framework performs optimization for ONNX models 
and includes common utilities for ONNX converters. """ -__version__ = "1.7.0" +__version__ = "1.8.0" __author__ = "Microsoft" __producer__ = "OnnxMLTools" __producer_version__ = __version__ diff --git a/onnxconverter_common/onnx_ex.py b/onnxconverter_common/onnx_ex.py index 547295c..2dc2938 100644 --- a/onnxconverter_common/onnx_ex.py +++ b/onnxconverter_common/onnx_ex.py @@ -8,10 +8,12 @@ from . import utils from .metadata_props import add_metadata_props -DEFAULT_OPSET_NUMBER = 12 # The maximum opset supported by the converter in the code branch. +DEFAULT_OPSET_NUMBER = 13 # The maximum opset supported by the converter in the code branch. +# From https://github.com/onnx/onnx/blob/master/docs/Versioning.md OPSET_TO_IR_VERSION = { 1: 3, 2: 3, 3: 3, 4: 3, 5: 3, 6: 3, - 7: 3, 8: 4, 9: 4, 10: 5, 11: 6, 12: 7 + 7: 3, 8: 3, 9: 4, 10: 5, 11: 6, 12: 7, + 13: 7 } diff --git a/onnxconverter_common/onnx_ops.py b/onnxconverter_common/onnx_ops.py index 7924b8e..0bb426c 100644 --- a/onnxconverter_common/onnx_ops.py +++ b/onnxconverter_common/onnx_ops.py @@ -765,6 +765,27 @@ def apply_reciprocal(scope, input_name, output_name, container, operator_name=No _apply_unary_operation(scope, 'Reciprocal', input_name, output_name, container, operator_name=operator_name) +def apply_reducesum(scope, input_name, output_name, container, operator_name=None, axes=None, keepdims=None, rank=0): + name = _create_name_or_use_existing_one(scope, 'ReduceSum', operator_name) + if container.target_opset < 13: + if container.target_opset < 11: + op_version = 1 + axes = [axis if axis >= 0 else axis + rank + 1 for axis in axes] + else: + op_version = 11 + container.add_node('ReduceSum', input_name, output_name, name=name, op_version=op_version, axes=axes, keepdims=keepdims) + else: + op_version = 13 + if isinstance(axes, str): + container.add_node('ReduceSum', input_name + [axes], output_name, op_version=op_version, name=name, keepdims=keepdims) + elif axes is None or len(axes) == 0: + 
container.add_node('ReduceSum', input_name, output_name, op_version=op_version, name=name, keepdims=keepdims) + else: + axes_name = scope.get_unique_variable_name(name + '_reducesum') + container.add_initializer(axes_name, onnx_proto.TensorProto.INT64, [len(axes)], axes) + container.add_node('ReduceSum', [input_name, axes_name], output_name, op_version=op_version, name=name, keepdims=keepdims) + + def apply_relu(scope, input_name, output_name, container, operator_name=None): _apply_unary_operation(scope, 'Relu', input_name, output_name, container, operator_name) @@ -860,8 +881,10 @@ def apply_selu(scope, input_name, output_name, container, operator_name=None, al _apply_unary_operation(scope, 'Selu', input_name, output_name, container, operator_name, alpha=alpha, gamma=gamma) -def apply_softmax(scope, input_name, output_name, container, operator_name=None, axis=1): +def apply_softmax(scope, input_name, output_name, container, operator_name=None, axis=None): name = _create_name_or_use_existing_one(scope, 'Softmax', operator_name) + if axis is None: + axis = 1 if container.target_opset < 13 else -1 container.add_node('Softmax', input_name, output_name, name=name, axis=axis) @@ -969,12 +992,23 @@ def apply_split(scope, input_name, output_names, container, operator_name=None, op_version = 1 elif container.target_opset < 11: op_version = 2 - else: + elif container.target_opset < 13: op_version = 11 + else: + op_version = 13 attrs = {'name': name} if split is not None: - attrs['split'] = split + if container.target_opset < 13: + attrs['split'] = split + else: + if isinstance(split, str): + input_name = input_name + [split] + else: + split_name = scope.get_unique_variable_name(name + '_split') + container.add_initializer(split_name, onnx_proto.TensorProto.INT64, [len(split)], split) + input_name = [input_name, split_name] + if axis is not None: attrs['axis'] = axis @@ -988,17 +1022,28 @@ def apply_sqrt(scope, input_name, output_name, container, operator_name=None): def 
_apply_squeeze_unsqueeze(scope, input_name, output_name, container, squeeze_str, operator_name=None, axes=None, rank=0): name = _create_name_or_use_existing_one(scope, squeeze_str, operator_name) - if container.target_opset < 11: - op_version = 1 - axes = [axis if axis >= 0 else axis + rank + 1 for axis in axes] + if container.target_opset < 13: + if container.target_opset < 11: + op_version = 1 + axes = [axis if axis >= 0 else axis + rank + 1 for axis in axes] + else: + op_version = 11 + container.add_node(squeeze_str, input_name, output_name, name=name, op_version=op_version, axes=axes) else: - op_version = 11 - container.add_node(squeeze_str, input_name, output_name, name=name, op_version=op_version, axes=axes) + op_version = 13 + if isinstance(axes, str): + container.add_node(squeeze_str, input_name + [axes], output_name, op_version=op_version, name=name) + elif len(axes) == 0: + container.add_node(squeeze_str, input_name, output_name, op_version=op_version, name=name) + else: + axes_name = scope.get_unique_variable_name(name + '_axes') + container.add_initializer(axes_name, onnx_proto.TensorProto.INT64, [len(axes)], axes) + container.add_node(squeeze_str, [input_name, axes_name], output_name, op_version=op_version, name=name) def apply_squeeze(scope, input_name, output_name, container, operator_name=None, axes=None, rank=0): if axes is None: - axes = [0] + axes = [] _apply_squeeze_unsqueeze(scope, input_name, output_name, container, 'Squeeze', operator_name, axes, rank)