[v1.x] Add more ONNX export support to operators (#19625)

josephevans authored Dec 11, 2020
1 parent c37d5aa commit b219ac2

Showing 3 changed files with 321 additions and 0 deletions.
1 change: 1 addition & 0 deletions ci/docker/runtime_functions.sh
@@ -1281,6 +1281,7 @@ integrationtest_ubuntu_cpu_onnx() {
    pytest tests/python-pytest/onnx/mxnet_export_test.py
    pytest tests/python-pytest/onnx/test_models.py
    pytest tests/python-pytest/onnx/test_node.py
    pytest tests/python-pytest/onnx/test_operators.py
    pytest tests/python-pytest/onnx/test_onnxruntime.py
}

186 changes: 186 additions & 0 deletions python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
@@ -154,6 +154,29 @@ def create_basic_op_node(op_name, node, kwargs):
    )
    return [node]

def create_const_scalar_node(input_name, value, kwargs):
    """Helper function to create a tensor value node and an
    initializer tensor node with a constant scalar value."""
    from onnx.helper import make_tensor, make_tensor_value_info
    initializer = kwargs["initializer"]
    input_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[value.dtype]
    value_node = make_tensor_value_info(input_name, input_type, ())
    tensor_node = make_tensor(input_name, input_type, (), (value,))
    initializer.append(tensor_node)
    return value_node

def create_const_node(input_name, value, kwargs):
    """Helper function to create a tensor value node and an
    initializer tensor node with a constant tensor value."""
    from onnx.helper import make_tensor, make_tensor_value_info
    initializer = kwargs["initializer"]
    input_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[value.dtype]
    input_shape = value.shape
    value_node = make_tensor_value_info(input_name, input_type, input_shape)
    tensor_node = make_tensor(input_name, input_type, input_shape, value)
    initializer.append(tensor_node)
    return value_node
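
A minimal usage sketch of the two helpers above (illustration only, not part of the commit): the kwargs dict here is hand-built, whereas the real exporter assembles it while converting the graph.

import numpy as np

kwargs = {"initializer": []}  # the exporter collects TensorProto constants here
half = create_const_scalar_node("example_half", np.float32(0.5), kwargs)
begin = create_const_node("example_begin", np.array([0, 1], dtype='int64'), kwargs)
assert len(kwargs["initializer"]) == 2  # both constants were registered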

@mx_op.register("null")
def convert_weights_and_inputs(node, **kwargs):
    """Helper function to convert weights and inputs.
@@ -802,6 +825,7 @@ def convert_leakyrelu(node, **kwargs):
"""Map MXNet's LeakyReLU operator attributes to onnx's Elu/LeakyRelu/PRelu operators
based on the input node's attributes and return the created node.
"""
from onnx.helper import make_node
name, input_nodes, attrs = get_inputs(node, kwargs)

act_type = attrs.get("act_type", "leaky")
@@ -816,6 +840,19 @@
            inputs=input_nodes,
            outputs=[name],
            name=name)
    elif act_type == 'gelu':
        sqrt2 = np.float32(1.4142135623730951)
        nodes = [
            create_const_scalar_node(name+"_sqrt2", sqrt2, kwargs),
            make_node("Div", [input_nodes[0], name+"_sqrt2"], [name+"_div0_out"]),
            make_node("Erf", [name+"_div0_out"], [name+"_erf0_out"]),
            create_const_scalar_node(name+"_one", np.float32(1.0), kwargs),
            create_const_scalar_node(name+"_half", np.float32(0.5), kwargs),
            make_node("Add", [name+"_erf0_out", name+"_one"], [name+"_add0_out"]),
            make_node("Mul", [input_nodes[0], name+"_add0_out"], [name+"_mul0_out"]),
            make_node("Mul", [name+"_mul0_out", name+"_half"], [name])
        ]
        return nodes
    else:
        node = onnx.helper.make_node(
            act_name[act_type],
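
The new 'gelu' branch above decomposes MXNet's erf-based GELU into Div, Erf, Add, and two Mul nodes, following gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))). A standalone check of that identity (illustration only; assumes scipy is available):

import numpy as np
from scipy.special import erf  # assumption: scipy is installed

x = np.linspace(-3, 3, 7).astype(np.float32)
ref = 0.5 * x * (1.0 + erf(x / np.sqrt(2.0)))  # the Div -> Erf -> Add -> Mul -> Mul chain
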
@@ -2214,3 +2251,152 @@ def convert_take(node, **kwargs):
        name=name,
    )
    return [node]


@mx_op.register("LayerNorm")
def convert_layer_norm(node, **kwargs):
"""Map MXNet's LayerNorm operator attributes to onnx operators.
"""
from onnx.helper import make_node
name, input_nodes, attrs = get_inputs(node, kwargs)

in_shape = kwargs['in_shape']
axes = [-i for i in range(len(in_shape[0]), 0, -1)]
eps = attrs.get('eps')
nodes = [
make_node("ReduceMean", [input_nodes[0]], [name+"_rm0_out"], axes=axes),
make_node("Sub", [input_nodes[0], name+"_rm0_out"], [name+"_sub0_out"]),
create_const_scalar_node(name+"_two", np.float32(2.), kwargs),
make_node("Pow", [name+"_sub0_out", name+"_two"], [name+"_pow0_out"]),
make_node("ReduceMean", [name+"_pow0_out"], [name+"_rm1_out"], axes=axes),
create_const_scalar_node(name+"_eps", np.float32(eps), kwargs),
make_node("Add", [name+"_rm1_out", name+"_eps"], [name+"_add0_out"]),
make_node("Sqrt", [name+"_add0_out"], [name+"_sqrt0_out"]),
make_node("Div", [name+"_sub0_out", name+"_sqrt0_out"], [name+"_div0_out"]),
make_node("Mul", [name+"_div0_out", input_nodes[1]], [name+"_mul0_out"]),
make_node("Add", [name+"_mul0_out", input_nodes[2]], [name], name)
]

return nodes
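
The node chain implements the textbook formulation y = (x - mean) / sqrt(var + eps) * gamma + beta over the normalization axis. A numpy reference of the same computation (illustration only):

import numpy as np

def layer_norm_ref(x, gamma, beta, eps=1e-5, axis=-1):
    # ReduceMean -> Sub -> Pow(2) -> ReduceMean -> Add(eps) -> Sqrt -> Div -> Mul -> Add
    mean = x.mean(axis=axis, keepdims=True)
    var = ((x - mean) ** 2).mean(axis=axis, keepdims=True)
    return (x - mean) / np.sqrt(var + eps) * gamma + beta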


@mx_op.register("Embedding")
def convert_embedding(node, **kwargs):
"""Map MXNet's Embedding operator attributes to onnx's
Gather operator."""
name, input_nodes, attrs = get_inputs(node, kwargs)
axis = int(attrs.get('axis', 0))
node = onnx.helper.make_node(
"Gather",
input_nodes,
[name],
axis=axis,
name=name
)
return [node]
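
Gather with axis=0 over the weight matrix is exactly an embedding lookup; in numpy terms (illustration only, shapes hypothetical):

import numpy as np

weight = np.random.rand(10, 4).astype(np.float32)  # vocab_size x embed_dim
indices = np.array([1, 5, 5, 9])
embedded = np.take(weight, indices, axis=0)  # what Gather(axis=0) computes
assert embedded.shape == (4, 4)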


@mx_op.register("stack")
def convert_stack(node, **kwargs):
"""Map MXNet's stack operator to onnx operators.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
axis = int(attrs.get('axis', 0))
idx = 0
nodes = []
for input_node in input_nodes:
nodes.append(onnx.helper.make_node(
"Unsqueeze",
inputs=[input_node],
outputs=[name+"_unsqueeze"+str(idx)],
axes=[axis]
))
idx += 1

nodes.append(onnx.helper.make_node(
"Concat",
inputs=[name+"_unsqueeze"+str(i) for i in range(len(nodes))],
outputs=[name],
name=name,
axis=axis
))
return nodes
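
The decomposition relies on the identity that stacking equals unsqueezing each input on the new axis and then concatenating; a numpy check (illustration only):

import numpy as np

x, y = np.array([1., 2.]), np.array([3., 4.])
via_unsqueeze = np.concatenate([np.expand_dims(x, 0), np.expand_dims(y, 0)], axis=0)
assert np.array_equal(via_unsqueeze, np.stack([x, y], axis=0))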


@mx_op.register("slice")
def convert_slice(node, **kwargs):
"""Map MXNet's slice operator to onnx Slice operator."""
name, input_nodes, attrs = get_inputs(node, kwargs)
starts = convert_string_to_list(attrs.get("begin"))
ends = convert_string_to_list(attrs.get("end"))
steps = attrs.get("step", [])
nodes = [
create_const_node(name+"_begin", np.array(starts), kwargs),
create_const_node(name+"_end", np.array(ends), kwargs)
]
inputs = [input_nodes[0], name+"_begin", name+"_end"]
if len(steps) > 0:
nodes.append(create_const_node(name+"_steps", np.array(steps, dtype='int64'), kwargs))
inputs.append(name+"_steps")
nodes.append(onnx.helper.make_node("Slice", inputs, [name], name=name))
return nodes
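
Since opset 10, ONNX Slice takes starts/ends/steps as tensor inputs rather than attributes, which is why they are materialized as int64 initializers here. The semantics match basic numpy slicing (illustration only):

import numpy as np

x = np.arange(12, dtype=np.float32).reshape(3, 4)
sliced = x[0:2, 1:4]  # what Slice computes from begin=(0, 1), end=(2, 4)
assert sliced.shape == (2, 3)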


@mx_op.register("zeros_like")
def convert_zeros_like(node, **kwargs):
"""Map MXNet's zeros_like operator attributes to onnx's ConstantOfShape operator.
"""
from onnx.helper import make_node, make_tensor
name, _, _ = get_inputs(node, kwargs)

# create tensor with shape of input
create_const_node(name+"_shape", np.array(kwargs['in_shape'][0], dtype='int64'), kwargs)
tensor_value = make_tensor(name+"_zero", kwargs['in_type'], [1], [0])
nodes = [
make_node("ConstantOfShape", [name+"_shape"], [name], value=tensor_value)
]
return nodes
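
ConstantOfShape fills a tensor of the given shape with the scalar from its value attribute; note that the input's shape is baked in as a constant at export time, so dynamic input shapes are not covered. In numpy terms (illustration only):

import numpy as np

in_shape = np.array([4, 3], dtype=np.int64)  # captured from kwargs['in_shape'][0]
zeros = np.zeros(tuple(in_shape), dtype=np.float32)  # what ConstantOfShape emits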


@mx_op.register("_contrib_arange_like")
def convert_arange_like(node, **kwargs):
"""Map MXNet's arange_like operator attributes to onnx's Range and Reshape operators.
"""
from onnx.helper import make_node
name, _, attrs = get_inputs(node, kwargs)

opset_version = kwargs['opset_version']
if opset_version < 11:
raise AttributeError("ONNX opset 11 or greater is required to export this operator")

input_type = kwargs['in_type']
dtype = onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[input_type]
in_shape = kwargs['in_shape']
axis = attrs.get('axis')

if axis is None:
# output will be same shape as input
output_shape = in_shape[0]
else:
# determine shape of axis
output_shape = [in_shape[0][int(axis)]]

start = np.array([attrs.get('start', 0.)], dtype=dtype)
step = np.array([attrs.get('step', 1.)], dtype=dtype)
repeat = np.array([attrs.get('repeat', 1)], dtype=dtype)
if repeat != 1:
raise NotImplementedError("arange_like operator with repeat != 1 not yet implemented.")

tot_elements = np.prod(output_shape)
limit = np.array([start + (tot_elements * step)], dtype=dtype)

# create constant inputs
nodes = [
create_const_scalar_node(name+"_start", start, kwargs),
create_const_scalar_node(name+"_limit", limit, kwargs),
create_const_scalar_node(name+"_step", step, kwargs),
create_const_node(name+"_shape", np.array(output_shape, dtype='int64'), kwargs),
make_node("Range", [name+"_start", name+"_limit", name+"_step"], [name+"_range0_out"]),
make_node("Reshape", [name+"_range0_out", name+"_shape"], [name])
]
return nodes
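
The exported graph computes Range(start, start + prod(output_shape) * step, step) and reshapes the result, which reproduces arange_like when repeat == 1. A numpy sketch over a hypothetical (2, 3) input:

import numpy as np

output_shape = (2, 3)  # the input's shape, captured at export time
start, step = np.float32(0.), np.float32(1.)
limit = start + np.prod(output_shape) * step
ref = np.arange(start, limit, step, dtype=np.float32).reshape(output_shape)
# ref == [[0., 1., 2.], [3., 4., 5.]]
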
134 changes: 134 additions & 0 deletions tests/python-pytest/onnx/test_operators.py
@@ -0,0 +1,134 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import mxnet as mx
from mxnet.gluon import HybridBlock, nn
import numpy as np
import onnxruntime as rt
from mxnet.test_utils import assert_almost_equal
import pytest
import tempfile

def op_export_test(op_name, Model, inputs, tmp_path):
    def export_to_onnx(model, op_name, inputs):
        model_path = '{}/{}'.format(tmp_path, op_name)
        model.export(model_path, epoch=0)
        sym_file = '{}-symbol.json'.format(model_path)
        params_file = '{}-0000.params'.format(model_path)
        dtype = inputs[0].dtype
        onnx_file = '{}/{}.onnx'.format(tmp_path, op_name)
        mx.contrib.onnx.export_model(sym_file, params_file, [i.shape for i in inputs],
                                     dtype, onnx_file)
        return onnx_file
    def onnx_rt(onnx_file, inputs):
        sess = rt.InferenceSession(onnx_file)
        input_dict = dict((sess.get_inputs()[i].name, inputs[i].asnumpy()) for i in range(len(inputs)))
        pred = sess.run(None, input_dict)[0]
        return pred

    # create a new model
    model = Model()
    model.initialize(ctx=mx.cpu(0))
    model.hybridize()
    pred_nat = model(*inputs)
    onnx_file = export_to_onnx(model, op_name, inputs)
    pred_onx = onnx_rt(onnx_file, inputs)
    assert_almost_equal(pred_nat, pred_onx)


def test_onnx_export_abs():
    with tempfile.TemporaryDirectory() as tmp_path:
        class Model(HybridBlock):
            def __init__(self, **kwargs):
                super(Model, self).__init__(**kwargs)
            def hybrid_forward(self, F, x):
                out = F.abs(x)
                return out
        x = mx.nd.array([[-2, -1], [0, 99]], dtype='float32')
        op_export_test('abs', Model, [x], tmp_path)

def test_onnx_export_slice():
    with tempfile.TemporaryDirectory() as tmp_path:
        class Model(HybridBlock):
            def __init__(self, **kwargs):
                super(Model, self).__init__(**kwargs)
            def hybrid_forward(self, F, x):
                out = F.slice(x, begin=(0, 1), end=(2, 4))
                return out
        x = mx.nd.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], dtype='float32')
        op_export_test('slice', Model, [x], tmp_path)

def test_onnx_export_stack():
    with tempfile.TemporaryDirectory() as tmp_path:
        dtype = 'float32'
        class Model(HybridBlock):
            def __init__(self, **kwargs):
                super(Model, self).__init__(**kwargs)
            def hybrid_forward(self, F, x, y):
                out = F.stack(x, y)
                return out
        x = mx.nd.array([1, 2], dtype=dtype)
        y = mx.nd.array([3, 4], dtype=dtype)
        op_export_test('stack', Model, [x, y], tmp_path)

def test_onnx_export_zeros_like():
    with tempfile.TemporaryDirectory() as tmp_path:
        class Model(HybridBlock):
            def __init__(self, **kwargs):
                super(Model, self).__init__(**kwargs)
            def hybrid_forward(self, F, x):
                out = F.zeros_like(x)
                return out
        x = mx.nd.array([[-2, -1, 0], [0, 50, 99], [4, 5, 6], [7, 8, 9]], dtype='float32')
        op_export_test('zeros_like', Model, [x], tmp_path)

@pytest.mark.parametrize("dtype", ["float32", "double"])
def test_onnx_export_arange_like(dtype):
    with tempfile.TemporaryDirectory() as tmp_path:
        class Model(HybridBlock):
            def __init__(self, **kwargs):
                super(Model, self).__init__(**kwargs)
            def hybrid_forward(self, F, x):
                out = F.contrib.arange_like(x)
                return out
        x = mx.nd.array([[-2, -1, 0], [0, 50, 99], [4, 5, 6], [7, 8, 9]], dtype=dtype)
        op_export_test('arange_like', Model, [x], tmp_path)

def test_onnx_export_layernorm():
    with tempfile.TemporaryDirectory() as tmp_path:
        dtype = 'float32'
        class Model(HybridBlock):
            def __init__(self, **kwargs):
                super(Model, self).__init__(**kwargs)
            def hybrid_forward(self, F, x, gamma, beta):
                out = F.LayerNorm(x, gamma, beta, axis=1)
                return out
        x = mx.nd.array([[1, 3], [2, 4]], dtype=dtype)
        gamma = mx.random.uniform(0, 1, x[0].shape).astype(dtype)
        beta = mx.random.uniform(0, 1, x[0].shape).astype(dtype)
        op_export_test('LayerNorm', Model, [x, gamma, beta], tmp_path)


if __name__ == '__main__':
    test_onnx_export_abs()
    test_onnx_export_slice()
    test_onnx_export_stack()
    test_onnx_export_zeros_like()
    test_onnx_export_arange_like('float32')
    test_onnx_export_arange_like('double')
    test_onnx_export_layernorm()
