Skip to content

Commit

Permalink
Refactor model generation scripts (#6336)
Browse files Browse the repository at this point in the history
* Refactor model generation scripts

* Fix codeql

* Fix relative path import

* Fix package structure

* Copy the gen_common file

* Add missing uint8

* Remove duplicate import
  • Loading branch information
Tabrizian authored Sep 25, 2023
1 parent 2ccc3ce commit e0f70aa
Show file tree
Hide file tree
Showing 15 changed files with 211 additions and 1,034 deletions.
158 changes: 158 additions & 0 deletions qa/common/gen_common.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,158 @@
# Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# Common utilities for model generation scripts
import numpy as np

np_dtype_string = np.dtype(object)


def np_to_onnx_dtype(np_dtype):
    """Return the onnx.TensorProto element type matching *np_dtype*.

    onnx is imported lazily so importing this module does not require it.
    Returns None when the dtype has no ONNX equivalent.
    """
    import onnx

    # Ordered (probe, onnx type) pairs; the probe order matches the original
    # if/elif chain so numpy's dtype == semantics are applied identically.
    conversions = (
        (bool, onnx.TensorProto.BOOL),
        (np.int8, onnx.TensorProto.INT8),
        (np.int16, onnx.TensorProto.INT16),
        (np.int32, onnx.TensorProto.INT32),
        (np.int64, onnx.TensorProto.INT64),
        (np.uint8, onnx.TensorProto.UINT8),
        (np.uint16, onnx.TensorProto.UINT16),
        (np.float16, onnx.TensorProto.FLOAT16),
        (np.float32, onnx.TensorProto.FLOAT),
        (np.float64, onnx.TensorProto.DOUBLE),
        (np.dtype(object), onnx.TensorProto.STRING),  # object dtype carries strings
    )
    for probe, onnx_type in conversions:
        if np_dtype == probe:
            return onnx_type
    return None


def np_to_model_dtype(np_dtype):
    """Return the Triton model-config "TYPE_*" string matching *np_dtype*.

    Returns None when the dtype has no model-config equivalent.
    """
    # Ordered (probe, config string) pairs; the probe order matches the
    # original if/elif chain so numpy's dtype == semantics are preserved.
    conversions = (
        (bool, "TYPE_BOOL"),
        (np.int8, "TYPE_INT8"),
        (np.int16, "TYPE_INT16"),
        (np.int32, "TYPE_INT32"),
        (np.int64, "TYPE_INT64"),
        (np.uint8, "TYPE_UINT8"),
        (np.uint16, "TYPE_UINT16"),
        (np.float16, "TYPE_FP16"),
        (np.float32, "TYPE_FP32"),
        (np.float64, "TYPE_FP64"),
        (np.dtype(object), "TYPE_STRING"),  # object dtype carries strings
    )
    for probe, type_name in conversions:
        if np_dtype == probe:
            return type_name
    return None


def np_to_trt_dtype(np_dtype):
    """Return the TensorRT dtype matching *np_dtype*.

    tensorrt is imported lazily so importing this module does not require it.
    Returns None when the dtype has no TensorRT equivalent.
    """
    import tensorrt as trt

    # Ordered (probe, trt dtype) pairs; the probe order matches the original
    # if/elif chain so numpy's dtype == semantics are preserved.
    conversions = (
        (bool, trt.bool),
        (np.int8, trt.int8),
        (np.int32, trt.int32),
        (np.uint8, trt.uint8),
        (np.float16, trt.float16),
        (np.float32, trt.float32),
    )
    for probe, trt_dtype in conversions:
        if np_dtype == probe:
            return trt_dtype
    return None


def np_to_tf_dtype(np_dtype):
    """Return the TensorFlow dtype matching *np_dtype*.

    tensorflow is imported lazily so importing this module does not require it.
    Returns None when the dtype has no TensorFlow equivalent.
    """
    import tensorflow as tf

    # Ordered (probe, tf dtype) pairs; the probe order matches the original
    # if/elif chain so numpy's dtype == semantics are preserved.
    conversions = (
        (bool, tf.bool),
        (np.int8, tf.int8),
        (np.int16, tf.int16),
        (np.int32, tf.int32),
        (np.int64, tf.int64),
        (np.uint8, tf.uint8),
        (np.uint16, tf.uint16),
        (np.float16, tf.float16),
        (np.float32, tf.float32),
        (np.float64, tf.float64),
        (np.dtype(object), tf.string),  # object dtype carries strings
    )
    for probe, tf_dtype in conversions:
        if np_dtype == probe:
            return tf_dtype
    return None


def np_to_torch_dtype(np_dtype):
    """Return the PyTorch dtype matching *np_dtype*.

    torch is imported lazily so importing this module does not require it.
    Returns None for dtypes these scripts treat as unsupported in Torch.
    """
    import torch

    # Ordered (probe, torch dtype) pairs; the probe order matches the
    # original if/elif chain so numpy's dtype == semantics are preserved.
    conversions = (
        (bool, torch.bool),
        (np.int8, torch.int8),
        (np.int16, torch.int16),
        (np.int32, torch.int),
        (np.int64, torch.long),
        (np.uint8, torch.uint8),
        (np.uint16, None),  # not supported in Torch
        # NOTE(review): torch.float16 does exist; returning None here looks
        # deliberate (callers skip None dtypes) — confirm before "fixing".
        (np.float16, None),
        (np.float32, torch.float),
        (np.float64, torch.double),
        (np.dtype(object), None),  # string dtype not supported in Torch
    )
    for probe, torch_dtype in conversions:
        if np_dtype == probe:
            return torch_dtype
    return None
27 changes: 1 addition & 26 deletions qa/common/gen_ensemble_model_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,38 +30,13 @@

import numpy as np
import test_util as tu
from gen_common import np_to_model_dtype

BASIC_ENSEMBLE_TYPES = ["simple", "sequence", "fan"]

np_dtype_string = np.dtype(object)


def np_to_model_dtype(np_dtype):
    """Return the Triton model-config "TYPE_*" string matching *np_dtype*.

    Returns None when the dtype has no model-config equivalent.
    """
    # Ordered (probe, config string) pairs; the probe order matches the
    # original if/elif chain so numpy's dtype == semantics are preserved.
    conversions = (
        (bool, "TYPE_BOOL"),
        (np.int8, "TYPE_INT8"),
        (np.int16, "TYPE_INT16"),
        (np.int32, "TYPE_INT32"),
        (np.int64, "TYPE_INT64"),
        (np.uint8, "TYPE_UINT8"),
        (np.uint16, "TYPE_UINT16"),
        (np.float16, "TYPE_FP16"),
        (np.float32, "TYPE_FP32"),
        (np.float64, "TYPE_FP64"),
        (np.dtype(object), "TYPE_STRING"),  # object dtype carries strings
    )
    for probe, type_name in conversions:
        if np_dtype == probe:
            return type_name
    return None


def fixed_to_variable_size(shape):
return [-1] * len(shape)

Expand Down
66 changes: 1 addition & 65 deletions qa/common/gen_qa_dyna_sequence_implicit_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,76 +30,12 @@
import os

import numpy as np
from gen_common import np_to_model_dtype, np_to_onnx_dtype, np_to_trt_dtype

FLAGS = None
np_dtype_string = np.dtype(object)


def np_to_onnx_dtype(np_dtype):
    """Return the onnx.TensorProto element type matching *np_dtype*.

    Relies on the module-level ``onnx`` import. Falls through (implicitly
    returning None) when the dtype has no ONNX equivalent, as the original
    chain did.
    """
    # Ordered (probe, onnx type) pairs; the probe order matches the original
    # if/elif chain so numpy's dtype == semantics are preserved.
    conversions = (
        (bool, onnx.TensorProto.BOOL),
        (np.int8, onnx.TensorProto.INT8),
        (np.int16, onnx.TensorProto.INT16),
        (np.int32, onnx.TensorProto.INT32),
        (np.int64, onnx.TensorProto.INT64),
        (np.uint8, onnx.TensorProto.UINT8),
        (np.uint16, onnx.TensorProto.UINT16),
        (np.float16, onnx.TensorProto.FLOAT16),
        (np.float32, onnx.TensorProto.FLOAT),
        (np.float64, onnx.TensorProto.DOUBLE),
        (np.dtype(object), onnx.TensorProto.STRING),  # object dtype carries strings
    )
    for probe, onnx_type in conversions:
        if np_dtype == probe:
            return onnx_type
    return None


def np_to_model_dtype(np_dtype):
    """Return the Triton model-config "TYPE_*" string matching *np_dtype*.

    Returns None when the dtype has no model-config equivalent.
    """
    # Ordered (probe, config string) pairs; the probe order matches the
    # original if/elif chain so numpy's dtype == semantics are preserved.
    conversions = (
        (bool, "TYPE_BOOL"),
        (np.int8, "TYPE_INT8"),
        (np.int16, "TYPE_INT16"),
        (np.int32, "TYPE_INT32"),
        (np.int64, "TYPE_INT64"),
        (np.uint8, "TYPE_UINT8"),
        (np.uint16, "TYPE_UINT16"),
        (np.float16, "TYPE_FP16"),
        (np.float32, "TYPE_FP32"),
        (np.float64, "TYPE_FP64"),
        (np.dtype(object), "TYPE_STRING"),  # object dtype carries strings
    )
    for probe, type_name in conversions:
        if np_dtype == probe:
            return type_name
    return None


def np_to_trt_dtype(np_dtype):
    """Return the TensorRT dtype matching *np_dtype*.

    Relies on the module-level ``trt`` import. Returns None when the dtype
    has no TensorRT mapping here (note: deliberately no uint8 entry, to
    match the original chain's behavior).
    """
    # Ordered (probe, trt dtype) pairs; the probe order matches the original
    # if/elif chain so numpy's dtype == semantics are preserved.
    conversions = (
        (bool, trt.bool),
        (np.int8, trt.int8),
        (np.int32, trt.int32),
        (np.float16, trt.float16),
        (np.float32, trt.float32),
    )
    for probe, trt_dtype in conversions:
        if np_dtype == probe:
            return trt_dtype
    return None


def create_onnx_modelfile(models_dir, model_version, max_batch, dtype, shape):
if not tu.validate_for_onnx_model(dtype, dtype, dtype, shape, shape, shape):
return
Expand Down
Loading

0 comments on commit e0f70aa

Please sign in to comment.