[Feature] Apis unit test (open-mmlab#7)
* add apis test

* split torch2onnx impl, prepare for codebase test

* add is_available to backend

* lint
grimoire authored Jul 5, 2021
1 parent dff06ee commit 66a099f
Showing 9 changed files with 463 additions and 39 deletions.
2 changes: 1 addition & 1 deletion .isort.cfg
@@ -1,2 +1,2 @@
 [settings]
-known_third_party = mmcv,mmdet,numpy,onnx,setuptools,tensorrt,torch
+known_third_party = mmcv,mmdet,numpy,onnx,pytest,setuptools,tensorrt,torch
4 changes: 2 additions & 2 deletions mmdeploy/apis/__init__.py
@@ -1,3 +1,3 @@
-from .pytorch2onnx import torch2onnx
+from .pytorch2onnx import torch2onnx, torch2onnx_impl
 
-__all__ = ['torch2onnx']
+__all__ = ['torch2onnx_impl', 'torch2onnx']
13 changes: 13 additions & 0 deletions mmdeploy/apis/onnxruntime/__init__.py
@@ -0,0 +1,13 @@
+from .init_plugins import get_ops_path
+
+__all__ = ['get_ops_path']
+
+
+def is_available():
+    import os.path as osp
+    ort_op_path = get_ops_path()
+    if not osp.exists(ort_op_path):
+        return False
+
+    import importlib
+    return importlib.util.find_spec('onnxruntime') is not None
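For context, a minimal caller-side sketch (hypothetical, not part of this commit) of what the new check enables: the guard covers both the compiled custom-op library and the onnxruntime Python package.

    import mmdeploy.apis.onnxruntime as ort_apis

    if ort_apis.is_available():
        # Both build/lib/libmmlab_onnxruntime_ops.so and the onnxruntime
        # package were found, so backend-specific code may run.
        print(ort_apis.get_ops_path())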
27 changes: 27 additions & 0 deletions mmdeploy/apis/onnxruntime/init_plugins.py
@@ -0,0 +1,27 @@
+import ctypes
+import glob
+import logging
+import os
+
+
+def get_ops_path():
+    """Get ONNX Runtime plugins library path."""
+    wildcard = os.path.abspath(
+        os.path.join(
+            os.path.dirname(__file__),
+            '../../../build/lib/libmmlab_onnxruntime_ops.so'))
+
+    paths = glob.glob(wildcard)
+    lib_path = paths[0] if len(paths) > 0 else ''
+    return lib_path
+
+
+def load_onnxruntime_plugin():
+    """Load ONNX Runtime plugins library."""
+    lib_path = get_ops_path()
+    if os.path.exists(lib_path):
+        ctypes.CDLL(lib_path)
+        return 0
+    else:
+        logging.warning('Cannot load ONNX Runtime custom ops.')
+        return -1
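A hedged usage sketch for the helpers above, assuming the onnxruntime Python package is installed and a model.onnx file exists (both assumptions, not part of the commit): the path returned by get_ops_path can be registered on a session so the custom ops resolve at load time.

    import onnxruntime as ort

    from mmdeploy.apis.onnxruntime import get_ops_path

    session_options = ort.SessionOptions()
    lib_path = get_ops_path()
    if lib_path:
        # Load the compiled custom-op library into this session.
        session_options.register_custom_ops_library(lib_path)
    session = ort.InferenceSession('model.onnx', session_options)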
65 changes: 44 additions & 21 deletions mmdeploy/apis/pytorch2onnx.py
@@ -10,6 +10,39 @@
 from .utils import create_input, init_model
 
 
+def torch2onnx_impl(model: torch.nn.Module, input: torch.Tensor,
+                    deploy_cfg: Union[str, mmcv.Config], output_file: str):
+    # load deploy_cfg if needed
+    if isinstance(deploy_cfg, str):
+        deploy_cfg = mmcv.Config.fromfile(deploy_cfg)
+    if not isinstance(deploy_cfg, mmcv.Config):
+        raise TypeError('deploy_cfg must be a filename or Config object, '
+                        f'but got {type(deploy_cfg)}')
+
+    pytorch2onnx_cfg = deploy_cfg['pytorch2onnx']
+    backend = deploy_cfg['backend']
+    opset_version = pytorch2onnx_cfg.get('opset_version', 11)
+
+    # load registered symbolic
+    register_extra_symbolics(deploy_cfg, backend=backend, opset=opset_version)
+
+    # patch model
+    patched_model = patch_model(model, cfg=deploy_cfg, backend=backend)
+
+    with RewriterContext(cfg=deploy_cfg, backend=backend):
+        torch.onnx.export(
+            patched_model,
+            input,
+            output_file,
+            export_params=pytorch2onnx_cfg['export_params'],
+            input_names=pytorch2onnx_cfg['input_names'],
+            output_names=pytorch2onnx_cfg['output_names'],
+            opset_version=opset_version,
+            dynamic_axes=pytorch2onnx_cfg.get('dynamic_axes', None),
+            keep_initializers_as_inputs=pytorch2onnx_cfg[
+                'keep_initializers_as_inputs'])
+
+
 def torch2onnx(img: Any,
                work_dir: str,
                save_file: str,
@@ -18,7 +51,9 @@ def torch2onnx(img: Any,
                model_checkpoint: Optional[str] = None,
                device: str = 'cuda:0',
                ret_value: Optional[mp.Value] = None):
-    ret_value.value = -1
+
+    if ret_value is not None:
+        ret_value.value = -1
 
     # load deploy_cfg if needed
     if isinstance(deploy_cfg, str):
@@ -36,30 +71,18 @@ def torch2onnx(img: Any,
     mmcv.mkdir_or_exist(osp.abspath(work_dir))
     output_file = osp.join(work_dir, save_file)
 
-    pytorch2onnx_cfg = deploy_cfg['pytorch2onnx']
     codebase = deploy_cfg['codebase']
-    backend = deploy_cfg['backend']
-    opset_version = pytorch2onnx_cfg.get('opset_version', 11)
-    # load registed symbolic
-    register_extra_symbolics(deploy_cfg, backend=backend, opset=opset_version)
 
     torch_model = init_model(codebase, model_cfg, model_checkpoint, device)
     data, model_inputs = create_input(codebase, model_cfg, img, device)
-    patched_model = patch_model(torch_model, cfg=deploy_cfg, backend=backend)
 
     if not isinstance(model_inputs, torch.Tensor):
         model_inputs = model_inputs[0]
-    with RewriterContext(cfg=deploy_cfg, backend=backend):
-        torch.onnx.export(
-            patched_model,
-            model_inputs,
-            output_file,
-            export_params=pytorch2onnx_cfg['export_params'],
-            input_names=pytorch2onnx_cfg['input_names'],
-            output_names=pytorch2onnx_cfg['output_names'],
-            opset_version=opset_version,
-            dynamic_axes=pytorch2onnx_cfg.get('dynamic_axes', None),
-            keep_initializers_as_inputs=pytorch2onnx_cfg[
-                'keep_initializers_as_inputs'])
-
-    ret_value.value = 0
+    torch2onnx_impl(
+        torch_model,
+        model_inputs,
+        deploy_cfg=deploy_cfg,
+        output_file=output_file)
+
+    if ret_value is not None:
+        ret_value.value = 0
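For reference, a minimal deploy config sketch that provides the keys torch2onnx_impl and torch2onnx read; the concrete values are illustrative placeholders, not taken from the commit.

    import mmcv

    # Hypothetical config: only the key layout mirrors what the functions
    # access; every value is a placeholder.
    deploy_cfg = mmcv.Config(
        dict(
            backend='onnxruntime',
            codebase='mmdet',  # read by torch2onnx when building the model
            pytorch2onnx=dict(
                export_params=True,
                keep_initializers_as_inputs=True,
                opset_version=11,
                input_names=['input'],
                output_names=['output'],
                dynamic_axes={'input': {0: 'batch'}})))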
39 changes: 26 additions & 13 deletions mmdeploy/apis/tensorrt/__init__.py
@@ -1,14 +1,27 @@
 # flake8: noqa
-from .init_plugins import load_tensorrt_plugin
-from .onnx2tensorrt import onnx2tensorrt
-from .tensorrt_utils import (TRTWrapper, load_trt_engine, onnx2trt,
-                             save_trt_engine)
-
-# load tensorrt plugin lib
-load_tensorrt_plugin()
-
-__all__ = [
-    'onnx2trt', 'save_trt_engine', 'load_trt_engine', 'TRTWraper',
-    'TRTWrapper', 'is_tensorrt_plugin_loaded', 'preprocess_onnx',
-    'onnx2tensorrt'
-]
+from .init_plugins import get_ops_path, load_tensorrt_plugin
+
+
+def is_available():
+    import os.path as osp
+    tensorrt_op_path = get_ops_path()
+    if not osp.exists(tensorrt_op_path):
+        return False
+
+    import importlib
+    return importlib.util.find_spec('tensorrt') is not None
+
+
+if is_available():
+    from .onnx2tensorrt import onnx2tensorrt
+    from .tensorrt_utils import (TRTWrapper, load_trt_engine, onnx2trt,
+                                 save_trt_engine)
+
+    # load tensorrt plugin lib
+    load_tensorrt_plugin()
+
+    __all__ = [
+        'onnx2trt', 'save_trt_engine', 'load_trt_engine', 'TRTWraper',
+        'TRTWrapper', 'is_tensorrt_plugin_loaded', 'preprocess_onnx',
+        'onnx2tensorrt'
+    ]
4 changes: 2 additions & 2 deletions mmdeploy/apis/tensorrt/init_plugins.py
@@ -4,7 +4,7 @@
 import os
 
 
-def get_tensorrt_op_path():
+def get_ops_path():
     """Get TensorRT plugins library path."""
     wildcard = os.path.abspath(
         os.path.join(
@@ -18,7 +18,7 @@
 
 def load_tensorrt_plugin():
     """load TensorRT plugins library."""
-    lib_path = get_tensorrt_op_path()
+    lib_path = get_ops_path()
     if os.path.exists(lib_path):
         ctypes.CDLL(lib_path)
         return 0
109 changes: 109 additions & 0 deletions tests/test_apis/test_onnx2tensorrt.py
@@ -0,0 +1,109 @@
+import os
+import os.path as osp
+import shutil
+
+import mmcv
+import pytest
+import torch
+import torch.multiprocessing as mp
+from torch import nn
+
+import mmdeploy.apis.tensorrt as trt_apis
+
+# skip if the TensorRT apis cannot be loaded
+if not trt_apis.is_available():
+    pytest.skip('TensorRT apis are not prepared.', allow_module_level=True)
+trt = pytest.importorskip('tensorrt', reason='Import tensorrt failed.')
+if not torch.cuda.is_available():
+    pytest.skip('CUDA is not available.', allow_module_level=True)
+
+# load apis from trt_apis
+TRTWrapper = trt_apis.TRTWrapper
+onnx2tensorrt = trt_apis.onnx2tensorrt
+
+ret_value = mp.Value('d', 0, lock=False)
+work_dir = './tmp/'
+onnx_file = 'tmp.onnx'
+save_file = 'tmp.engine'
+
+
+@pytest.fixture(autouse=True)
+def clear_workdir_after_test():
+    # clear work_dir before test
+    if osp.exists(work_dir):
+        shutil.rmtree(work_dir)
+    os.mkdir(work_dir)
+
+    yield
+
+    # clear work_dir after test
+    if osp.exists(work_dir):
+        shutil.rmtree(work_dir)
+
+
+def test_onnx2tensorrt():
+
+    # dummy model
+    class TestModel(nn.Module):
+
+        def __init__(self):
+            super().__init__()
+
+        def forward(self, x):
+            return x + 1
+
+    model = TestModel().eval().cuda()
+    x = torch.rand(1, 3, 64, 64).cuda()
+
+    onnx_path = osp.join(work_dir, onnx_file)
+    # export to onnx
+    torch.onnx.export(
+        model,
+        x,
+        onnx_path,
+        input_names=['input'],
+        output_names=['output'],
+        dynamic_axes={'input': {
+            0: 'batch',
+            2: 'height',
+            3: 'width'
+        }})
+
+    assert osp.exists(onnx_path)
+
+    # deploy config
+    deploy_cfg = mmcv.Config(
+        dict(
+            backend='tensorrt',
+            tensorrt_param=dict(
+                shared_param=dict(
+                    log_level=trt.Logger.WARNING, fp16_mode=False),
+                model_params=[
+                    dict(
+                        opt_shape_dict=dict(
+                            input=[[1, 3, 32, 32], [1, 3, 64, 64],
+                                   [1, 3, 128, 128]]),
+                        max_workspace_size=1 << 30)
+                ])))
+
+    # convert to engine
+    onnx2tensorrt(
+        work_dir,
+        save_file,
+        0,
+        deploy_cfg=deploy_cfg,
+        onnx_model=onnx_path,
+        ret_value=ret_value)
+
+    assert ret_value.value == 0
+    assert osp.exists(work_dir)
+    assert osp.exists(osp.join(work_dir, save_file))
+
+    # test
+    trt_model = TRTWrapper(osp.join(work_dir, save_file))
+    x = x.cuda()
+
+    with torch.no_grad():
+        trt_output = trt_model({'input': x})['output']
+
+    torch.testing.assert_allclose(trt_output, x + 1)
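As a usage note, the ret_value shared value exists so the conversion can also run in a worker process. A hedged sketch of that pattern, reusing the names from the test above and assuming onnx2tensorrt follows the same ret_value protocol as torch2onnx (0 on success, -1 otherwise):

    import torch.multiprocessing as mp

    from mmdeploy.apis.tensorrt import onnx2tensorrt

    # Sketch only: run the conversion in a subprocess and read the result
    # back through the shared value.
    ret_value = mp.Value('d', -1, lock=False)
    proc = mp.Process(
        target=onnx2tensorrt,
        args=(work_dir, save_file, 0),
        kwargs=dict(
            deploy_cfg=deploy_cfg, onnx_model=onnx_path,
            ret_value=ret_value))
    proc.start()
    proc.join()
    assert ret_value.value == 0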