diff --git a/.gitignore b/.gitignore
index 3c4bdc851e..5e424cdba2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,6 +5,7 @@ __pycache__/
 
 # C extensions
 *.so
+onnx2ncnn
 
 # Distribution / packaging
 .Python
diff --git a/MANIFEST.in b/MANIFEST.in
index e534d45947..7c85a3240b 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1 +1,7 @@
 include requirements/*.txt
+include mmdeploy/backend/ncnn/*.so
+include mmdeploy/backend/ncnn/*.dll
+include mmdeploy/backend/ncnn/*.pyd
+include mmdeploy/lib/*.so
+include mmdeploy/lib/*.dll
+include mmdeploy/lib/*.pyd
diff --git a/csrc/backend_ops/ncnn/onnx2ncnn/CMakeLists.txt b/csrc/backend_ops/ncnn/onnx2ncnn/CMakeLists.txt
index 21e413e4ac..705113ae7c 100755
--- a/csrc/backend_ops/ncnn/onnx2ncnn/CMakeLists.txt
+++ b/csrc/backend_ops/ncnn/onnx2ncnn/CMakeLists.txt
@@ -12,6 +12,8 @@ if (PROTOBUF_FOUND)
             ${CMAKE_CURRENT_BINARY_DIR})
     target_link_libraries(onnx2ncnn PRIVATE ${PROTOBUF_LIBRARIES})
 
+    set(_NCNN_CONVERTER_DIR ${CMAKE_SOURCE_DIR}/mmdeploy/backend/ncnn)
+    install(TARGETS onnx2ncnn DESTINATION ${_NCNN_CONVERTER_DIR})
 else ()
     message(
         FATAL_ERROR "Protobuf not found, onnx model convert tool won't be built")
diff --git a/csrc/backend_ops/ncnn/ops/CMakeLists.txt b/csrc/backend_ops/ncnn/ops/CMakeLists.txt
index 461301211a..8af1b85f0b 100755
--- a/csrc/backend_ops/ncnn/ops/CMakeLists.txt
+++ b/csrc/backend_ops/ncnn/ops/CMakeLists.txt
@@ -23,3 +23,6 @@ target_include_directories(${PROJECT_NAME} PUBLIC
     ${_COMMON_INCLUDE_DIRS})
 
 add_library(mmdeploy::ncnn_ops ALIAS ${PROJECT_NAME})
+
+set(_NCNN_OPS_DIR ${CMAKE_SOURCE_DIR}/mmdeploy/lib)
+install(TARGETS ${PROJECT_NAME} DESTINATION ${_NCNN_OPS_DIR})
diff --git a/csrc/backend_ops/onnxruntime/CMakeLists.txt b/csrc/backend_ops/onnxruntime/CMakeLists.txt
index 5dfa8176b0..3d1a1e14c3 100644
--- a/csrc/backend_ops/onnxruntime/CMakeLists.txt
+++ b/csrc/backend_ops/onnxruntime/CMakeLists.txt
@@ -22,3 +22,6 @@ target_link_libraries(${PROJECT_NAME}_obj PUBLIC onnxruntime)
 mmdeploy_add_library(${PROJECT_NAME} SHARED EXCLUDE "")
 target_link_libraries(${PROJECT_NAME} PUBLIC ${PROJECT_NAME}_obj)
 add_library(mmdeploy::onnxruntime::ops ALIAS ${PROJECT_NAME})
+
+set(_ORT_OPS_DIR ${CMAKE_SOURCE_DIR}/mmdeploy/lib)
+install(TARGETS ${PROJECT_NAME} DESTINATION ${_ORT_OPS_DIR})
diff --git a/csrc/backend_ops/tensorrt/CMakeLists.txt b/csrc/backend_ops/tensorrt/CMakeLists.txt
index 14db917dd3..9f375596fa 100644
--- a/csrc/backend_ops/tensorrt/CMakeLists.txt
+++ b/csrc/backend_ops/tensorrt/CMakeLists.txt
@@ -35,3 +35,6 @@ mmdeploy_export(${PROJECT_NAME}_obj)
 mmdeploy_add_module(${PROJECT_NAME} MODULE EXCLUDE "")
 target_link_libraries(${PROJECT_NAME} PRIVATE ${PROJECT_NAME}_obj)
 add_library(mmdeploy::tensorrt_ops ALIAS ${PROJECT_NAME})
+
+set(_TRT_OPS_DIR ${CMAKE_SOURCE_DIR}/mmdeploy/lib)
+install(TARGETS ${PROJECT_NAME} DESTINATION ${_TRT_OPS_DIR})
diff --git a/mmdeploy/__init__.py b/mmdeploy/__init__.py
index 805db5c94b..a57f906701 100644
--- a/mmdeploy/__init__.py
+++ b/mmdeploy/__init__.py
@@ -4,7 +4,11 @@ from mmdeploy.utils import get_root_logger
 from .version import __version__  # noqa F401
 
-importlib.import_module('mmdeploy.pytorch')
+if importlib.util.find_spec('torch'):
+    importlib.import_module('mmdeploy.pytorch')
+else:
+    logger = get_root_logger()
+    logger.debug('torch is not installed.')
 
 if importlib.util.find_spec('mmcv'):
     importlib.import_module('mmdeploy.mmcv')
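The guard above mirrors the existing `mmcv` check so that `import mmdeploy` works in a runtime-only environment without PyTorch. A minimal, self-contained sketch of the idiom, in case it is reused for other optional dependencies (the module names are only examples):

```python
import importlib
import importlib.util

# Import a torch-dependent submodule only when torch itself is importable,
# so a runtime-only install of mmdeploy does not crash at import time.
if importlib.util.find_spec('torch') is not None:
    importlib.import_module('mmdeploy.pytorch')
```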
diff --git a/mmdeploy/backend/__init__.py b/mmdeploy/backend/__init__.py
index 3385ad598e..ef101fec61 100644
--- a/mmdeploy/backend/__init__.py
+++ b/mmdeploy/backend/__init__.py
@@ -1,27 +1 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-from mmdeploy.backend.ncnn import is_available as ncnn_available
-from mmdeploy.backend.onnxruntime import is_available as ort_available
-from mmdeploy.backend.openvino import is_available as openvino_available
-from mmdeploy.backend.pplnn import is_available as pplnn_available
-from mmdeploy.backend.sdk import is_available as sdk_available
-from mmdeploy.backend.tensorrt import is_available as trt_available
-
-__all__ = []
-if ncnn_available():
-    from .ncnn import NCNNWrapper  # noqa: F401,F403
-    __all__.append('NCNNWrapper')
-if ort_available():
-    from .onnxruntime import ORTWrapper  # noqa: F401,F403
-    __all__.append('ORTWrapper')
-if trt_available():
-    from .tensorrt import TRTWrapper  # noqa: F401,F403
-    __all__.append('TRTWrapper')
-if pplnn_available():
-    from .pplnn import PPLNNWrapper  # noqa: F401,F403
-    __all__.append('PPLNNWrapper')
-if openvino_available():
-    from .openvino import OpenVINOWrapper  # noqa: F401,F403
-    __all__.append('OpenVINOWrapper')
-if sdk_available():
-    from .sdk import SDKWrapper  # noqa: F401,F403
-    __all__.append('SDKWrapper')
diff --git a/mmdeploy/backend/ncnn/__init__.py b/mmdeploy/backend/ncnn/__init__.py
index e241b6d2ae..41b2c0ad94 100644
--- a/mmdeploy/backend/ncnn/__init__.py
+++ b/mmdeploy/backend/ncnn/__init__.py
@@ -32,6 +32,9 @@ def is_plugin_available():
 
 if is_available():
-    from .wrapper import NCNNWrapper
+    try:
+        from .wrapper import NCNNWrapper
 
-    __all__ = ['NCNNWrapper']
+        __all__ = ['NCNNWrapper']
+    except Exception:
+        pass
diff --git a/mmdeploy/backend/ncnn/init_plugins.py b/mmdeploy/backend/ncnn/init_plugins.py
index 6e3a678cdc..7eebde77c7 100644
--- a/mmdeploy/backend/ncnn/init_plugins.py
+++ b/mmdeploy/backend/ncnn/init_plugins.py
@@ -11,8 +11,7 @@ def get_ops_path() -> str:
         str: The library path of ncnn custom ops.
     """
     candidates = [
-        '../../../build/lib/libmmdeploy_ncnn_ops.so',
-        '../../../build/bin/*/mmdeploy_ncnn_ops.dll'
+        '../../lib/libmmdeploy_ncnn_ops.so', '../../lib/mmdeploy_ncnn_ops.dll'
     ]
     return get_file_path(os.path.dirname(__file__), candidates)
 
@@ -23,7 +22,5 @@ def get_onnx2ncnn_path() -> str:
     Returns:
         str: A path of onnx2ncnn tool.
     """
-    candidates = [
-        '../../../build/bin/onnx2ncnn', '../../../build/bin/*/onnx2ncnn.exe'
-    ]
+    candidates = ['./onnx2ncnn', './onnx2ncnn.exe']
     return get_file_path(os.path.dirname(__file__), candidates)
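The relocated candidates point at the packaged `mmdeploy/lib` and `mmdeploy/backend/ncnn` directories instead of the build tree, and are resolved by `mmdeploy.utils.get_file_path`. A rough stand-in showing the intended lookup order (the real helper lives in `mmdeploy/utils/utils.py`; this sketch only approximates its behavior):

```python
import glob
import os.path as osp


def find_first(prefix_dir, candidates):
    """Approximation of mmdeploy.utils.get_file_path: return the first
    candidate (glob patterns allowed) that exists under prefix_dir."""
    for candidate in candidates:
        matches = glob.glob(osp.join(prefix_dir, candidate))
        if matches:
            return matches[0]
    return ''
```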
""" - mmcv.mkdir_or_exist(osp.abspath(work_dir)) + mkdir_or_exist(osp.abspath(work_dir)) file_name = osp.splitext(osp.split(onnx_path)[1])[0] save_param = osp.join(work_dir, file_name + '.param') save_bin = osp.join(work_dir, file_name + '.bin') diff --git a/mmdeploy/backend/onnxruntime/__init__.py b/mmdeploy/backend/onnxruntime/__init__.py index 09399a41b4..a6dc88c6be 100644 --- a/mmdeploy/backend/onnxruntime/__init__.py +++ b/mmdeploy/backend/onnxruntime/__init__.py @@ -26,5 +26,9 @@ def is_plugin_available(): if is_available(): - from .wrapper import ORTWrapper - __all__ = ['ORTWrapper'] + try: + # import wrapper if pytorch is available + from .wrapper import ORTWrapper + __all__ = ['ORTWrapper'] + except Exception: + pass diff --git a/mmdeploy/backend/onnxruntime/init_plugins.py b/mmdeploy/backend/onnxruntime/init_plugins.py index e8622eedf3..3c194fddb7 100644 --- a/mmdeploy/backend/onnxruntime/init_plugins.py +++ b/mmdeploy/backend/onnxruntime/init_plugins.py @@ -11,7 +11,7 @@ def get_ops_path() -> str: str: The library path to onnxruntime custom ops. """ candidates = [ - '../../../build/lib/libmmdeploy_onnxruntime_ops.so', - '../../../build/bin/*/mmdeploy_onnxruntime_ops.dll', + '../../lib/libmmdeploy_onnxruntime_ops.so', + '../../lib/mmdeploy_onnxruntime_ops.dll', ] return get_file_path(os.path.dirname(__file__), candidates) diff --git a/mmdeploy/backend/sdk/__init__.py b/mmdeploy/backend/sdk/__init__.py index ef648c4d5b..e64379dd7a 100644 --- a/mmdeploy/backend/sdk/__init__.py +++ b/mmdeploy/backend/sdk/__init__.py @@ -18,13 +18,20 @@ if lib_path: lib_dir = os.path.dirname(lib_path) - sys.path.insert(0, lib_dir) + sys.path.append(lib_dir) if importlib.util.find_spec(module_name) is not None: - from .wrapper import SDKWrapper - __all__ = ['SDKWrapper'] _is_available = True def is_available() -> bool: return _is_available + + +if is_available(): + + try: + from .wrapper import SDKWrapper + __all__ = ['SDKWrapper'] + except Exception: + pass diff --git a/mmdeploy/backend/tensorrt/__init__.py b/mmdeploy/backend/tensorrt/__init__.py index de0a71574e..b86fd8efdd 100644 --- a/mmdeploy/backend/tensorrt/__init__.py +++ b/mmdeploy/backend/tensorrt/__init__.py @@ -3,8 +3,6 @@ import importlib import os.path as osp -import torch - from .init_plugins import get_ops_path, load_tensorrt_plugin @@ -15,8 +13,7 @@ def is_available(): bool: True if TensorRT package is installed and cuda is available. 
""" - return importlib.util.find_spec('tensorrt') is not None and \ - torch.cuda.is_available() + return importlib.util.find_spec('tensorrt') is not None def is_plugin_available(): @@ -31,9 +28,15 @@ def is_plugin_available(): if is_available(): from .utils import create_trt_engine, load_trt_engine, save_trt_engine - from .wrapper import TRTWrapper __all__ = [ 'create_trt_engine', 'save_trt_engine', 'load_trt_engine', - 'TRTWrapper', 'load_tensorrt_plugin' + 'load_tensorrt_plugin' ] + + try: + # import wrapper if pytorch is available + from .wrapper import TRTWrapper + __all__ += ['TRTWrapper'] + except Exception: + pass diff --git a/mmdeploy/backend/tensorrt/calib_utils.py b/mmdeploy/backend/tensorrt/calib_utils.py index 9a969b5f9a..fdd1fda184 100644 --- a/mmdeploy/backend/tensorrt/calib_utils.py +++ b/mmdeploy/backend/tensorrt/calib_utils.py @@ -3,8 +3,9 @@ import h5py import numpy as np +import pycuda.autoinit # noqa:F401 +import pycuda.driver as cuda import tensorrt as trt -import torch DEFAULT_CALIBRATION_ALGORITHM = trt.CalibrationAlgoType.ENTROPY_CALIBRATION_2 @@ -67,30 +68,28 @@ def get_batch(self, names: Sequence[str], **kwargs) -> list: ret = [] for name in names: input_group = self.calib_data[name] - data_np = input_group[str(self.count)][...] - data_torch = torch.from_numpy(data_np) + data_np = input_group[str(self.count)][...].astype(np.float32) # tile the tensor so we can keep the same distribute opt_shape = self.input_shapes[name]['opt_shape'] - data_shape = data_torch.shape + data_shape = data_np.shape reps = [ int(np.ceil(opt_s / data_s)) for opt_s, data_s in zip(opt_shape, data_shape) ] - data_torch = data_torch.tile(reps) + data_np = np.tile(data_np, reps) - for dim, opt_s in enumerate(opt_shape): - if data_torch.shape[dim] != opt_s: - data_torch = data_torch.narrow(dim, 0, opt_s) + slice_list = tuple(slice(0, end) for end in opt_shape) + data_np = data_np[slice_list] - if name not in self.buffers: - self.buffers[name] = data_torch.cuda(self.device_id) - else: - self.buffers[name].copy_(data_torch.cuda(self.device_id)) + data_np_cuda_ptr = cuda.mem_alloc(data_np.nbytes) + cuda.memcpy_htod(data_np_cuda_ptr, + np.ascontiguousarray(data_np)) + self.buffers[name] = data_np_cuda_ptr - ret.append(int(self.buffers[name].data_ptr())) + ret.append(self.buffers[name]) self.count += 1 return ret else: diff --git a/mmdeploy/backend/tensorrt/init_plugins.py b/mmdeploy/backend/tensorrt/init_plugins.py index 80c6eea4d7..d1dcc9d8bb 100644 --- a/mmdeploy/backend/tensorrt/init_plugins.py +++ b/mmdeploy/backend/tensorrt/init_plugins.py @@ -12,8 +12,8 @@ def get_ops_path() -> str: str: A path of the TensorRT plugin library. 
""" candidates = [ - '../../../build/lib/libmmdeploy_tensorrt_ops.so', - '../../../build/bin/*/mmdeploy_tensorrt_ops.dll' + '../../lib/libmmdeploy_tensorrt_ops.so', + '../../lib/mmdeploy_tensorrt_ops.dll' ] return get_file_path(os.path.dirname(__file__), candidates) diff --git a/mmdeploy/backend/tensorrt/utils.py b/mmdeploy/backend/tensorrt/utils.py index 3dc0882d2f..4a2c56ca2b 100644 --- a/mmdeploy/backend/tensorrt/utils.py +++ b/mmdeploy/backend/tensorrt/utils.py @@ -4,11 +4,9 @@ import onnx import tensorrt as trt -import torch from packaging import version from mmdeploy.utils import get_root_logger -from .calib_utils import HDF5Calibrator from .init_plugins import load_tensorrt_plugin @@ -54,8 +52,17 @@ def create_trt_engine(onnx_model: Union[str, onnx.ModelProto], >>> device_id=0) >>> }) """ + + import os + old_cuda_device = os.environ.get('CUDA_DEVICE', None) + os.environ['CUDA_DEVICE'] = str(device_id) + import pycuda.autoinit # noqa:F401 + if old_cuda_device is not None: + os.environ['CUDA_DEVICE'] = old_cuda_device + else: + os.environ.pop('CUDA_DEVICE') + load_tensorrt_plugin() - device = torch.device('cuda:{}'.format(device_id)) # create builder and network logger = trt.Logger(log_level) builder = trt.Builder(logger) @@ -96,6 +103,7 @@ def create_trt_engine(onnx_model: Union[str, onnx.ModelProto], config.set_flag(trt.BuilderFlag.FP16) if int8_mode: + from .calib_utils import HDF5Calibrator config.set_flag(trt.BuilderFlag.INT8) assert int8_param is not None config.int8_calibrator = HDF5Calibrator( @@ -110,8 +118,7 @@ def create_trt_engine(onnx_model: Union[str, onnx.ModelProto], builder.int8_calibrator = config.int8_calibrator # create engine - with torch.cuda.device(device): - engine = builder.build_engine(network, config) + engine = builder.build_engine(network, config) assert engine is not None, 'Failed to create TensorRT engine' return engine @@ -145,46 +152,6 @@ def load_trt_engine(path: str) -> trt.ICudaEngine: return engine -def torch_dtype_from_trt(dtype: trt.DataType) -> torch.dtype: - """Convert pytorch dtype to TensorRT dtype. - - Args: - dtype (str.DataType): The data type in tensorrt. - - Returns: - torch.dtype: The corresponding data type in torch. - """ - - if dtype == trt.bool: - return torch.bool - elif dtype == trt.int8: - return torch.int8 - elif dtype == trt.int32: - return torch.int32 - elif dtype == trt.float16: - return torch.float16 - elif dtype == trt.float32: - return torch.float32 - else: - raise TypeError(f'{dtype} is not supported by torch') - - -def torch_device_from_trt(device: trt.TensorLocation): - """Convert pytorch device to TensorRT device. - - Args: - device (trt.TensorLocation): The device in tensorrt. - Returns: - torch.device: The corresponding device in torch. - """ - if device == trt.TensorLocation.DEVICE: - return torch.device('cuda') - elif device == trt.TensorLocation.HOST: - return torch.device('cpu') - else: - return TypeError(f'{device} is not supported by torch') - - def get_trt_log_level() -> trt.Logger.Severity: """Get tensorrt log level from root logger. 
diff --git a/mmdeploy/backend/tensorrt/wrapper.py b/mmdeploy/backend/tensorrt/wrapper.py
index 9a0cd2e10f..9a23d5b2a4 100644
--- a/mmdeploy/backend/tensorrt/wrapper.py
+++ b/mmdeploy/backend/tensorrt/wrapper.py
@@ -8,7 +8,47 @@
 from mmdeploy.utils.timer import TimeCounter
 from ..base import BACKEND_WRAPPER, BaseWrapper
 from .init_plugins import load_tensorrt_plugin
-from .utils import load_trt_engine, torch_device_from_trt, torch_dtype_from_trt
+from .utils import load_trt_engine
+
+
+def torch_dtype_from_trt(dtype: trt.DataType) -> torch.dtype:
+    """Convert TensorRT dtype to pytorch dtype.
+
+    Args:
+        dtype (trt.DataType): The data type in tensorrt.
+
+    Returns:
+        torch.dtype: The corresponding data type in torch.
+    """
+
+    if dtype == trt.bool:
+        return torch.bool
+    elif dtype == trt.int8:
+        return torch.int8
+    elif dtype == trt.int32:
+        return torch.int32
+    elif dtype == trt.float16:
+        return torch.float16
+    elif dtype == trt.float32:
+        return torch.float32
+    else:
+        raise TypeError(f'{dtype} is not supported by torch')
+
+
+def torch_device_from_trt(device: trt.TensorLocation):
+    """Convert TensorRT device to pytorch device.
+
+    Args:
+        device (trt.TensorLocation): The device in tensorrt.
+    Returns:
+        torch.device: The corresponding device in torch.
+    """
+    if device == trt.TensorLocation.DEVICE:
+        return torch.device('cuda')
+    elif device == trt.TensorLocation.HOST:
+        return torch.device('cpu')
+    else:
+        raise TypeError(f'{device} is not supported by torch')
 
 
 @BACKEND_WRAPPER.register_module(Backend.TENSORRT.value)
diff --git a/mmdeploy/mmcv/ops/deform_conv.py b/mmdeploy/mmcv/ops/deform_conv.py
index ccd0542678..3e2a436f48 100644
--- a/mmdeploy/mmcv/ops/deform_conv.py
+++ b/mmdeploy/mmcv/ops/deform_conv.py
@@ -26,7 +26,7 @@ def deform_conv__default(ctx,
         padding_i=[p for pair in zip(padding, padding) for p in pair],
         dilation_i=dilation,
         groups_i=groups,
-        deformable_groups_i=deform_groups)
+        deform_groups_i=deform_groups)
 
 
 @SYMBOLIC_REWRITER.register_symbolic(
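Moving the two conversion helpers into wrapper.py keeps torch out of `tensorrt/utils.py`, so the engine-building path no longer needs it; the helpers now live next to their only consumer. Typical use when allocating output bindings (a sketch, assuming tensorrt and torch are both installed):

```python
import tensorrt as trt
import torch

from mmdeploy.backend.tensorrt.wrapper import torch_dtype_from_trt

# Map an engine binding's dtype to the torch dtype of the output tensor
# that will receive the binding's contents.
assert torch_dtype_from_trt(trt.float32) is torch.float32
assert torch_dtype_from_trt(trt.int32) is torch.int32
```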
diff --git a/mmdeploy/utils/__init__.py b/mmdeploy/utils/__init__.py
index 4847ba7b09..dda45c09d2 100644
--- a/mmdeploy/utils/__init__.py
+++ b/mmdeploy/utils/__init__.py
@@ -1,24 +1,37 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-from .config_utils import (cfg_apply_marks, get_backend, get_backend_config,
-                           get_calib_config, get_calib_filename, get_codebase,
-                           get_codebase_config, get_common_config,
-                           get_dynamic_axes, get_input_shape, get_ir_config,
-                           get_model_inputs, get_onnx_config,
-                           get_partition_config, get_task_type,
-                           is_dynamic_batch, is_dynamic_shape, load_config)
+import importlib
+
 from .constants import IR, SDK_TASK_MAP, Backend, Codebase, Task
 from .device import parse_cuda_device_id, parse_device_id
 from .env import get_backend_version, get_codebase_version, get_library_version
 from .utils import get_file_path, get_root_logger, target_wrapper
 
 __all__ = [
-    'is_dynamic_batch', 'is_dynamic_shape', 'get_task_type', 'get_codebase',
-    'get_backend', 'load_config', 'Backend', 'Codebase', 'Task',
-    'get_ir_config', 'get_onnx_config', 'get_partition_config',
-    'get_calib_config', 'get_calib_filename', 'get_common_config',
-    'get_model_inputs', 'cfg_apply_marks', 'get_input_shape',
-    'parse_device_id', 'parse_cuda_device_id', 'get_codebase_config',
-    'get_backend_config', 'get_root_logger', 'get_dynamic_axes',
-    'target_wrapper', 'SDK_TASK_MAP', 'get_library_version',
-    'get_codebase_version', 'get_backend_version', 'IR', 'get_file_path'
+    'SDK_TASK_MAP', 'IR', 'Backend', 'Codebase', 'Task',
+    'parse_cuda_device_id', 'get_library_version', 'get_codebase_version',
+    'get_backend_version', 'parse_device_id', 'get_file_path',
+    'get_root_logger', 'target_wrapper'
 ]
+
+if importlib.util.find_spec('mmcv') is not None:
+    # yapf: disable
+    from .config_utils import (cfg_apply_marks, get_backend,
+                               get_backend_config, get_calib_config,
+                               get_calib_filename, get_codebase,
+                               get_codebase_config, get_common_config,
+                               get_dynamic_axes, get_input_shape,
+                               get_ir_config, get_model_inputs,
+                               get_onnx_config, get_partition_config,
+                               get_task_type, is_dynamic_batch,
+                               is_dynamic_shape, load_config)
+
+    # yapf: enable
+
+    __all__ += [
+        'cfg_apply_marks', 'get_backend', 'get_backend_config',
+        'get_calib_config', 'get_calib_filename', 'get_codebase',
+        'get_codebase_config', 'get_common_config', 'get_dynamic_axes',
+        'get_input_shape', 'get_ir_config', 'get_model_inputs',
+        'get_onnx_config', 'get_partition_config', 'get_task_type',
+        'is_dynamic_batch', 'is_dynamic_shape', 'load_config'
+    ]
diff --git a/mmdeploy/utils/device.py b/mmdeploy/utils/device.py
index 1925e6523c..1b980e449c 100644
--- a/mmdeploy/utils/device.py
+++ b/mmdeploy/utils/device.py
@@ -1,8 +1,7 @@
 # Copyright (c) OpenMMLab. All rights reserved.
+import re
 from typing import Optional
 
-import torch
-
 
 def parse_device_id(device: str) -> Optional[int]:
     """Parse device index from a string.
@@ -34,9 +33,11 @@ def parse_cuda_device_id(device: str) -> int:
     Returns:
         int: The parsed device id, defaults to `0`.
     """
-    device = torch.device(device)
-    assert device.type == 'cuda', 'Not cuda device.'
+    match_result = re.match('([^:]+)(:[0-9]+)?$', device)
+    assert match_result is not None, f'Can not parse device {device}.'
+    assert match_result.group(1).lower() == 'cuda', 'Not cuda device.'
 
-    device_id = 0 if device.index is None else device.index
+    device_id = 0 if match_result.lastindex == 1 else int(
+        match_result.group(2)[1:])
 
     return device_id
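The regex keeps the old `torch.device` semantics for strings such as 'cuda' and 'cuda:1', but raises AssertionError instead of RuntimeError on malformed input (hence the test change later in this diff). A doctest-style check that mirrors the logic above rather than importing it:

```python
import re


def parse_cuda_id(device):
    # Same pattern as parse_cuda_device_id: 'cuda' -> 0, 'cuda:1' -> 1;
    # anything that is not a cuda device fails the assertions.
    match = re.match('([^:]+)(:[0-9]+)?$', device)
    assert match is not None, f'Can not parse device {device}.'
    assert match.group(1).lower() == 'cuda', 'Not cuda device.'
    return 0 if match.group(2) is None else int(match.group(2)[1:])


assert parse_cuda_id('cuda') == 0
assert parse_cuda_id('cuda:1') == 1
```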
diff --git a/mmdeploy/utils/logging.py b/mmdeploy/utils/logging.py
new file mode 100644
index 0000000000..7a6ea65d11
--- /dev/null
+++ b/mmdeploy/utils/logging.py
@@ -0,0 +1,65 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import logging
+from typing import Optional
+
+logger_initialized = {}
+
+
+def get_logger(name: str,
+               log_file: Optional[str] = None,
+               log_level: int = logging.INFO,
+               file_mode: str = 'w'):
+    """Initialize and get a logger by name.
+
+    If the logger has not been initialized, this method will initialize the
+    logger by adding one or two handlers, otherwise the initialized logger
+    will be directly returned. During initialization, a StreamHandler will
+    always be added. If `log_file` is specified, a FileHandler will also be
+    added.
+
+    Args:
+        name (str): Logger name.
+        log_file (str | None): The log filename. If specified, a FileHandler
+            will be added to the logger.
+        log_level (int): The logger level.
+        file_mode (str): The file mode used in opening log file.
+            Defaults to 'w'.
+
+    Returns:
+        logging.Logger: The expected logger.
+    """
+    logger = logging.getLogger(name)
+    if name in logger_initialized:
+        return logger
+    # handle hierarchical names
+    # e.g., logger "a" is initialized, then logger "a.b" will skip the
+    # initialization since it is a child of "a".
+    for logger_name in logger_initialized:
+        if name.startswith(logger_name):
+            return logger
+
+    # handle duplicate logs to the console
+    for handler in logger.root.handlers:
+        if type(handler) is logging.StreamHandler:
+            handler.setLevel(logging.ERROR)
+
+    stream_handler = logging.StreamHandler()
+    handlers = [stream_handler]
+
+    if log_file is not None:
+        # Here, the default behaviour of the official logger is 'a'. Thus,
+        # we provide an interface to change the file mode to the default
+        # behaviour.
+        file_handler = logging.FileHandler(log_file, file_mode)
+        handlers.append(file_handler)
+
+    formatter = logging.Formatter(
+        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+    for handler in handlers:
+        handler.setFormatter(formatter)
+        handler.setLevel(log_level)
+        logger.addHandler(handler)
+
+    logger.setLevel(log_level)
+    logger_initialized[name] = True
+
+    return logger
diff --git a/mmdeploy/utils/timer.py b/mmdeploy/utils/timer.py
index 2c7702ec3d..4e4e1a8a55 100644
--- a/mmdeploy/utils/timer.py
+++ b/mmdeploy/utils/timer.py
@@ -5,7 +5,8 @@ from typing import Optional
 
 import torch
-from mmcv.utils import get_logger
+
+from mmdeploy.utils.logging import get_logger
 
 
 class TimeCounter:
diff --git a/mmdeploy/utils/utils.py b/mmdeploy/utils/utils.py
index 10d6d02bd0..29b94dde65 100644
--- a/mmdeploy/utils/utils.py
+++ b/mmdeploy/utils/utils.py
@@ -6,8 +6,9 @@ import traceback
 from typing import Callable, Optional, Union
 
-import torch.multiprocessing as mp
-from mmcv.utils import get_logger
+import multiprocess as mp
+
+from mmdeploy.utils.logging import get_logger
 
 
 def target_wrapper(target: Callable,
@@ -27,8 +28,7 @@ def target_wrapper(target: Callable,
     """
     logger = logging.getLogger()
     logging.basicConfig(
-        format='%(asctime)s,%(name)s %(levelname)-8s'
-        ' [%(filename)s:%(lineno)d] %(message)s',
+        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
         datefmt='%Y-%m-%d:%H:%M:%S')
     logger.level
     logger.setLevel(log_level)
diff --git a/requirements/runtime.txt b/requirements/runtime.txt
index 6114dfc58f..82e152466d 100644
--- a/requirements/runtime.txt
+++ b/requirements/runtime.txt
@@ -1,5 +1,6 @@
 h5py
 matplotlib
+multiprocess
 numpy
 onnx>=1.8.0
 six
diff --git a/setup.cfg b/setup.cfg
index f2dac821a1..2073768695 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,6 +1,3 @@
-[bdist_wheel]
-universal=1
-
 [aliases]
 test=pytest
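With `mmdeploy.utils.logging.get_logger` in place, the timer and utils modules no longer pull in mmcv just for logging, and `target_wrapper` switches from `torch.multiprocessing` to the standalone `multiprocess` package. Callers are unaffected; a sketch:

```python
from mmdeploy.utils import get_root_logger

# get_root_logger() is now backed by mmdeploy.utils.logging.get_logger,
# so logging works in environments without mmcv installed.
logger = get_root_logger()
logger.info('hello from a runtime-only install')
```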
diff --git a/setup.py b/setup.py
index cb22dc54ee..86e5cdf022 100644
--- a/setup.py
+++ b/setup.py
@@ -1,25 +1,19 @@
-import logging
+import os
 
 from setuptools import find_packages, setup
 
+pwd = os.path.dirname(__file__)
 version_file = 'mmdeploy/version.py'
 
-try:
-    from torch.utils.cpp_extension import BuildExtension
-    cmd_class = {'build_ext': BuildExtension}
-except ModuleNotFoundError:
-    cmd_class = {}
-    logging.warning('Skip building ext ops due to the absence of torch.')
-
 
 def readme():
-    with open('README.md', encoding='utf-8') as f:
+    with open(os.path.join(pwd, 'README.md'), encoding='utf-8') as f:
         content = f.read()
     return content
 
 
 def get_version():
-    with open(version_file, 'r') as f:
+    with open(os.path.join(pwd, version_file), 'r') as f:
         exec(compile(f.read(), version_file, 'exec'))
     return locals()['__version__']
@@ -135,5 +129,5 @@ def gen_packages_items():
             'optional': parse_requirements('requirements/optional.txt'),
         },
         ext_modules=[],
-        cmdclass=cmd_class,
+        cmdclass={},
         zip_safe=False)
diff --git a/tests/test_utils/test_util.py b/tests/test_utils/test_util.py
index 5d34d5040f..081a8e779e 100644
--- a/tests/test_utils/test_util.py
+++ b/tests/test_utils/test_util.py
@@ -391,7 +391,7 @@ def test_cuda10(self):
 
     def test_incorrect_cuda_device(self):
         device = 'cuda_5'
-        with pytest.raises(RuntimeError):
+        with pytest.raises(AssertionError):
             util.parse_device_id(device)
 
     def test_incorrect_device(self):
diff --git a/tools/package_tools/configs/jetson.yaml b/tools/package_tools/configs/jetson.yaml
new file mode 100644
index 0000000000..8df61e6fc9
--- /dev/null
+++ b/tools/package_tools/configs/jetson.yaml
@@ -0,0 +1,13 @@
+global_config:
+  cmake_envs:
+    CMAKE_CXX_COMPILER: "g++-7"
+    MMDEPLOY_BUILD_SDK: "ON"
+    MMDEPLOY_BUILD_SDK_PYTHON_API: "ON"
+    MMDEPLOY_TARGET_DEVICES: '"cpu;cuda"'
+    MMDEPLOY_TARGET_BACKENDS: "trt"
+    TENSORRT_DIR: "/usr/include/aarch64-linux-gnu"
+    CUDA_TOOLKIT_ROOT_DIR: "/usr/local/cuda"
+    pplcv_DIR: ${pplcv_DIR}/cuda-build/install/lib/cmake/ppl
+
+local_configs:
+  - BUILD_NAME: "mmdeploy-{mmdeploy_v}-jetson-cuda{cuda_v}"
diff --git a/tools/package_tools/configs/linux_x64.yaml b/tools/package_tools/configs/linux_x64.yaml
new file mode 100644
index 0000000000..a1dc7bc45f
--- /dev/null
+++ b/tools/package_tools/configs/linux_x64.yaml
@@ -0,0 +1,20 @@
+global_config:
+  cmake_envs:
+    CMAKE_CXX_COMPILER: "g++-7"
+    MMDEPLOY_BUILD_SDK: "ON"
+    MMDEPLOY_BUILD_SDK_PYTHON_API: "ON"
+
+local_configs:
+  - BUILD_NAME: "mmdeploy-{mmdeploy_v}-{system}-{machine}-onnxruntime{ort_v}"
+    cmake_envs:
+      MMDEPLOY_TARGET_DEVICES: '"cpu"'
+      MMDEPLOY_TARGET_BACKENDS: "ort"
+      ONNXRUNTIME_DIR: "${ONNXRUNTIME_DIR}"
+  - BUILD_NAME: "mmdeploy-{mmdeploy_v}-{system}-{machine}-cuda{cuda_v}-tensorrt{trt_v}"
+    cmake_envs:
+      MMDEPLOY_TARGET_DEVICES: '"cuda"'
+      MMDEPLOY_TARGET_BACKENDS: "trt"
+      TENSORRT_DIR: "${TENSORRT_DIR}"
+      CUDA_TOOLKIT_ROOT_DIR: "/usr/local/cuda-11.3"
+      CUDNN_DIR: "${CUDNN_DIR}"
+      pplcv_DIR: ${pplcv_DIR}/cuda-build/install/lib/cmake/ppl
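Each entry in `local_configs` is overlaid on `global_config`, so a build entry resolves to the shared cmake settings plus its backend-specific ones. A shallow approximation of that merge (the builder's `_merge_cfg`, shown later in this diff, recurses into nested dicts):

```python
import yaml

with open('tools/package_tools/configs/linux_x64.yaml') as f:
    cfgs = yaml.safe_load(f)

# Overlay the first local config's cmake_envs on the global ones.
merged = dict(cfgs['global_config'].get('cmake_envs', {}))
merged.update(cfgs['local_configs'][0].get('cmake_envs', {}))
print(merged['MMDEPLOY_TARGET_BACKENDS'])  # -> ort
```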
"ort" + ONNXRUNTIME_DIR: "%ONNXRUNTIME_DIR%" + - BUILD_NAME: "mmdeploy-{mmdeploy_v}-{system}-{machine}-cuda{cuda_v}-tensorrt{trt_v}" + cmake_envs: + MMDEPLOY_TARGET_DEVICES: '"cuda"' + MMDEPLOY_TARGET_BACKENDS: "trt" + pplcv_DIR: "%PPLCV_DIR%\\pplcv-build\\install\\lib\\cmake\\ppl" + TENSORRT_DIR: "%TENSORRT_DIR%" + CUDNN_DIR: "%CUDNN_DIR%" diff --git a/tools/package_tools/mmdeploy_builder.py b/tools/package_tools/mmdeploy_builder.py new file mode 100644 index 0000000000..2e59d6cddf --- /dev/null +++ b/tools/package_tools/mmdeploy_builder.py @@ -0,0 +1,356 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import copy +import logging +import os +import os.path as osp +import platform +import re +import shutil +import sys +import tarfile +from distutils.util import get_platform +from glob import glob +from subprocess import CalledProcessError, check_output, run +from typing import Dict + +import yaml + +logger = logging.getLogger() +logger.setLevel(logging.INFO) + +CUR_DIR = os.path.dirname(os.path.abspath(__file__)) +PACKAGING_DIR = osp.join(CUR_DIR, 'packaging') +PLATFORM_TAG = get_platform().replace('-', '_').replace('.', '_') + + +def get_version(version_file): + with open(version_file, 'r') as f: + exec(compile(f.read(), version_file, 'exec')) + return locals()['__version__'] + + +def _merge_cfg(cfg0, cfg1): + cfg = copy.deepcopy(cfg0) + for k, v in cfg1.items(): + if k in cfg: + cfg[k] = _merge_cfg(cfg0[k], cfg1[k]) + else: + cfg[k] = v + return cfg + + +def _remove_if_exist(path): + if osp.exists(path): + logging.info(f'Remove path: {path}.') + if osp.isdir(path): + shutil.rmtree(path) + else: + os.remove(path) + + +def _copy(src_path, dst_path): + assert osp.exists(src_path), f'src path: {src_path} not exist.' + + logging.info(f'copy path: {src_path} to {dst_path}.') + if osp.isdir(src_path): + if osp.exists(dst_path): + shutil.rmtree(dst_path) + shutil.copytree(src_path, dst_path) + else: + shutil.copy(src_path, dst_path) + + +def _call_command(cmd, cwd, stdout=None, stderr=None): + if cmd == '': + return + logging.info(f'Process cmd: {cmd}') + logging.info(f'work_path: {cwd}') + try: + ret = run(cmd, stdout=stdout, stderr=stderr, cwd=cwd, shell=True) + if ret.returncode != 0: + logging.error(f'Process cmd: "{cmd}"' + f' failed with returncode: {ret.returncode}') + exit(-1) + except Exception: + logging.error(f'Process cmd: {cmd} failed.') + exit(-1) + + +def _create_tar(path, tar_name): + logging.info(f'create tar file: {tar_name}') + with tarfile.open(tar_name, 'w:gz') as tar: + tar.add(path, arcname=os.path.basename(path)) + + +def _create_bdist_cmd(cfg, c_ext=False, dist_dir=None): + + bdist_tags = cfg.get('bdist_tags', {}) + + # base + bdist_cmd = 'python setup.py bdist_wheel ' + + # platform + bdist_cmd += f' --plat-name {PLATFORM_TAG} ' + + # python tag + py_flag = 'cp' if c_ext else 'py' + python_tag = f'{py_flag}{sys.version_info.major}{sys.version_info.minor}' + if 'python_tag' in bdist_tags: + python_tag = bdist_tags['python_tag'] + bdist_cmd += f' --python-tag {python_tag} ' + + # dist dir + if dist_dir is not None: + dist_dir = osp.abspath(dist_dir) + bdist_cmd += f' --dist-dir {dist_dir} ' + return bdist_cmd + + +def clear_mmdeploy(mmdeploy_dir: str): + logging.info(f'cleaning mmdeploy: {mmdeploy_dir}') + + def _remove_in_mmdeploy(path): + remove_dir = osp.join(mmdeploy_dir, path) + _remove_if_exist(remove_dir) + + # remove build file + _remove_in_mmdeploy('build') + + # remove dist + _remove_in_mmdeploy('dist') + + # remove installed library + 
diff --git a/tools/package_tools/mmdeploy_builder.py b/tools/package_tools/mmdeploy_builder.py
new file mode 100644
index 0000000000..2e59d6cddf
--- /dev/null
+++ b/tools/package_tools/mmdeploy_builder.py
@@ -0,0 +1,356 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import argparse
+import copy
+import logging
+import os
+import os.path as osp
+import platform
+import re
+import shutil
+import sys
+import tarfile
+from distutils.util import get_platform
+from glob import glob
+from subprocess import CalledProcessError, check_output, run
+from typing import Dict
+
+import yaml
+
+logger = logging.getLogger()
+logger.setLevel(logging.INFO)
+
+CUR_DIR = os.path.dirname(os.path.abspath(__file__))
+PACKAGING_DIR = osp.join(CUR_DIR, 'packaging')
+PLATFORM_TAG = get_platform().replace('-', '_').replace('.', '_')
+
+
+def get_version(version_file):
+    with open(version_file, 'r') as f:
+        exec(compile(f.read(), version_file, 'exec'))
+    return locals()['__version__']
+
+
+def _merge_cfg(cfg0, cfg1):
+    cfg = copy.deepcopy(cfg0)
+    for k, v in cfg1.items():
+        # recurse only when both sides are dicts; scalars in cfg1 override
+        if k in cfg and isinstance(cfg[k], dict) and isinstance(v, dict):
+            cfg[k] = _merge_cfg(cfg[k], v)
+        else:
+            cfg[k] = v
+    return cfg
+
+
+def _remove_if_exist(path):
+    if osp.exists(path):
+        logging.info(f'Remove path: {path}.')
+        if osp.isdir(path):
+            shutil.rmtree(path)
+        else:
+            os.remove(path)
+
+
+def _copy(src_path, dst_path):
+    assert osp.exists(src_path), f'src path: {src_path} does not exist.'
+
+    logging.info(f'copy path: {src_path} to {dst_path}.')
+    if osp.isdir(src_path):
+        if osp.exists(dst_path):
+            shutil.rmtree(dst_path)
+        shutil.copytree(src_path, dst_path)
+    else:
+        shutil.copy(src_path, dst_path)
+
+
+def _call_command(cmd, cwd, stdout=None, stderr=None):
+    if cmd == '':
+        return
+    logging.info(f'Process cmd: {cmd}')
+    logging.info(f'work_path: {cwd}')
+    try:
+        ret = run(cmd, stdout=stdout, stderr=stderr, cwd=cwd, shell=True)
+        if ret.returncode != 0:
+            logging.error(f'Process cmd: "{cmd}"'
+                          f' failed with returncode: {ret.returncode}')
+            exit(-1)
+    except Exception:
+        logging.error(f'Process cmd: {cmd} failed.')
+        exit(-1)
+
+
+def _create_tar(path, tar_name):
+    logging.info(f'create tar file: {tar_name}')
+    with tarfile.open(tar_name, 'w:gz') as tar:
+        tar.add(path, arcname=os.path.basename(path))
+
+
+def _create_bdist_cmd(cfg, c_ext=False, dist_dir=None):
+
+    bdist_tags = cfg.get('bdist_tags', {})
+
+    # base
+    bdist_cmd = 'python setup.py bdist_wheel '
+
+    # platform
+    bdist_cmd += f' --plat-name {PLATFORM_TAG} '
+
+    # python tag
+    py_flag = 'cp' if c_ext else 'py'
+    python_tag = f'{py_flag}{sys.version_info.major}{sys.version_info.minor}'
+    if 'python_tag' in bdist_tags:
+        python_tag = bdist_tags['python_tag']
+    bdist_cmd += f' --python-tag {python_tag} '
+
+    # dist dir
+    if dist_dir is not None:
+        dist_dir = osp.abspath(dist_dir)
+        bdist_cmd += f' --dist-dir {dist_dir} '
+    return bdist_cmd
+
+
+def clear_mmdeploy(mmdeploy_dir: str):
+    logging.info(f'cleaning mmdeploy: {mmdeploy_dir}')
+
+    def _remove_in_mmdeploy(path):
+        remove_dir = osp.join(mmdeploy_dir, path)
+        _remove_if_exist(remove_dir)
+
+    # remove build file
+    _remove_in_mmdeploy('build')
+
+    # remove dist
+    _remove_in_mmdeploy('dist')
+
+    # remove installed library
+    _remove_in_mmdeploy('mmdeploy/lib')
+
+    # remove onnx2ncnn and ncnn ext
+    _remove_in_mmdeploy('mmdeploy/backend/ncnn/onnx2ncnn')
+    _remove_in_mmdeploy('mmdeploy/backend/ncnn/onnx2ncnn.exe')
+    ncnn_ext_paths = glob(
+        osp.join(mmdeploy_dir, 'mmdeploy/backend/ncnn/ncnn_ext.*'))
+    for ncnn_ext_path in ncnn_ext_paths:
+        os.remove(ncnn_ext_path)
+
+
+def build_mmdeploy(cfg, mmdeploy_dir, dist_dir=None):
+    cmake_flags = cfg.get('cmake_flags', [])
+    cmake_envs = cfg.get('cmake_envs', dict())
+
+    args = [f'-D{k}={v}' for k, v in cmake_envs.items()]
+
+    # clear mmdeploy
+    clear_mmdeploy(mmdeploy_dir)
+
+    build_dir = osp.join(mmdeploy_dir, 'build')
+    if not osp.exists(build_dir):
+        os.mkdir(build_dir)
+
+    # cmake cmd
+    cmake_cmd = ' '.join(['cmake ..'] + cmake_flags + args)
+    _call_command(cmake_cmd, build_dir)
+
+    if sys.platform == 'win32':
+        # build cmd
+        build_cmd = 'cmake --build . --config Release -- /m'
+        _call_command(build_cmd, build_dir)
+        install_cmd = 'cmake --install . --config Release'
+        _call_command(install_cmd, build_dir)
+    else:
+        # build cmd
+        build_cmd = 'cmake --build . -- -j$(nproc) && cmake --install .'
+        _call_command(build_cmd, build_dir)
+
+    # build wheel
+    bdist_cmd = _create_bdist_cmd(cfg, c_ext=False, dist_dir=dist_dir)
+    _call_command(bdist_cmd, mmdeploy_dir)
+
+
+def get_dir_name(cfg, tag, default_name):
+    if tag not in cfg:
+        logging.warning(f'{tag} not found, use `{default_name}` as default.')
+    else:
+        default_name = cfg[tag]
+    return cfg, default_name
+
+
+def check_env(cfg: Dict):
+    env_info = {}
+
+    cmake_envs = cfg.get('cmake_envs', dict())
+
+    # system
+    platform_system = platform.system().lower()
+    platform_machine = platform.machine().lower()
+    env_info['system'] = platform_system
+    env_info['machine'] = platform_machine
+
+    # CUDA version
+    cuda_version = 'unknown'
+
+    CUDA_TOOLKIT_ROOT_DIR = cmake_envs.get('CUDA_TOOLKIT_ROOT_DIR', '')
+    CUDA_TOOLKIT_ROOT_DIR = osp.expandvars(CUDA_TOOLKIT_ROOT_DIR)
+    nvcc_cmd = 'nvcc' if len(CUDA_TOOLKIT_ROOT_DIR) <= 0 else osp.join(
+        CUDA_TOOLKIT_ROOT_DIR, 'bin', 'nvcc')
+
+    try:
+        nvcc = check_output(f'"{nvcc_cmd}" -V', shell=True)
+        nvcc = nvcc.decode('utf-8').strip()
+        pattern = r'Cuda compilation tools, release (\d+.\d+)'
+        match = re.search(pattern, nvcc)
+        if match is not None:
+            cuda_version = match.group(1)
+    except Exception:
+        pass
+
+    env_info['cuda_v'] = cuda_version
+
+    # ONNX Runtime version
+    onnxruntime_version = 'unknown'
+
+    ONNXRUNTIME_DIR = os.getenv('ONNXRUNTIME_DIR', '')
+    ONNXRUNTIME_DIR = cmake_envs.get('ONNXRUNTIME_DIR', ONNXRUNTIME_DIR)
+    ONNXRUNTIME_DIR = osp.expandvars(ONNXRUNTIME_DIR)
+
+    if osp.exists(ONNXRUNTIME_DIR):
+        with open(osp.join(ONNXRUNTIME_DIR, 'VERSION_NUMBER'), mode='r') as f:
+            onnxruntime_version = f.readlines()[0].strip()
+
+    env_info['ort_v'] = onnxruntime_version
+    # TensorRT version
+    tensorrt_version = 'unknown'
+
+    TENSORRT_DIR = os.getenv('TENSORRT_DIR', '')
+    TENSORRT_DIR = cmake_envs.get('TENSORRT_DIR', TENSORRT_DIR)
+    TENSORRT_DIR = osp.expandvars(TENSORRT_DIR)
+
+    if osp.exists(TENSORRT_DIR):
+        with open(
+                osp.join(TENSORRT_DIR, 'include', 'NvInferVersion.h'),
+                mode='r') as f:
+            data = f.read()
+            major = re.search(r'#define NV_TENSORRT_MAJOR (\d+)', data)
+            minor = re.search(r'#define NV_TENSORRT_MINOR (\d+)', data)
+            patch = re.search(r'#define NV_TENSORRT_PATCH (\d+)', data)
+            if major is not None and minor is not None and patch is not None:
+                tensorrt_version = f'{major.group(1)}.' +\
+                    f'{minor.group(1)}.' +\
+                    f'{patch.group(1)}'
+
+    env_info['trt_v'] = tensorrt_version
+
+    return env_info
+
+
+def create_package(cfg: Dict, mmdeploy_dir: str):
+    build_dir = 'build'
+    sdk_tar_name = 'sdk'
+
+    # load flags
+    cfg, build_dir = get_dir_name(cfg, 'BUILD_NAME', build_dir)
+    cmake_envs = cfg.get('cmake_envs', dict())
+    build_sdk_flag = cmake_envs.get('MMDEPLOY_BUILD_SDK', False)
+    if 'TAR_NAME' in cfg:
+        cfg, sdk_tar_name = get_dir_name(cfg, 'TAR_NAME', sdk_tar_name)
+
+    # fill name
+    env_info = check_env(cfg)
+    version_file = osp.join(mmdeploy_dir, 'mmdeploy', 'version.py')
+    mmdeploy_version = get_version(version_file)
+    build_dir = build_dir.format(mmdeploy_v=mmdeploy_version, **env_info)
+
+    # create package directory.
+    if osp.exists(build_dir):
+        logging.info(f'{build_dir} existed, deleting...')
+        shutil.rmtree(build_dir)
+    os.mkdir(build_dir)
+
+    logging.info(f'build mmdeploy in {build_dir}:')
+    logging.debug(f'with config: {cfg}')
+
+    try:
+        # build dist
+        dist_dir = osp.join(build_dir, 'dist')
+        build_mmdeploy(cfg, mmdeploy_dir, dist_dir=dist_dir)
+
+        if build_sdk_flag:
+
+            sdk_tar_dir = osp.join(build_dir, sdk_tar_name)
+
+            # copy lib and install into sdk dir
+            install_dir = osp.join(mmdeploy_dir, 'build/install/')
+            _copy(install_dir, sdk_tar_dir)
+            _remove_if_exist(osp.join(sdk_tar_dir, 'example', 'build'))
+
+            # create sdk python api wheel
+            # for linux
+            python_api_lib_path = glob(
+                osp.join(mmdeploy_dir, 'build/lib/mmdeploy_python.*.so'))
+            # for windows
+            python_api_lib_path += glob(
+                osp.join(mmdeploy_dir, 'build/bin/*/mmdeploy_python.*.pyd'))
+            num_libs = len(python_api_lib_path)
+            if num_libs != 1:
+                logging.info(f'found {num_libs} mmdeploy_python libraries.')
+            python_api_lib_path = python_api_lib_path[0]
+
+            sdk_python_package_dir = osp.join(build_dir, '.mmdeploy_python')
+            _copy(PACKAGING_DIR, sdk_python_package_dir)
+            _copy(
+                osp.join(mmdeploy_dir, 'mmdeploy', 'version.py'),
+                osp.join(sdk_python_package_dir, 'mmdeploy_python',
+                         'version.py'))
+            _copy(python_api_lib_path,
+                  osp.join(sdk_python_package_dir, 'mmdeploy_python'))
+            sdk_wheel_dir = osp.abspath(osp.join(sdk_tar_dir, 'python'))
+            bdist_cmd = _create_bdist_cmd(
+                cfg, c_ext=True, dist_dir=sdk_wheel_dir)
+            _call_command(bdist_cmd, sdk_python_package_dir)
+
+            # remove temp package dir
+            _remove_if_exist(sdk_python_package_dir)
+
+        logging.info('build finish.')
+
+    except CalledProcessError:
+        logging.error('build failed')
+        exit(-1)
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='Build mmdeploy from yaml.')
+    parser.add_argument('build_cfgs', help='The build config yaml file.')
+    parser.add_argument('mmdeploy_dir', help='The source code of MMDeploy.')
+    args = parser.parse_args()
+
+    return args
+
+
+def parse_configs(cfg_path: str):
+    with open(cfg_path, mode='r') as f:
+        cfgs = yaml.load(f, yaml.Loader)
+
+    global_cfg = cfgs.get('global_config', dict())
+    local_cfgs = cfgs.get('local_configs', [])
+
+    merged_cfgs = [
+        _merge_cfg(global_cfg, local_cfg) for local_cfg in local_cfgs
+    ]
+
+    return merged_cfgs
+
+
+def main():
+    args = parse_args()
+    cfgs = parse_configs(args.build_cfgs)
+    mmdeploy_dir = osp.abspath(args.mmdeploy_dir)
+    logging.info(f'Using mmdeploy_dir: {mmdeploy_dir}')
+
+    logging.info(f'Using PACKAGING_DIR: {PACKAGING_DIR}')
+
+    for cfg in cfgs:
+        create_package(cfg, mmdeploy_dir)
+
+
+if __name__ == '__main__':
+    main()
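Besides the CLI entry point, the builder can be driven programmatically; a sketch, assuming it is run from the repository root (the config path and checkout path are placeholders):

```python
import os.path as osp
import sys

# tools/package_tools is not a package, so put it on sys.path first.
sys.path.insert(0, 'tools/package_tools')
from mmdeploy_builder import create_package, parse_configs  # noqa: E402

for cfg in parse_configs('tools/package_tools/configs/linux_x64.yaml'):
    create_package(cfg, osp.abspath('.'))
```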
diff --git a/tools/package_tools/packaging/MANIFEST.in b/tools/package_tools/packaging/MANIFEST.in
new file mode 100644
index 0000000000..891555be37
--- /dev/null
+++ b/tools/package_tools/packaging/MANIFEST.in
@@ -0,0 +1,3 @@
+include mmdeploy_python/*.so
+include mmdeploy_python/*.dll
+include mmdeploy_python/*.pyd
diff --git a/tools/package_tools/packaging/mmdeploy_python/__init__.py b/tools/package_tools/packaging/mmdeploy_python/__init__.py
new file mode 100644
index 0000000000..2fc45bf87c
--- /dev/null
+++ b/tools/package_tools/packaging/mmdeploy_python/__init__.py
@@ -0,0 +1,23 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+# modify from https://github.com/NVIDIA/TensorRT/blob/main/python/packaging/tensorrt/__init__.py # noqa
+import ctypes
+import glob
+import os
+
+from .version import __version__
+
+
+def try_load(library):
+    try:
+        ctypes.CDLL(library)
+    except OSError:
+        pass
+
+
+CURDIR = os.path.realpath(os.path.dirname(__file__))
+for lib in glob.iglob(os.path.join(CURDIR, '*.so*')):
+    try_load(lib)
+
+from .mmdeploy_python import *  # noqa
+
+__all__ = ['__version__']
diff --git a/tools/package_tools/packaging/mmdeploy_python/version.py b/tools/package_tools/packaging/mmdeploy_python/version.py
new file mode 100644
index 0000000000..b3309754c0
--- /dev/null
+++ b/tools/package_tools/packaging/mmdeploy_python/version.py
@@ -0,0 +1,2 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+__version__ = '0.5.0'
diff --git a/tools/package_tools/packaging/setup.py b/tools/package_tools/packaging/setup.py
new file mode 100644
index 0000000000..3d336ff741
--- /dev/null
+++ b/tools/package_tools/packaging/setup.py
@@ -0,0 +1,36 @@
+import os
+import os.path as osp
+import platform
+
+# setuptools is required here: the builder always produces wheels, and
+# distutils.core has no find_packages, so a distutils fallback cannot work.
+from setuptools import find_packages, setup
+
+CURDIR = os.path.realpath(os.path.dirname(__file__))
+version_file = osp.join(CURDIR, 'mmdeploy_python', 'version.py')
+
+
+def get_version():
+    with open(version_file, 'r') as f:
+        exec(compile(f.read(), version_file, 'exec'))
+    return locals()['__version__']
+
+
+def get_platform_name():
+    return platform.machine()
+
+
+if __name__ == '__main__':
+    setup(
+        name='mmdeploy_python',
+        version=get_version(),
+        description='OpenMMLab Model Deployment SDK python api',
+        author='OpenMMLab',
+        author_email='openmmlab@gmail.com',
+        keywords='computer vision, model deployment',
+        url='https://github.com/open-mmlab/mmdeploy',
+        packages=find_packages(),
+        include_package_data=True,
+        platforms=get_platform_name(),
+        package_data={'mmdeploy_python': ['*.so*', '*.pyd', '*.pdb']},
+        license='Apache License 2.0')
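The hard-coded `0.5.0` above is only a fallback: `create_package()` copies the main `mmdeploy/version.py` over it before the wheel is built, so the SDK wheel reports the version of the checkout it was built from. A quick post-install check (a sketch, assuming the built wheel and its compiled extension are installed):

```python
import mmdeploy_python

# Should match mmdeploy/version.py of the source tree the wheel came from.
print(mmdeploy_python.__version__)
```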