diff --git a/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.cc b/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.cc
index 13d247afd37f7..aa0294a4e12ff 100644
--- a/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.cc
+++ b/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.cc
@@ -473,7 +473,7 @@ TensorrtExecutionProvider::TensorrtExecutionProvider(const TensorrtExecutionProv
         throw std::runtime_error("Failed to create directory " + cache_path_);
       }
     }
-    runtime_ = nvinfer1::createInferRuntime(GetTensorrtLogger());
+    runtime_ = tensorrt_ptr::unique_pointer<nvinfer1::IRuntime>(nvinfer1::createInferRuntime(GetTensorrtLogger()));
   }
 
   const std::string engine_decryption_enable_env = onnxruntime::GetEnvironmentVar(tensorrt_env_vars::kDecryptionEnable);
@@ -1243,7 +1243,7 @@ common::Status TensorrtExecutionProvider::Compile(const std::vector<Node*>& fuse
             &engines_[context->node_name], &contexts_[context->node_name], &builders_[context->node_name],
             &networks_[context->node_name], input_info_[context->node_name], output_info_[context->node_name],
             input_shape_ranges_[context->node_name], &tensorrt_mu_, &fp16_enable_, &int8_enable_, &max_workspace_size_,
-            trt_node_name_with_precision, engine_cache_enable_, cache_path_, runtime_, nullptr,
+            trt_node_name_with_precision, engine_cache_enable_, cache_path_, runtime_.get(), nullptr,
             allocator_, dynamic_range_map, engine_decryption_enable_, engine_decryption_};
       *state = p.release();
       return 0;
@@ -1295,9 +1295,8 @@ common::Status TensorrtExecutionProvider::Compile(const std::vector<Node*>& fuse
         engine_file.seekg(0, std::ios::beg);
         std::unique_ptr<char[]> engine_buf{new char[engine_size]};
         engine_file.read((char*)engine_buf.get(), engine_size);
-        auto runtime_ = trt_state->runtime;
         *(trt_state->engine) = tensorrt_ptr::unique_pointer<nvinfer1::ICudaEngine>(
-            runtime_->deserializeCudaEngine(engine_buf.get(), engine_size, nullptr));
+            trt_state->runtime->deserializeCudaEngine(engine_buf.get(), engine_size, nullptr));
         if (trt_state->engine == nullptr) {
           return ORT_MAKE_STATUS(ONNXRUNTIME, EP_FAIL, "TensorRT EP Failed to Build Engine.");
         }
diff --git a/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.h b/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.h
index 073ee6aa275a2..1838cc1c05f0b 100644
--- a/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.h
+++ b/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.h
@@ -149,7 +149,7 @@ class TensorrtExecutionProvider : public IExecutionProvider {
   bool dump_subgraphs_ = false;
   bool engine_cache_enable_ = false;
   std::string cache_path_;
-  nvinfer1::IRuntime* runtime_ = nullptr;
+  tensorrt_ptr::unique_pointer<nvinfer1::IRuntime> runtime_ = nullptr;
   OrtMutex tensorrt_mu_;
   int device_id_;
   AllocatorPtr allocator_;
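
Note on the ownership pattern above: tensorrt_ptr::unique_pointer is a std::unique_ptr alias already declared in tensorrt_execution_provider.h, whose custom deleter releases TensorRT objects through their destroy() method rather than operator delete. A minimal sketch of that idiom (the deleter name here is illustrative; the real one lives in the header above):

#include <memory>

namespace tensorrt_ptr {
// Deleter that releases TensorRT objects via destroy(), as the
// TensorRT API requires, instead of plain delete.
struct InferDeleter {
  template <typename T>
  void operator()(T* obj) const {
    if (obj != nullptr) {
      obj->destroy();
    }
  }
};

// Owning handle: unique_pointer<nvinfer1::IRuntime> frees the runtime
// exactly once, when the execution provider is destroyed.
template <typename T>
using unique_pointer = std::unique_ptr<T, InferDeleter>;
}  // namespace tensorrt_ptr

With this, the constructor takes ownership of the runtime once, per-node compile state receives only a non-owning raw pointer via runtime_.get(), and the third hunk can drop the shadowing local `auto runtime_` and read trt_state->runtime directly, fixing the leak of the raw IRuntime.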