From 567808eb20727855a9413cc34f8ba3cf76974c6b Mon Sep 17 00:00:00 2001 From: AllentDan Date: Mon, 6 Feb 2023 11:48:04 +0800 Subject: [PATCH] fix torch allocator resource releasing --- mmdeploy/backend/tensorrt/torch_allocator.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/mmdeploy/backend/tensorrt/torch_allocator.py b/mmdeploy/backend/tensorrt/torch_allocator.py index 7b8b0ee587..47e7d5a596 100644 --- a/mmdeploy/backend/tensorrt/torch_allocator.py +++ b/mmdeploy/backend/tensorrt/torch_allocator.py @@ -13,6 +13,7 @@ def __init__(self, device_id: int = 0) -> None: self.device_id = device_id self.mems = set() + self.caching_delete = torch._C._cuda_cudaCachingAllocator_raw_delete def __del__(self): """destructor.""" @@ -53,11 +54,9 @@ def deallocate(self: trt.IGpuAllocator, memory: int) -> bool: Returns: bool: deallocate success. """ - logger = get_root_logger() - logger.debug(f'deallocate {memory} with TorchAllocator.') if memory not in self.mems: return False - torch.cuda.caching_allocator_delete(memory) + self.caching_delete(memory) self.mems.discard(memory) return True