diff --git a/python/rmm/rmm/_lib/memory_resource.pxd b/python/rmm/rmm/_lib/memory_resource.pxd index f9c2e91de..15ddc84f6 100644 --- a/python/rmm/rmm/_lib/memory_resource.pxd +++ b/python/rmm/rmm/_lib/memory_resource.pxd @@ -1,4 +1,4 @@ -# Copyright (c) 2020-2022, NVIDIA CORPORATION. +# Copyright (c) 2020-2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,6 +14,7 @@ from libc.stdint cimport int8_t from libcpp.memory cimport shared_ptr +from libcpp.pair cimport pair from libcpp.string cimport string from libcpp.vector cimport vector @@ -32,6 +33,10 @@ cdef extern from "rmm/mr/device/device_memory_resource.hpp" \ cuda_stream_view stream ) except + +cdef extern from "rmm/cuda_device.hpp" namespace "rmm" nogil: + size_t percent_of_free_device_memory(int percent) except + + pair[size_t, size_t] available_device_memory() except + + cdef class DeviceMemoryResource: cdef shared_ptr[device_memory_resource] c_obj cdef device_memory_resource* get_mr(self) noexcept nogil diff --git a/python/rmm/rmm/_lib/memory_resource.pyx b/python/rmm/rmm/_lib/memory_resource.pyx index 100d18b56..992203c27 100644 --- a/python/rmm/rmm/_lib/memory_resource.pyx +++ b/python/rmm/rmm/_lib/memory_resource.pyx @@ -1,4 +1,4 @@ -# Copyright (c) 2020-2022, NVIDIA CORPORATION. +# Copyright (c) 2020-2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -32,9 +32,16 @@ from libcpp.string cimport string from cuda.cudart import cudaError_t from rmm._cuda.gpu import CUDARuntimeError, getDevice, setDevice + from rmm._cuda.stream cimport Stream + from rmm._cuda.stream import DEFAULT_STREAM + from rmm._lib.cuda_stream_view cimport cuda_stream_view +from rmm._lib.memory_resource cimport ( + available_device_memory as c_available_device_memory, + percent_of_free_device_memory as c_percent_of_free_device_memory, +) from rmm._lib.per_device_resource cimport ( cuda_device_id, set_per_device_resource as cpp_set_per_device_resource, @@ -109,8 +116,6 @@ cdef extern from "rmm/mr/device/cuda_async_memory_resource.hpp" \ namespace "rmm::mr" nogil: win32 win32_kmt -cdef extern from "rmm/cuda_device.hpp" namespace "rmm" nogil: - size_t percent_of_free_device_memory(int percent) except + cdef extern from "rmm/mr/device/pool_memory_resource.hpp" \ namespace "rmm::mr" nogil: @@ -368,7 +373,7 @@ cdef class PoolMemoryResource(UpstreamResourceAdaptor): cdef size_t c_initial_pool_size cdef optional[size_t] c_maximum_pool_size c_initial_pool_size = ( - percent_of_free_device_memory(50) if + c_percent_of_free_device_memory(50) if initial_pool_size is None else initial_pool_size ) @@ -1188,3 +1193,12 @@ def get_log_filenames(): else None for i, each_mr in _per_device_mrs.items() } + + +def available_device_memory(): + """ + Returns a tuple of free and total device memory. + """ + cdef pair[size_t, size_t] res + res = c_available_device_memory() + return (res.first, res.second) diff --git a/python/rmm/rmm/mr.py b/python/rmm/rmm/mr.py index 4f6b801f5..b6ae4e6cd 100644 --- a/python/rmm/rmm/mr.py +++ b/python/rmm/rmm/mr.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020-2021, NVIDIA CORPORATION. +# Copyright (c) 2020-2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -28,6 +28,7 @@ UpstreamResourceAdaptor, _flush_logs, _initialize, + available_device_memory, disable_logging, enable_logging, get_current_device_resource, @@ -57,6 +58,7 @@ "UpstreamResourceAdaptor", "_flush_logs", "_initialize", + "available_device_memory", "set_per_device_resource", "enable_logging", "disable_logging", diff --git a/python/rmm/rmm/tests/test_rmm.py b/python/rmm/rmm/tests/test_rmm.py index c37fe0298..62adcd4a5 100644 --- a/python/rmm/rmm/tests/test_rmm.py +++ b/python/rmm/rmm/tests/test_rmm.py @@ -1002,3 +1002,16 @@ def test_invalid_logging_level(level): rmm.set_flush_level(level) with pytest.raises(TypeError): rmm.should_log(level) + + +def test_available_device_memory(): + from rmm.mr import available_device_memory + + initial_memory = available_device_memory() + device_buffer = rmm.DeviceBuffer.to_device( # noqa: F841 + np.zeros(10000, dtype="u1") + ) + final_memory = available_device_memory() + assert initial_memory[1] == final_memory[1] + assert initial_memory[0] > 0 + assert final_memory[0] > 0