diff --git a/.gitignore b/.gitignore
index 2d0b150e1..36aafe643 100644
--- a/.gitignore
+++ b/.gitignore
@@ -22,10 +22,13 @@ rmm.egg-info/
 python/build
 python/*/build
 python/rmm/docs/_build
-python/rmm/**/_lib/**/*.cpp
-!python/rmm/_lib/_torch_allocator.cpp
-python/rmm/**/_lib/**/*.h
-python/rmm/**/_lib/.nfs*
+python/rmm/**/librmm/**/*.cpp
+!python/rmm/librmm/_torch_allocator.cpp
+python/rmm/**/librmm/**/*.h
+python/rmm/**/librmm/.nfs*
+python/rmm/**/pylibrmm/**/*.cpp
+python/rmm/**/pylibrmm/**/*.h
+python/rmm/**/pylibrmm/.nfs*
 python/rmm/_cuda/*.cpp
 python/rmm/tests/*.cpp
 python/rmm/*.ipynb
diff --git a/python/rmm/CMakeLists.txt b/python/rmm/CMakeLists.txt
index 6c2515102..ac8495e14 100644
--- a/python/rmm/CMakeLists.txt
+++ b/python/rmm/CMakeLists.txt
@@ -30,4 +30,5 @@ rapids_cython_init()
 add_compile_definitions("SPDLOG_ACTIVE_LEVEL=SPDLOG_LEVEL_${RMM_LOGGING_LEVEL}")
 
 add_subdirectory(rmm/_cuda)
-add_subdirectory(rmm/_lib)
+add_subdirectory(rmm/librmm)
+add_subdirectory(rmm/pylibrmm)
diff --git a/python/rmm/docs/guide.md b/python/rmm/docs/guide.md
index 22c0dc023..c7e940497 100644
--- a/python/rmm/docs/guide.md
+++ b/python/rmm/docs/guide.md
@@ -236,17 +236,17 @@ Common to both usages is that they modify the currently active RMM memory resour
 
 >>> # We start with the default cuda memory resource
 >>> rmm.mr.get_current_device_resource()
-
+
 
 >>> # When using statistics, we get a StatisticsResourceAdaptor with the context
 >>> with rmm.statistics.statistics():
 ...     rmm.mr.get_current_device_resource()
-
+
 
 >>> # We can also enable statistics globally
 >>> rmm.statistics.enable_statistics()
 >>> print(rmm.mr.get_current_device_resource())
-
+
 ```
 
 With statistics enabled, you can query statistics of the current and peak bytes and number of allocations performed by the current RMM memory resource:
diff --git a/python/rmm/rmm/__init__.py b/python/rmm/rmm/__init__.py
index 1e3b5c8b1..b23ad68f9 100644
--- a/python/rmm/rmm/__init__.py
+++ b/python/rmm/rmm/__init__.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018-2021, NVIDIA CORPORATION.
+# Copyright (c) 2018-2024, NVIDIA CORPORATION.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,8 +13,10 @@
 # limitations under the License.
 
 from rmm import mr
-from rmm._lib.device_buffer import DeviceBuffer
-from rmm._lib.logger import (
+from rmm._version import __git_commit__, __version__
+from rmm.mr import disable_logging, enable_logging, get_log_filenames
+from rmm.pylibrmm.device_buffer import DeviceBuffer
+from rmm.pylibrmm.logger import (
     flush_logger,
     get_flush_level,
     get_logging_level,
@@ -23,8 +25,6 @@
     set_logging_level,
     should_log,
 )
-from rmm._version import __git_commit__, __version__
-from rmm.mr import disable_logging, enable_logging, get_log_filenames
 from rmm.rmm import (
     RMMError,
     is_initialized,
@@ -52,3 +52,13 @@
     "should_log",
     "unregister_reinitialize_hook",
 ]
+
+
+def __getattr__(name):
+    if name == "_lib":
+        import importlib
+
+        module = importlib.import_module("rmm.pylibrmm")
+        return module
+    else:
+        raise AttributeError(f"Module '{__name__}' has no attribute '{name}'")
diff --git a/python/rmm/rmm/_cuda/stream.pxd b/python/rmm/rmm/_cuda/stream.pxd
index 3c3d3aa6f..e91e2ce58 100644
--- a/python/rmm/rmm/_cuda/stream.pxd
+++ b/python/rmm/rmm/_cuda/stream.pxd
@@ -1,4 +1,4 @@
-# Copyright (c) 2020, NVIDIA CORPORATION.
+# Copyright (c) 2020-2024, NVIDIA CORPORATION.
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,7 +16,7 @@ from cuda.ccudart cimport cudaStream_t from libc.stdint cimport uintptr_t from libcpp cimport bool -from rmm._lib.cuda_stream_view cimport cuda_stream_view +from rmm.librmm.cuda_stream_view cimport cuda_stream_view cdef class Stream: diff --git a/python/rmm/rmm/_cuda/stream.pyx b/python/rmm/rmm/_cuda/stream.pyx index 4d5ff5232..37dcbd610 100644 --- a/python/rmm/rmm/_cuda/stream.pyx +++ b/python/rmm/rmm/_cuda/stream.pyx @@ -16,13 +16,13 @@ from cuda.ccudart cimport cudaStream_t from libc.stdint cimport uintptr_t from libcpp cimport bool -from rmm._lib.cuda_stream cimport CudaStream -from rmm._lib.cuda_stream_view cimport ( +from rmm.librmm.cuda_stream_view cimport ( cuda_stream_default, cuda_stream_legacy, cuda_stream_per_thread, cuda_stream_view, ) +from rmm.pylibrmm.cuda_stream cimport CudaStream cdef class Stream: diff --git a/python/rmm/rmm/_lib/__init__.py b/python/rmm/rmm/_lib/__init__.py index 0b8672ef6..7cfddab60 100644 --- a/python/rmm/rmm/_lib/__init__.py +++ b/python/rmm/rmm/_lib/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2021, NVIDIA CORPORATION. +# Copyright (c) 2018-2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,4 +12,4 @@ # See the License for the specific language governing permissions and # limitations under the License. -from .device_buffer import DeviceBuffer +from rmm.pylibrmm import * diff --git a/python/rmm/rmm/_lib/cuda_stream.pxd b/python/rmm/rmm/_lib/cuda_stream.pxd index e224cf9af..afc365fbb 100644 --- a/python/rmm/rmm/_lib/cuda_stream.pxd +++ b/python/rmm/rmm/_lib/cuda_stream.pxd @@ -1,4 +1,4 @@ -# Copyright (c) 2020, NVIDIA CORPORATION. +# Copyright (c) 2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,26 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. -cimport cython -from cuda.ccudart cimport cudaStream_t -from libcpp cimport bool -from libcpp.memory cimport unique_ptr - -from rmm._lib.cuda_stream_view cimport cuda_stream_view - - -cdef extern from "rmm/cuda_stream.hpp" namespace "rmm" nogil: - cdef cppclass cuda_stream: - cuda_stream() except + - bool is_valid() except + - cudaStream_t value() except + - cuda_stream_view view() except + - void synchronize() except + - void synchronize_no_throw() - - -@cython.final -cdef class CudaStream: - cdef unique_ptr[cuda_stream] c_obj - cdef cudaStream_t value(self) except * nogil - cdef bool is_valid(self) except * nogil +from rmm.librmm.cuda_stream cimport cuda_stream +from rmm.pylibrmm.cuda_stream cimport CudaStream diff --git a/python/rmm/rmm/_lib/cuda_stream_pool.pxd b/python/rmm/rmm/_lib/cuda_stream_pool.pxd index 0286a9377..4da59cc68 100644 --- a/python/rmm/rmm/_lib/cuda_stream_pool.pxd +++ b/python/rmm/rmm/_lib/cuda_stream_pool.pxd @@ -1,4 +1,4 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. +# Copyright (c) 2021-2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,14 +12,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-cimport cython - -from rmm._lib.cuda_stream_view cimport cuda_stream_view - - -cdef extern from "rmm/cuda_stream_pool.hpp" namespace "rmm" nogil: - cdef cppclass cuda_stream_pool: - cuda_stream_pool(size_t pool_size) - cuda_stream_view get_stream() - cuda_stream_view get_stream(size_t stream_id) except + - size_t get_pool_size() +from rmm.librmm.cuda_stream_pool cimport cuda_stream_pool diff --git a/python/rmm/rmm/_lib/cuda_stream_view.pxd b/python/rmm/rmm/_lib/cuda_stream_view.pxd index bf0d33c24..c336b0fe8 100644 --- a/python/rmm/rmm/_lib/cuda_stream_view.pxd +++ b/python/rmm/rmm/_lib/cuda_stream_view.pxd @@ -1,4 +1,4 @@ -# Copyright (c) 2020, NVIDIA CORPORATION. +# Copyright (c) 2020-2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,21 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -from cuda.ccudart cimport cudaStream_t -from libcpp cimport bool - - -cdef extern from "rmm/cuda_stream_view.hpp" namespace "rmm" nogil: - cdef cppclass cuda_stream_view: - cuda_stream_view() - cuda_stream_view(cudaStream_t) - cudaStream_t value() - bool is_default() - bool is_per_thread_default() - void synchronize() except + - - cdef bool operator==(cuda_stream_view const, cuda_stream_view const) - - const cuda_stream_view cuda_stream_default - const cuda_stream_view cuda_stream_legacy - const cuda_stream_view cuda_stream_per_thread +from rmm.librmm.cuda_stream_view cimport ( + cuda_stream_default, + cuda_stream_legacy, + cuda_stream_per_thread, + cuda_stream_view, +) diff --git a/python/rmm/rmm/_lib/device_buffer.pxd b/python/rmm/rmm/_lib/device_buffer.pxd index 0da9ace0c..22833b1b8 100644 --- a/python/rmm/rmm/_lib/device_buffer.pxd +++ b/python/rmm/rmm/_lib/device_buffer.pxd @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,105 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from libc.stdint cimport uintptr_t -from libcpp.memory cimport unique_ptr - -from rmm._cuda.stream cimport Stream -from rmm._lib.cuda_stream_view cimport cuda_stream_view -from rmm._lib.memory_resource cimport ( - DeviceMemoryResource, - device_memory_resource, +from rmm.librmm.device_buffer cimport ( + cuda_device_id, + device_buffer, + get_current_cuda_device, + prefetch, +) +from rmm.pylibrmm.device_buffer cimport ( + DeviceBuffer, + copy_device_to_ptr, + copy_host_to_ptr, + copy_ptr_to_host, + to_device, ) - - -cdef extern from "rmm/mr/device/per_device_resource.hpp" namespace "rmm" nogil: - cdef cppclass cuda_device_id: - ctypedef int value_type - cuda_device_id() - cuda_device_id(value_type id) - value_type value() - - cdef cuda_device_id get_current_cuda_device() - -cdef extern from "rmm/prefetch.hpp" namespace "rmm" nogil: - cdef void prefetch(const void* ptr, - size_t bytes, - cuda_device_id device, - cuda_stream_view stream) except + - -cdef extern from "rmm/device_buffer.hpp" namespace "rmm" nogil: - cdef cppclass device_buffer: - device_buffer() - device_buffer( - size_t size, - cuda_stream_view stream, - device_memory_resource * - ) except + - device_buffer( - const void* source_data, - size_t size, - cuda_stream_view stream, - device_memory_resource * - ) except + - device_buffer( - const device_buffer buf, - cuda_stream_view stream, - device_memory_resource * - ) except + - void reserve(size_t new_capacity, cuda_stream_view stream) except + - void resize(size_t new_size, cuda_stream_view stream) except + - void shrink_to_fit(cuda_stream_view stream) except + - void* data() - size_t size() - size_t capacity() - - -cdef class DeviceBuffer: - cdef unique_ptr[device_buffer] c_obj - - # Holds a reference to the DeviceMemoryResource used for allocation. - # Ensures the MR does not get destroyed before this DeviceBuffer. `mr` is - # needed for deallocation - cdef DeviceMemoryResource mr - - # Holds a reference to the stream used by the underlying `device_buffer`. - # Ensures the stream does not get destroyed before this DeviceBuffer - cdef Stream stream - - @staticmethod - cdef DeviceBuffer c_from_unique_ptr( - unique_ptr[device_buffer] ptr, - Stream stream=*, - DeviceMemoryResource mr=*, - ) - - @staticmethod - cdef DeviceBuffer c_to_device(const unsigned char[::1] b, - Stream stream=*) except * - cpdef copy_to_host(self, ary=*, Stream stream=*) - cpdef copy_from_host(self, ary, Stream stream=*) - cpdef copy_from_device(self, cuda_ary, Stream stream=*) - cpdef bytes tobytes(self, Stream stream=*) - - cdef size_t c_size(self) except * - cpdef void reserve(self, size_t new_capacity, Stream stream=*) except * - cpdef void resize(self, size_t new_size, Stream stream=*) except * - cpdef size_t capacity(self) except * - cdef void* c_data(self) except * - - cdef device_buffer c_release(self) except * - -cpdef DeviceBuffer to_device(const unsigned char[::1] b, - Stream stream=*) -cpdef void copy_ptr_to_host(uintptr_t db, - unsigned char[::1] hb, - Stream stream=*) except * - -cpdef void copy_host_to_ptr(const unsigned char[::1] hb, - uintptr_t db, - Stream stream=*) except * - -cpdef void copy_device_to_ptr(uintptr_t d_src, - uintptr_t d_dst, - size_t count, - Stream stream=*) except * diff --git a/python/rmm/rmm/_lib/device_uvector.pxd b/python/rmm/rmm/_lib/device_uvector.pxd index 29e122bbf..230b0afb3 100644 --- a/python/rmm/rmm/_lib/device_uvector.pxd +++ b/python/rmm/rmm/_lib/device_uvector.pxd @@ -1,4 +1,4 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. 
+# Copyright (c) 2021-2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,28 +12,4 @@ # See the License for the specific language governing permissions and # limitations under the License. -from rmm._lib.cuda_stream_view cimport cuda_stream_view -from rmm._lib.device_buffer cimport device_buffer -from rmm._lib.memory_resource cimport device_memory_resource - - -cdef extern from "rmm/device_buffer.hpp" namespace "rmm" nogil: - cdef cppclass device_uvector[T]: - device_uvector(size_t size, cuda_stream_view stream) except + - T* element_ptr(size_t index) - void set_element(size_t element_index, const T& v, cuda_stream_view s) - void set_element_async( - size_t element_index, - const T& v, - cuda_stream_view s - ) except + - T front_element(cuda_stream_view s) except + - T back_element(cuda_stream_view s) except + - void reserve(size_t new_capacity, cuda_stream_view stream) except + - void resize(size_t new_size, cuda_stream_view stream) except + - void shrink_to_fit(cuda_stream_view stream) except + - device_buffer release() - size_t capacity() - T* data() - size_t size() - device_memory_resource* memory_resource() +from rmm.librmm.device_uvector cimport device_uvector diff --git a/python/rmm/rmm/_lib/helper.pxd b/python/rmm/rmm/_lib/helper.pxd index 8ca151c00..4a5159435 100644 --- a/python/rmm/rmm/_lib/helper.pxd +++ b/python/rmm/rmm/_lib/helper.pxd @@ -12,5 +12,4 @@ # See the License for the specific language governing permissions and # limitations under the License. - -cdef object parse_bytes(object s) except * +from rmm.pylibrmm.helper cimport parse_bytes diff --git a/python/rmm/rmm/_lib/logger.pxd b/python/rmm/rmm/_lib/logger.pxd new file mode 100644 index 000000000..bef05c903 --- /dev/null +++ b/python/rmm/rmm/_lib/logger.pxd @@ -0,0 +1,24 @@ +# Copyright (c) 2023-2024, NVIDIA CORPORATION. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from rmm.librmm._logger cimport logger, logging_level, spdlog_logger +from rmm.pylibrmm.logger cimport ( + _validate_level_type, + flush_logger, + get_flush_level, + get_logging_level, + set_flush_level, + set_logging_level, + should_log, +) diff --git a/python/rmm/rmm/_lib/memory_resource.pxd b/python/rmm/rmm/_lib/memory_resource.pxd index 000a3fe1e..983063914 100644 --- a/python/rmm/rmm/_lib/memory_resource.pxd +++ b/python/rmm/rmm/_lib/memory_resource.pxd @@ -1,4 +1,4 @@ -# Copyright (c) 2020-2024, NVIDIA CORPORATION. +# Copyright (c) 2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,92 +12,50 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from libc.stdint cimport int8_t -from libcpp.memory cimport shared_ptr -from libcpp.pair cimport pair -from libcpp.string cimport string -from libcpp.vector cimport vector - -from rmm._lib.cuda_stream_view cimport cuda_stream_view - - -cdef extern from "rmm/mr/device/device_memory_resource.hpp" \ - namespace "rmm::mr" nogil: - cdef cppclass device_memory_resource: - void* allocate(size_t bytes) except + - void* allocate(size_t bytes, cuda_stream_view stream) except + - void deallocate(void* ptr, size_t bytes) except + - void deallocate( - void* ptr, - size_t bytes, - cuda_stream_view stream - ) except + - -cdef extern from "rmm/cuda_device.hpp" namespace "rmm" nogil: - size_t percent_of_free_device_memory(int percent) except + - pair[size_t, size_t] available_device_memory() except + - -cdef class DeviceMemoryResource: - cdef shared_ptr[device_memory_resource] c_obj - cdef device_memory_resource* get_mr(self) noexcept nogil - -cdef class UpstreamResourceAdaptor(DeviceMemoryResource): - cdef readonly DeviceMemoryResource upstream_mr - - cpdef DeviceMemoryResource get_upstream(self) - -cdef class CudaMemoryResource(DeviceMemoryResource): - pass - -cdef class ManagedMemoryResource(DeviceMemoryResource): - pass - -cdef class SystemMemoryResource(DeviceMemoryResource): - pass - -cdef class SamHeadroomMemoryResource(DeviceMemoryResource): - pass - -cdef class CudaAsyncMemoryResource(DeviceMemoryResource): - pass - -cdef class PoolMemoryResource(UpstreamResourceAdaptor): - pass - -cdef class FixedSizeMemoryResource(UpstreamResourceAdaptor): - pass - -cdef class BinningMemoryResource(UpstreamResourceAdaptor): - - cdef readonly list _bin_mrs - - cpdef add_bin( - self, - size_t allocation_size, - DeviceMemoryResource bin_resource=*) - -cdef class CallbackMemoryResource(DeviceMemoryResource): - cdef object _allocate_func - cdef object _deallocate_func - -cdef class LimitingResourceAdaptor(UpstreamResourceAdaptor): - pass - -cdef class LoggingResourceAdaptor(UpstreamResourceAdaptor): - cdef object _log_file_name - cpdef get_file_name(self) - cpdef flush(self) - -cdef class StatisticsResourceAdaptor(UpstreamResourceAdaptor): - pass - -cdef class TrackingResourceAdaptor(UpstreamResourceAdaptor): - pass - -cdef class FailureCallbackResourceAdaptor(UpstreamResourceAdaptor): - cdef object _callback - -cdef class PrefetchResourceAdaptor(UpstreamResourceAdaptor): - pass - -cpdef DeviceMemoryResource get_current_device_resource() +from rmm.librmm.memory_resource cimport ( + CppExcept, + allocate_callback_t, + allocation_handle_type, + available_device_memory, + binning_memory_resource, + callback_memory_resource, + cuda_async_memory_resource, + cuda_memory_resource, + deallocate_callback_t, + device_memory_resource, + failure_callback_resource_adaptor, + failure_callback_t, + fixed_size_memory_resource, + limiting_resource_adaptor, + logging_resource_adaptor, + managed_memory_resource, + percent_of_free_device_memory, + pool_memory_resource, + prefetch_resource_adaptor, + sam_headroom_memory_resource, + statistics_resource_adaptor, + system_memory_resource, + throw_cpp_except, + tracking_resource_adaptor, + translate_python_except_to_cpp, +) +from rmm.pylibrmm.memory_resource cimport ( + BinningMemoryResource, + CallbackMemoryResource, + CudaAsyncMemoryResource, + CudaMemoryResource, + DeviceMemoryResource, + FailureCallbackResourceAdaptor, + FixedSizeMemoryResource, + LimitingResourceAdaptor, + LoggingResourceAdaptor, + ManagedMemoryResource, + PoolMemoryResource, + PrefetchResourceAdaptor, + 
SamHeadroomMemoryResource, + StatisticsResourceAdaptor, + SystemMemoryResource, + TrackingResourceAdaptor, + UpstreamResourceAdaptor, + get_current_device_resource, +) diff --git a/python/rmm/rmm/_lib/per_device_resource.pxd b/python/rmm/rmm/_lib/per_device_resource.pxd index c33217622..29487f503 100644 --- a/python/rmm/rmm/_lib/per_device_resource.pxd +++ b/python/rmm/rmm/_lib/per_device_resource.pxd @@ -1,23 +1,21 @@ -from rmm._lib.memory_resource cimport device_memory_resource +# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. - -cdef extern from "rmm/mr/device/per_device_resource.hpp" namespace "rmm" nogil: - cdef cppclass cuda_device_id: - ctypedef int value_type - - cuda_device_id(value_type id) - - value_type value() - -cdef extern from "rmm/mr/device/per_device_resource.hpp" \ - namespace "rmm::mr" nogil: - cdef device_memory_resource* set_current_device_resource( - device_memory_resource* new_mr - ) - cdef device_memory_resource* get_current_device_resource() - cdef device_memory_resource* set_per_device_resource( - cuda_device_id id, device_memory_resource* new_mr - ) - cdef device_memory_resource* get_per_device_resource ( - cuda_device_id id - ) +from rmm.librmm.per_device_resource cimport ( + cuda_device_id, + get_current_device_resource, + get_per_device_resource, + set_current_device_resource, + set_per_device_resource, +) diff --git a/python/rmm/rmm/allocators/cupy.py b/python/rmm/rmm/allocators/cupy.py index 89947c46b..780ff2abf 100644 --- a/python/rmm/rmm/allocators/cupy.py +++ b/python/rmm/rmm/allocators/cupy.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. +# Copyright (c) 2023-2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from rmm import _lib as librmm +from rmm import pylibrmm from rmm._cuda.stream import Stream try: @@ -34,7 +34,7 @@ def rmm_cupy_allocator(nbytes): raise ModuleNotFoundError("No module named 'cupy'") stream = Stream(obj=cupy.cuda.get_current_stream()) - buf = librmm.device_buffer.DeviceBuffer(size=nbytes, stream=stream) + buf = pylibrmm.device_buffer.DeviceBuffer(size=nbytes, stream=stream) dev_id = -1 if buf.ptr else cupy.cuda.device.get_device_id() mem = cupy.cuda.UnownedMemory( ptr=buf.ptr, size=buf.size, owner=buf, device_id=dev_id diff --git a/python/rmm/rmm/allocators/numba.py b/python/rmm/rmm/allocators/numba.py index 5e87b87b6..fd9bacb5a 100644 --- a/python/rmm/rmm/allocators/numba.py +++ b/python/rmm/rmm/allocators/numba.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. +# Copyright (c) 2023-2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -19,7 +19,7 @@ from numba import config, cuda from numba.cuda import HostOnlyCUDAMemoryManager, IpcHandle, MemoryPointer -from rmm import _lib as librmm +from rmm import pylibrmm def _make_emm_plugin_finalizer(handle, allocations): @@ -70,7 +70,7 @@ def memalloc(self, size): """ Allocate an on-device array from the RMM pool. """ - buf = librmm.DeviceBuffer(size=size) + buf = pylibrmm.DeviceBuffer(size=size) ctx = self.context if config.CUDA_USE_NVIDIA_BINDING: diff --git a/python/rmm/rmm/allocators/torch.py b/python/rmm/rmm/allocators/torch.py index 753da66da..eee0e9df9 100644 --- a/python/rmm/rmm/allocators/torch.py +++ b/python/rmm/rmm/allocators/torch.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. +# Copyright (c) 2023-2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -28,10 +28,10 @@ # allocator .so relative to the current file because the current file # is pure Python and will therefore be in the source directory. # Instead, we search relative to an arbitrary file in the compiled - # package. We use the _lib.lib module because it is small. - from rmm._lib import lib + # package. We use the librmm._logger module because it is small. + from rmm.librmm import _logger - sofile = pathlib.Path(lib.__file__).parent / "_torch_allocator.so" + sofile = pathlib.Path(_logger.__file__).parent / "_torch_allocator.so" rmm_torch_allocator = CUDAPluggableAllocator( str(sofile.absolute()), alloc_fn_name="allocate", diff --git a/python/rmm/rmm/_lib/CMakeLists.txt b/python/rmm/rmm/librmm/CMakeLists.txt similarity index 93% rename from python/rmm/rmm/_lib/CMakeLists.txt rename to python/rmm/rmm/librmm/CMakeLists.txt index 7cdfed971..5da2a1a01 100644 --- a/python/rmm/rmm/_lib/CMakeLists.txt +++ b/python/rmm/rmm/librmm/CMakeLists.txt @@ -12,8 +12,7 @@ # the License. # ============================================================================= -set(cython_sources device_buffer.pyx lib.pyx logger.pyx memory_resource.pyx cuda_stream.pyx - helper.pyx) +set(cython_sources _logger.pyx) set(linked_libraries rmm::rmm) # Build all of the Cython targets diff --git a/python/rmm/rmm/_lib/__init__.pxd b/python/rmm/rmm/librmm/__init__.py similarity index 100% rename from python/rmm/rmm/_lib/__init__.pxd rename to python/rmm/rmm/librmm/__init__.py diff --git a/python/rmm/rmm/librmm/_logger.pxd b/python/rmm/rmm/librmm/_logger.pxd new file mode 100644 index 000000000..241a748c3 --- /dev/null +++ b/python/rmm/rmm/librmm/_logger.pxd @@ -0,0 +1,66 @@ +# Copyright (c) 2023-2024, NVIDIA CORPORATION. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcpp cimport bool + + +cdef extern from "spdlog/common.h" namespace "spdlog::level" nogil: + cpdef enum logging_level "spdlog::level::level_enum": + """ + The debug logging level for RMM. + + Debug logging prints messages to a log file. See + `Debug Logging `_ + for more information. 
+ + Valid levels, in decreasing order of verbosity, are TRACE, DEBUG, + INFO, WARN, ERR, CRITICAL, and OFF. Default is INFO. + + Examples + -------- + >>> import rmm + >>> rmm.logging_level.DEBUG + + >>> rmm.logging_level.DEBUG.value + 1 + >>> rmm.logging_level.DEBUG.name + 'DEBUG' + + See Also + -------- + set_logging_level : Set the debug logging level + get_logging_level : Get the current debug logging level + """ + TRACE "spdlog::level::trace" + DEBUG "spdlog::level::debug" + INFO "spdlog::level::info" + WARN "spdlog::level::warn" + ERR "spdlog::level::err" + CRITICAL "spdlog::level::critical" + OFF "spdlog::level::off" + + +cdef extern from "spdlog/spdlog.h" namespace "spdlog" nogil: + cdef cppclass spdlog_logger "spdlog::logger": + spdlog_logger() except + + void set_level(logging_level level) + logging_level level() + void flush() except + + void flush_on(logging_level level) + logging_level flush_level() + bool should_log(logging_level msg_level) + + +cdef extern from "rmm/logger.hpp" namespace "rmm" nogil: + cdef spdlog_logger& logger() except + diff --git a/python/rmm/rmm/_lib/lib.pxd b/python/rmm/rmm/librmm/_logger.pyx similarity index 70% rename from python/rmm/rmm/_lib/lib.pxd rename to python/rmm/rmm/librmm/_logger.pyx index e35b672e4..4392cb106 100644 --- a/python/rmm/rmm/_lib/lib.pxd +++ b/python/rmm/rmm/librmm/_logger.pyx @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2020, NVIDIA CORPORATION. +# Copyright (c) 2023-2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,9 +12,4 @@ # See the License for the specific language governing permissions and # limitations under the License. -from libc.stdint cimport uintptr_t -from libcpp cimport bool -from libcpp.utility cimport pair -from libcpp.vector cimport vector - -ctypedef pair[const char*, unsigned int] caller_pair +from rmm.librmm._logger cimport logging_level # no-cython-lint diff --git a/python/rmm/rmm/_lib/_torch_allocator.cpp b/python/rmm/rmm/librmm/_torch_allocator.cpp similarity index 100% rename from python/rmm/rmm/_lib/_torch_allocator.cpp rename to python/rmm/rmm/librmm/_torch_allocator.cpp diff --git a/python/rmm/rmm/librmm/cuda_stream.pxd b/python/rmm/rmm/librmm/cuda_stream.pxd new file mode 100644 index 000000000..3f2ac3361 --- /dev/null +++ b/python/rmm/rmm/librmm/cuda_stream.pxd @@ -0,0 +1,28 @@ +# Copyright (c) 2020-2024, NVIDIA CORPORATION. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from cuda.ccudart cimport cudaStream_t +from libcpp cimport bool + +from rmm.librmm.cuda_stream_view cimport cuda_stream_view + + +cdef extern from "rmm/cuda_stream.hpp" namespace "rmm" nogil: + cdef cppclass cuda_stream: + cuda_stream() except + + bool is_valid() except + + cudaStream_t value() except + + cuda_stream_view view() except + + void synchronize() except + + void synchronize_no_throw() diff --git a/python/rmm/rmm/librmm/cuda_stream_pool.pxd b/python/rmm/rmm/librmm/cuda_stream_pool.pxd new file mode 100644 index 000000000..4f2cbb36d --- /dev/null +++ b/python/rmm/rmm/librmm/cuda_stream_pool.pxd @@ -0,0 +1,23 @@ +# Copyright (c) 2021-2024, NVIDIA CORPORATION. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from rmm.librmm.cuda_stream_view cimport cuda_stream_view + + +cdef extern from "rmm/cuda_stream_pool.hpp" namespace "rmm" nogil: + cdef cppclass cuda_stream_pool: + cuda_stream_pool(size_t pool_size) + cuda_stream_view get_stream() + cuda_stream_view get_stream(size_t stream_id) except + + size_t get_pool_size() diff --git a/python/rmm/rmm/librmm/cuda_stream_view.pxd b/python/rmm/rmm/librmm/cuda_stream_view.pxd new file mode 100644 index 000000000..bf0d33c24 --- /dev/null +++ b/python/rmm/rmm/librmm/cuda_stream_view.pxd @@ -0,0 +1,32 @@ +# Copyright (c) 2020, NVIDIA CORPORATION. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from cuda.ccudart cimport cudaStream_t +from libcpp cimport bool + + +cdef extern from "rmm/cuda_stream_view.hpp" namespace "rmm" nogil: + cdef cppclass cuda_stream_view: + cuda_stream_view() + cuda_stream_view(cudaStream_t) + cudaStream_t value() + bool is_default() + bool is_per_thread_default() + void synchronize() except + + + cdef bool operator==(cuda_stream_view const, cuda_stream_view const) + + const cuda_stream_view cuda_stream_default + const cuda_stream_view cuda_stream_legacy + const cuda_stream_view cuda_stream_per_thread diff --git a/python/rmm/rmm/librmm/device_buffer.pxd b/python/rmm/rmm/librmm/device_buffer.pxd new file mode 100644 index 000000000..1c503ac9a --- /dev/null +++ b/python/rmm/rmm/librmm/device_buffer.pxd @@ -0,0 +1,58 @@ +# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from rmm.librmm.cuda_stream_view cimport cuda_stream_view +from rmm.librmm.memory_resource cimport device_memory_resource + + +cdef extern from "rmm/mr/device/per_device_resource.hpp" namespace "rmm" nogil: + cdef cppclass cuda_device_id: + ctypedef int value_type + cuda_device_id() + cuda_device_id(value_type id) + value_type value() + + cdef cuda_device_id get_current_cuda_device() + +cdef extern from "rmm/prefetch.hpp" namespace "rmm" nogil: + cdef void prefetch(const void* ptr, + size_t bytes, + cuda_device_id device, + cuda_stream_view stream) except + + +cdef extern from "rmm/device_buffer.hpp" namespace "rmm" nogil: + cdef cppclass device_buffer: + device_buffer() + device_buffer( + size_t size, + cuda_stream_view stream, + device_memory_resource * + ) except + + device_buffer( + const void* source_data, + size_t size, + cuda_stream_view stream, + device_memory_resource * + ) except + + device_buffer( + const device_buffer buf, + cuda_stream_view stream, + device_memory_resource * + ) except + + void reserve(size_t new_capacity, cuda_stream_view stream) except + + void resize(size_t new_size, cuda_stream_view stream) except + + void shrink_to_fit(cuda_stream_view stream) except + + void* data() + size_t size() + size_t capacity() diff --git a/python/rmm/rmm/librmm/device_uvector.pxd b/python/rmm/rmm/librmm/device_uvector.pxd new file mode 100644 index 000000000..f560a9e38 --- /dev/null +++ b/python/rmm/rmm/librmm/device_uvector.pxd @@ -0,0 +1,39 @@ +# Copyright (c) 2021-2024, NVIDIA CORPORATION. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from rmm.librmm.cuda_stream_view cimport cuda_stream_view +from rmm.librmm.device_buffer cimport device_buffer +from rmm.librmm.memory_resource cimport device_memory_resource + + +cdef extern from "rmm/device_buffer.hpp" namespace "rmm" nogil: + cdef cppclass device_uvector[T]: + device_uvector(size_t size, cuda_stream_view stream) except + + T* element_ptr(size_t index) + void set_element(size_t element_index, const T& v, cuda_stream_view s) + void set_element_async( + size_t element_index, + const T& v, + cuda_stream_view s + ) except + + T front_element(cuda_stream_view s) except + + T back_element(cuda_stream_view s) except + + void reserve(size_t new_capacity, cuda_stream_view stream) except + + void resize(size_t new_size, cuda_stream_view stream) except + + void shrink_to_fit(cuda_stream_view stream) except + + device_buffer release() + size_t capacity() + T* data() + size_t size() + device_memory_resource* memory_resource() diff --git a/python/rmm/rmm/librmm/memory_resource.pxd b/python/rmm/rmm/librmm/memory_resource.pxd new file mode 100644 index 000000000..9ddaf04b9 --- /dev/null +++ b/python/rmm/rmm/librmm/memory_resource.pxd @@ -0,0 +1,230 @@ +# Copyright (c) 2020-2024, NVIDIA CORPORATION. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This import is needed for Cython typing in translate_python_except_to_cpp +# See https://github.com/cython/cython/issues/5589 +from builtins import BaseException + +from libc.stddef cimport size_t +from libc.stdint cimport int8_t, int64_t +from libcpp cimport bool +from libcpp.optional cimport optional +from libcpp.pair cimport pair +from libcpp.string cimport string + +from rmm.librmm.cuda_stream_view cimport cuda_stream_view +from rmm.librmm.memory_resource cimport device_memory_resource + + +cdef extern from "rmm/mr/device/device_memory_resource.hpp" \ + namespace "rmm::mr" nogil: + cdef cppclass device_memory_resource: + void* allocate(size_t bytes) except + + void* allocate(size_t bytes, cuda_stream_view stream) except + + void deallocate(void* ptr, size_t bytes) except + + void deallocate( + void* ptr, + size_t bytes, + cuda_stream_view stream + ) except + + +cdef extern from "rmm/cuda_device.hpp" namespace "rmm" nogil: + size_t percent_of_free_device_memory(int percent) except + + pair[size_t, size_t] available_device_memory() except + + +# Transparent handle of a C++ exception +ctypedef pair[int, string] CppExcept + +cdef inline CppExcept translate_python_except_to_cpp(err: BaseException) noexcept: + """Translate a Python exception into a C++ exception handle + + The returned exception handle can then be thrown by `throw_cpp_except()`, + which MUST be done without holding the GIL. + + This is useful when C++ calls a Python function and needs to catch or + propagate exceptions. + """ + if isinstance(err, MemoryError): + return CppExcept(0, str.encode(str(err))) + return CppExcept(-1, str.encode(str(err))) + +# Implementation of `throw_cpp_except()`, which throws a given `CppExcept`. 
+# This function MUST be called without the GIL otherwise the thrown C++ +# exception are translated back into a Python exception. +cdef extern from *: + """ + #include + #include + + void throw_cpp_except(std::pair res) { + switch(res.first) { + case 0: + throw rmm::out_of_memory(res.second); + default: + throw std::runtime_error(res.second); + } + } + """ + void throw_cpp_except(CppExcept) nogil + + +cdef extern from "rmm/mr/device/cuda_memory_resource.hpp" \ + namespace "rmm::mr" nogil: + cdef cppclass cuda_memory_resource(device_memory_resource): + cuda_memory_resource() except + + +cdef extern from "rmm/mr/device/managed_memory_resource.hpp" \ + namespace "rmm::mr" nogil: + cdef cppclass managed_memory_resource(device_memory_resource): + managed_memory_resource() except + + +cdef extern from "rmm/mr/device/system_memory_resource.hpp" \ + namespace "rmm::mr" nogil: + cdef cppclass system_memory_resource(device_memory_resource): + system_memory_resource() except + + +cdef extern from "rmm/mr/device/sam_headroom_memory_resource.hpp" \ + namespace "rmm::mr" nogil: + cdef cppclass sam_headroom_memory_resource(device_memory_resource): + sam_headroom_memory_resource(size_t headroom) except + + +cdef extern from "rmm/mr/device/cuda_async_memory_resource.hpp" \ + namespace "rmm::mr" nogil: + + cdef cppclass cuda_async_memory_resource(device_memory_resource): + cuda_async_memory_resource( + optional[size_t] initial_pool_size, + optional[size_t] release_threshold, + optional[allocation_handle_type] export_handle_type) except + + +# TODO: when we adopt Cython 3.0 use enum class +cdef extern from "rmm/mr/device/cuda_async_memory_resource.hpp" \ + namespace \ + "rmm::mr::cuda_async_memory_resource::allocation_handle_type" \ + nogil: + enum allocation_handle_type \ + "rmm::mr::cuda_async_memory_resource::allocation_handle_type": + none + posix_file_descriptor + win32 + win32_kmt + + +cdef extern from "rmm/mr/device/pool_memory_resource.hpp" \ + namespace "rmm::mr" nogil: + cdef cppclass pool_memory_resource[Upstream](device_memory_resource): + pool_memory_resource( + Upstream* upstream_mr, + size_t initial_pool_size, + optional[size_t] maximum_pool_size) except + + size_t pool_size() + +cdef extern from "rmm/mr/device/fixed_size_memory_resource.hpp" \ + namespace "rmm::mr" nogil: + cdef cppclass fixed_size_memory_resource[Upstream](device_memory_resource): + fixed_size_memory_resource( + Upstream* upstream_mr, + size_t block_size, + size_t block_to_preallocate) except + + +cdef extern from "rmm/mr/device/callback_memory_resource.hpp" \ + namespace "rmm::mr" nogil: + ctypedef void* (*allocate_callback_t)(size_t, cuda_stream_view, void*) + ctypedef void (*deallocate_callback_t)(void*, size_t, cuda_stream_view, void*) + + cdef cppclass callback_memory_resource(device_memory_resource): + callback_memory_resource( + allocate_callback_t allocate_callback, + deallocate_callback_t deallocate_callback, + void* allocate_callback_arg, + void* deallocate_callback_arg + ) except + + +cdef extern from "rmm/mr/device/binning_memory_resource.hpp" \ + namespace "rmm::mr" nogil: + cdef cppclass binning_memory_resource[Upstream](device_memory_resource): + binning_memory_resource(Upstream* upstream_mr) except + + binning_memory_resource( + Upstream* upstream_mr, + int8_t min_size_exponent, + int8_t max_size_exponent) except + + + void add_bin(size_t allocation_size) except + + void add_bin( + size_t allocation_size, + device_memory_resource* bin_resource) except + + +cdef extern from 
"rmm/mr/device/limiting_resource_adaptor.hpp" \ + namespace "rmm::mr" nogil: + cdef cppclass limiting_resource_adaptor[Upstream](device_memory_resource): + limiting_resource_adaptor( + Upstream* upstream_mr, + size_t allocation_limit) except + + + size_t get_allocated_bytes() except + + size_t get_allocation_limit() except + + +cdef extern from "rmm/mr/device/logging_resource_adaptor.hpp" \ + namespace "rmm::mr" nogil: + cdef cppclass logging_resource_adaptor[Upstream](device_memory_resource): + logging_resource_adaptor( + Upstream* upstream_mr, + string filename) except + + + void flush() except + + +cdef extern from "rmm/mr/device/statistics_resource_adaptor.hpp" \ + namespace "rmm::mr" nogil: + cdef cppclass statistics_resource_adaptor[Upstream](device_memory_resource): + struct counter: + counter() + + int64_t value + int64_t peak + int64_t total + + statistics_resource_adaptor(Upstream* upstream_mr) except + + + counter get_bytes_counter() except + + counter get_allocations_counter() except + + pair[counter, counter] pop_counters() except + + pair[counter, counter] push_counters() except + + +cdef extern from "rmm/mr/device/tracking_resource_adaptor.hpp" \ + namespace "rmm::mr" nogil: + cdef cppclass tracking_resource_adaptor[Upstream](device_memory_resource): + tracking_resource_adaptor( + Upstream* upstream_mr, + bool capture_stacks) except + + + size_t get_allocated_bytes() except + + string get_outstanding_allocations_str() except + + void log_outstanding_allocations() except + + +cdef extern from "rmm/mr/device/failure_callback_resource_adaptor.hpp" \ + namespace "rmm::mr" nogil: + ctypedef bool (*failure_callback_t)(size_t, void*) + cdef cppclass failure_callback_resource_adaptor[Upstream]( + device_memory_resource + ): + failure_callback_resource_adaptor( + Upstream* upstream_mr, + failure_callback_t callback, + void* callback_arg + ) except + + +cdef extern from "rmm/mr/device/prefetch_resource_adaptor.hpp" \ + namespace "rmm::mr" nogil: + cdef cppclass prefetch_resource_adaptor[Upstream](device_memory_resource): + prefetch_resource_adaptor(Upstream* upstream_mr) except + diff --git a/python/rmm/rmm/librmm/per_device_resource.pxd b/python/rmm/rmm/librmm/per_device_resource.pxd new file mode 100644 index 000000000..63ee29056 --- /dev/null +++ b/python/rmm/rmm/librmm/per_device_resource.pxd @@ -0,0 +1,36 @@ +# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from rmm.librmm.memory_resource cimport device_memory_resource + + +cdef extern from "rmm/mr/device/per_device_resource.hpp" namespace "rmm" nogil: + cdef cppclass cuda_device_id: + ctypedef int value_type + + cuda_device_id(value_type id) + + value_type value() + +cdef extern from "rmm/mr/device/per_device_resource.hpp" \ + namespace "rmm::mr" nogil: + cdef device_memory_resource* set_current_device_resource( + device_memory_resource* new_mr + ) + cdef device_memory_resource* get_current_device_resource() + cdef device_memory_resource* set_per_device_resource( + cuda_device_id id, device_memory_resource* new_mr + ) + cdef device_memory_resource* get_per_device_resource ( + cuda_device_id id + ) diff --git a/python/rmm/rmm/mr.py b/python/rmm/rmm/mr.py index 6eb94da0f..3f0c3fce3 100644 --- a/python/rmm/rmm/mr.py +++ b/python/rmm/rmm/mr.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from rmm._lib.memory_resource import ( +from rmm.pylibrmm.memory_resource import ( BinningMemoryResource, CallbackMemoryResource, CudaAsyncMemoryResource, diff --git a/python/rmm/rmm/pylibrmm/CMakeLists.txt b/python/rmm/rmm/pylibrmm/CMakeLists.txt new file mode 100644 index 000000000..0e88f01bb --- /dev/null +++ b/python/rmm/rmm/pylibrmm/CMakeLists.txt @@ -0,0 +1,27 @@ +# ============================================================================= +# Copyright (c) 2022-2024, NVIDIA CORPORATION. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except +# in compliance with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software distributed under the License +# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express +# or implied. See the License for the specific language governing permissions and limitations under +# the License. +# ============================================================================= + +set(cython_sources device_buffer.pyx logger.pyx memory_resource.pyx cuda_stream.pyx helper.pyx) +set(linked_libraries rmm::rmm) + +# Build all of the Cython targets +rapids_cython_create_modules(SOURCE_FILES "${cython_sources}" LINKED_LIBRARIES "${linked_libraries}" + CXX) + +# mark all symbols in these Cython targets "hidden" by default, so they won't collide with symbols +# loaded from other DSOs +foreach(_cython_target IN LISTS RAPIDS_CYTHON_CREATED_TARGETS) + set_target_properties(${_cython_target} PROPERTIES C_VISIBILITY_PRESET hidden + CXX_VISIBILITY_PRESET hidden) +endforeach() diff --git a/python/rmm/rmm/pylibrmm/__init__.py b/python/rmm/rmm/pylibrmm/__init__.py new file mode 100644 index 000000000..0b8672ef6 --- /dev/null +++ b/python/rmm/rmm/pylibrmm/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) 2019-2021, NVIDIA CORPORATION. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from .device_buffer import DeviceBuffer diff --git a/python/rmm/rmm/pylibrmm/cuda_stream.pxd b/python/rmm/rmm/pylibrmm/cuda_stream.pxd new file mode 100644 index 000000000..dd38387c2 --- /dev/null +++ b/python/rmm/rmm/pylibrmm/cuda_stream.pxd @@ -0,0 +1,27 @@ +# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cimport cython +from cuda.ccudart cimport cudaStream_t +from libcpp cimport bool +from libcpp.memory cimport unique_ptr + +from rmm.librmm.cuda_stream cimport cuda_stream + + +@cython.final +cdef class CudaStream: + cdef unique_ptr[cuda_stream] c_obj + cdef cudaStream_t value(self) except * nogil + cdef bool is_valid(self) except * nogil diff --git a/python/rmm/rmm/_lib/cuda_stream.pyx b/python/rmm/rmm/pylibrmm/cuda_stream.pyx similarity index 91% rename from python/rmm/rmm/_lib/cuda_stream.pyx rename to python/rmm/rmm/pylibrmm/cuda_stream.pyx index 0861f0663..d6aa4edc7 100644 --- a/python/rmm/rmm/_lib/cuda_stream.pyx +++ b/python/rmm/rmm/pylibrmm/cuda_stream.pyx @@ -1,4 +1,4 @@ -# Copyright (c) 2020, NVIDIA CORPORATION. +# Copyright (c) 2020-2024, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,6 +16,8 @@ cimport cython from cuda.ccudart cimport cudaStream_t from libcpp cimport bool +from rmm.librmm.cuda_stream cimport cuda_stream + @cython.final cdef class CudaStream: diff --git a/python/rmm/rmm/pylibrmm/device_buffer.pxd b/python/rmm/rmm/pylibrmm/device_buffer.pxd new file mode 100644 index 000000000..a0d287423 --- /dev/null +++ b/python/rmm/rmm/pylibrmm/device_buffer.pxd @@ -0,0 +1,71 @@ +# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libc.stdint cimport uintptr_t +from libcpp.memory cimport unique_ptr + +from rmm._cuda.stream cimport Stream +from rmm.librmm.device_buffer cimport device_buffer +from rmm.pylibrmm.memory_resource cimport DeviceMemoryResource + + +cdef class DeviceBuffer: + cdef unique_ptr[device_buffer] c_obj + + # Holds a reference to the DeviceMemoryResource used for allocation. + # Ensures the MR does not get destroyed before this DeviceBuffer. `mr` is + # needed for deallocation + cdef DeviceMemoryResource mr + + # Holds a reference to the stream used by the underlying `device_buffer`. 
+    # Ensures the stream does not get destroyed before this DeviceBuffer
+    cdef Stream stream
+
+    @staticmethod
+    cdef DeviceBuffer c_from_unique_ptr(
+        unique_ptr[device_buffer] ptr,
+        Stream stream=*,
+        DeviceMemoryResource mr=*,
+    )
+
+    @staticmethod
+    cdef DeviceBuffer c_to_device(const unsigned char[::1] b,
+                                  Stream stream=*) except *
+    cpdef copy_to_host(self, ary=*, Stream stream=*)
+    cpdef copy_from_host(self, ary, Stream stream=*)
+    cpdef copy_from_device(self, cuda_ary, Stream stream=*)
+    cpdef bytes tobytes(self, Stream stream=*)
+
+    cdef size_t c_size(self) except *
+    cpdef void reserve(self, size_t new_capacity, Stream stream=*) except *
+    cpdef void resize(self, size_t new_size, Stream stream=*) except *
+    cpdef size_t capacity(self) except *
+    cdef void* c_data(self) except *
+
+    cdef device_buffer c_release(self) except *
+
+cpdef DeviceBuffer to_device(const unsigned char[::1] b,
+                             Stream stream=*)
+cpdef void copy_ptr_to_host(uintptr_t db,
+                            unsigned char[::1] hb,
+                            Stream stream=*) except *
+
+cpdef void copy_host_to_ptr(const unsigned char[::1] hb,
+                            uintptr_t db,
+                            Stream stream=*) except *
+
+cpdef void copy_device_to_ptr(uintptr_t d_src,
+                              uintptr_t d_dst,
+                              size_t count,
+                              Stream stream=*) except *
diff --git a/python/rmm/rmm/_lib/device_buffer.pyx b/python/rmm/rmm/pylibrmm/device_buffer.pyx
similarity index 96%
rename from python/rmm/rmm/_lib/device_buffer.pyx
rename to python/rmm/rmm/pylibrmm/device_buffer.pyx
index 94a4dc771..76fbceef8 100644
--- a/python/rmm/rmm/_lib/device_buffer.pyx
+++ b/python/rmm/rmm/pylibrmm/device_buffer.pyx
@@ -32,9 +32,16 @@ from cuda.ccudart cimport (
     cudaStream_t,
 )

-from rmm._lib.memory_resource cimport (
+from rmm.librmm.cuda_stream_view cimport cuda_stream_view
+from rmm.librmm.device_buffer cimport (
+    cuda_device_id,
+    device_buffer,
+    get_current_cuda_device,
+    prefetch,
+)
+from rmm.librmm.memory_resource cimport device_memory_resource
+from rmm.pylibrmm.memory_resource cimport (
     DeviceMemoryResource,
-    device_memory_resource,
     get_current_device_resource,
 )

@@ -394,7 +401,7 @@ cpdef DeviceBuffer to_device(const unsigned char[::1] b,
     Examples
     --------
     >>> import rmm
-    >>> db = rmm._lib.device_buffer.to_device(b"abc")
+    >>> db = rmm.pylibrmm.device_buffer.to_device(b"abc")
     >>> print(bytes(db))
     b'abc'
     """
@@ -460,7 +467,7 @@ cpdef void copy_ptr_to_host(uintptr_t db,
     >>> import rmm
     >>> db = rmm.DeviceBuffer.to_device(b"abc")
     >>> hb = bytearray(db.nbytes)
-    >>> rmm._lib.device_buffer.copy_ptr_to_host(db.ptr, hb)
+    >>> rmm.pylibrmm.device_buffer.copy_ptr_to_host(db.ptr, hb)
     >>> print(hb)
     bytearray(b'abc')
     """
@@ -502,7 +509,7 @@ cpdef void copy_host_to_ptr(const unsigned char[::1] hb,
     >>> import rmm
     >>> db = rmm.DeviceBuffer(size=10)
     >>> hb = b"abc"
-    >>> rmm._lib.device_buffer.copy_host_to_ptr(hb, db.ptr)
+    >>> rmm.pylibrmm.device_buffer.copy_host_to_ptr(hb, db.ptr)
     >>> hb = db.copy_to_host()
     >>> print(hb)
     array([97, 98, 99, 0, 0, 0, 0, 0, 0, 0], dtype=uint8)
@@ -541,7 +548,7 @@ cpdef void copy_device_to_ptr(uintptr_t d_src,
     >>> import rmm
     >>> db = rmm.DeviceBuffer(size=5)
     >>> db2 = rmm.DeviceBuffer.to_device(b"abc")
-    >>> rmm._lib.device_buffer.copy_device_to_ptr(db2.ptr, db.ptr, db2.size)
+    >>> rmm.pylibrmm.device_buffer.copy_device_to_ptr(db2.ptr, db.ptr, db2.size)
     >>> hb = db.copy_to_host()
     >>> hb
     array([97, 98, 99, 0, 0], dtype=uint8)
diff --git a/python/rmm/rmm/_lib/lib.pyx b/python/rmm/rmm/pylibrmm/helper.pxd
similarity index 86%
rename from python/rmm/rmm/_lib/lib.pyx
rename to python/rmm/rmm/pylibrmm/helper.pxd
index 46753baa3..8ca151c00 100644
--- a/python/rmm/rmm/_lib/lib.pyx
+++ b/python/rmm/rmm/pylibrmm/helper.pxd
@@ -1,4 +1,4 @@
-# Copyright (c) 2019-2020, NVIDIA CORPORATION.
+# Copyright (c) 2024, NVIDIA CORPORATION.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -11,3 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
+
+cdef object parse_bytes(object s) except *
diff --git a/python/rmm/rmm/_lib/helper.pyx b/python/rmm/rmm/pylibrmm/helper.pyx
similarity index 100%
rename from python/rmm/rmm/_lib/helper.pyx
rename to python/rmm/rmm/pylibrmm/helper.pyx
diff --git a/python/rmm/rmm/_lib/logger.pyx b/python/rmm/rmm/pylibrmm/logger.pyx
similarity index 77%
rename from python/rmm/rmm/_lib/logger.pyx
rename to python/rmm/rmm/pylibrmm/logger.pyx
index 029bbdd79..119e1c92f 100644
--- a/python/rmm/rmm/_lib/logger.pyx
+++ b/python/rmm/rmm/pylibrmm/logger.pyx
@@ -1,4 +1,4 @@
-# Copyright (c) 2023, NVIDIA CORPORATION.
+# Copyright (c) 2023-2024, NVIDIA CORPORATION.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,58 +14,9 @@

 import warnings

-from libcpp cimport bool
-
-
-cdef extern from "spdlog/common.h" namespace "spdlog::level" nogil:
-    cpdef enum logging_level "spdlog::level::level_enum":
-        """
-        The debug logging level for RMM.
-
-        Debug logging prints messages to a log file. See
-        `Debug Logging `_
-        for more information.
-
-        Valid levels, in decreasing order of verbosity, are TRACE, DEBUG,
-        INFO, WARN, ERR, CRITICAL, and OFF. Default is INFO.
-
-        Examples
-        --------
-        >>> import rmm
-        >>> rmm.logging_level.DEBUG
-
-        >>> rmm.logging_level.DEBUG.value
-        1
-        >>> rmm.logging_level.DEBUG.name
-        'DEBUG'
-
-        See Also
-        --------
-        set_logging_level : Set the debug logging level
-        get_logging_level : Get the current debug logging level
-        """
-        TRACE "spdlog::level::trace"
-        DEBUG "spdlog::level::debug"
-        INFO "spdlog::level::info"
-        WARN "spdlog::level::warn"
-        ERR "spdlog::level::err"
-        CRITICAL "spdlog::level::critical"
-        OFF "spdlog::level::off"
-
-
-cdef extern from "spdlog/spdlog.h" namespace "spdlog" nogil:
-    cdef cppclass spdlog_logger "spdlog::logger":
-        spdlog_logger() except +
-        void set_level(logging_level level)
-        logging_level level()
-        void flush() except +
-        void flush_on(logging_level level)
-        logging_level flush_level()
-        bool should_log(logging_level msg_level)
-
-
-cdef extern from "rmm/logger.hpp" namespace "rmm" nogil:
-    cdef spdlog_logger& logger() except +
+from rmm.librmm._logger cimport logger
+
+from rmm.librmm._logger import logging_level


 def _validate_level_type(level):
diff --git a/python/rmm/rmm/pylibrmm/memory_resource.pxd b/python/rmm/rmm/pylibrmm/memory_resource.pxd
new file mode 100644
index 000000000..985d5d31b
--- /dev/null
+++ b/python/rmm/rmm/pylibrmm/memory_resource.pxd
@@ -0,0 +1,83 @@
+# Copyright (c) 2020-2024, NVIDIA CORPORATION.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from libcpp.memory cimport shared_ptr
+
+from rmm.librmm.memory_resource cimport device_memory_resource
+
+
+cdef class DeviceMemoryResource:
+    cdef shared_ptr[device_memory_resource] c_obj
+    cdef device_memory_resource* get_mr(self) noexcept nogil
+
+cdef class UpstreamResourceAdaptor(DeviceMemoryResource):
+    cdef readonly DeviceMemoryResource upstream_mr
+
+    cpdef DeviceMemoryResource get_upstream(self)
+
+cdef class CudaMemoryResource(DeviceMemoryResource):
+    pass
+
+cdef class ManagedMemoryResource(DeviceMemoryResource):
+    pass
+
+cdef class SystemMemoryResource(DeviceMemoryResource):
+    pass
+
+cdef class SamHeadroomMemoryResource(DeviceMemoryResource):
+    pass
+
+cdef class CudaAsyncMemoryResource(DeviceMemoryResource):
+    pass
+
+cdef class PoolMemoryResource(UpstreamResourceAdaptor):
+    pass
+
+cdef class FixedSizeMemoryResource(UpstreamResourceAdaptor):
+    pass
+
+cdef class BinningMemoryResource(UpstreamResourceAdaptor):
+
+    cdef readonly list _bin_mrs
+
+    cpdef add_bin(
+        self,
+        size_t allocation_size,
+        DeviceMemoryResource bin_resource=*)
+
+cdef class CallbackMemoryResource(DeviceMemoryResource):
+    cdef object _allocate_func
+    cdef object _deallocate_func
+
+cdef class LimitingResourceAdaptor(UpstreamResourceAdaptor):
+    pass
+
+cdef class LoggingResourceAdaptor(UpstreamResourceAdaptor):
+    cdef object _log_file_name
+    cpdef get_file_name(self)
+    cpdef flush(self)
+
+cdef class StatisticsResourceAdaptor(UpstreamResourceAdaptor):
+    pass
+
+cdef class TrackingResourceAdaptor(UpstreamResourceAdaptor):
+    pass
+
+cdef class FailureCallbackResourceAdaptor(UpstreamResourceAdaptor):
+    cdef object _callback
+
+cdef class PrefetchResourceAdaptor(UpstreamResourceAdaptor):
+    pass
+
+cpdef DeviceMemoryResource get_current_device_resource()
diff --git a/python/rmm/rmm/_lib/memory_resource.pyx b/python/rmm/rmm/pylibrmm/memory_resource.pyx
similarity index 82%
rename from python/rmm/rmm/_lib/memory_resource.pyx
rename to python/rmm/rmm/pylibrmm/memory_resource.pyx
index 231253e3f..021125567 100644
--- a/python/rmm/rmm/_lib/memory_resource.pyx
+++ b/python/rmm/rmm/pylibrmm/memory_resource.pyx
@@ -22,12 +22,11 @@ from collections import defaultdict
 cimport cython
 from cython.operator cimport dereference as deref
 from libc.stddef cimport size_t
-from libc.stdint cimport int8_t, int64_t, uintptr_t
+from libc.stdint cimport int8_t, uintptr_t
 from libcpp cimport bool
 from libcpp.memory cimport make_unique, unique_ptr
 from libcpp.optional cimport optional
 from libcpp.pair cimport pair
-from libcpp.string cimport string

 from cuda.cudart import cudaError_t

@@ -37,206 +36,43 @@ from rmm._cuda.stream cimport Stream

 from rmm._cuda.stream import DEFAULT_STREAM

-from rmm._lib.cuda_stream_view cimport cuda_stream_view
-from rmm._lib.helper cimport parse_bytes
-from rmm._lib.memory_resource cimport (
-    available_device_memory as c_available_device_memory,
-    percent_of_free_device_memory as c_percent_of_free_device_memory,
-)
-from rmm._lib.per_device_resource cimport (
+from rmm.librmm.cuda_stream_view cimport cuda_stream_view
+from rmm.librmm.per_device_resource cimport (
     cuda_device_id,
     set_per_device_resource as cpp_set_per_device_resource,
 )
+from rmm.pylibrmm.helper cimport parse_bytes

 from rmm.statistics import Statistics

-# Transparent handle of a C++ exception
-ctypedef pair[int, string] CppExcept
-
-cdef CppExcept translate_python_except_to_cpp(err: BaseException) noexcept:
-    """Translate a Python exception into a C++ exception handle
-
-    The returned exception handle can then be thrown by `throw_cpp_except()`,
-    which MUST be done without holding the GIL.
-
-    This is useful when C++ calls a Python function and needs to catch or
-    propagate exceptions.
-    """
-    if isinstance(err, MemoryError):
-        return CppExcept(0, str.encode(str(err)))
-    return CppExcept(-1, str.encode(str(err)))
-
-# Implementation of `throw_cpp_except()`, which throws a given `CppExcept`.
-# This function MUST be called without the GIL otherwise the thrown C++
-# exception are translated back into a Python exception.
-cdef extern from *:
-    """
-    #include
-    #include
-
-    void throw_cpp_except(std::pair res) {
-        switch(res.first) {
-            case 0:
-                throw rmm::out_of_memory(res.second);
-            default:
-                throw std::runtime_error(res.second);
-        }
-    }
-    """
-    void throw_cpp_except(CppExcept) nogil
-
-
-# NOTE: Keep extern declarations in .pyx file as much as possible to avoid
-# leaking dependencies when importing RMM Cython .pxd files
-cdef extern from "rmm/mr/device/cuda_memory_resource.hpp" \
-        namespace "rmm::mr" nogil:
-    cdef cppclass cuda_memory_resource(device_memory_resource):
-        cuda_memory_resource() except +
-
-cdef extern from "rmm/mr/device/managed_memory_resource.hpp" \
-        namespace "rmm::mr" nogil:
-    cdef cppclass managed_memory_resource(device_memory_resource):
-        managed_memory_resource() except +
-
-cdef extern from "rmm/mr/device/system_memory_resource.hpp" \
-        namespace "rmm::mr" nogil:
-    cdef cppclass system_memory_resource(device_memory_resource):
-        system_memory_resource() except +
-
-cdef extern from "rmm/mr/device/sam_headroom_memory_resource.hpp" \
-        namespace "rmm::mr" nogil:
-    cdef cppclass sam_headroom_memory_resource(device_memory_resource):
-        sam_headroom_memory_resource(size_t headroom) except +
-
-cdef extern from "rmm/mr/device/cuda_async_memory_resource.hpp" \
-        namespace "rmm::mr" nogil:
-
-    cdef cppclass cuda_async_memory_resource(device_memory_resource):
-        cuda_async_memory_resource(
-            optional[size_t] initial_pool_size,
-            optional[size_t] release_threshold,
-            optional[allocation_handle_type] export_handle_type) except +
-
-# TODO: when we adopt Cython 3.0 use enum class
-cdef extern from "rmm/mr/device/cuda_async_memory_resource.hpp" \
-        namespace \
-        "rmm::mr::cuda_async_memory_resource::allocation_handle_type" \
-        nogil:
-    enum allocation_handle_type \
-            "rmm::mr::cuda_async_memory_resource::allocation_handle_type":
-        none
-        posix_file_descriptor
-        win32
-        win32_kmt
-
-
-cdef extern from "rmm/mr/device/pool_memory_resource.hpp" \
-        namespace "rmm::mr" nogil:
-    cdef cppclass pool_memory_resource[Upstream](device_memory_resource):
-        pool_memory_resource(
-            Upstream* upstream_mr,
-            size_t initial_pool_size,
-            optional[size_t] maximum_pool_size) except +
-        size_t pool_size()
-
-cdef extern from "rmm/mr/device/fixed_size_memory_resource.hpp" \
-        namespace "rmm::mr" nogil:
-    cdef cppclass fixed_size_memory_resource[Upstream](device_memory_resource):
-        fixed_size_memory_resource(
-            Upstream* upstream_mr,
-            size_t block_size,
-            size_t block_to_preallocate) except +
-
-cdef extern from "rmm/mr/device/callback_memory_resource.hpp" \
-        namespace "rmm::mr" nogil:
-    ctypedef void* (*allocate_callback_t)(size_t, cuda_stream_view, void*)
-    ctypedef void (*deallocate_callback_t)(void*, size_t, cuda_stream_view, void*)
-
-    cdef cppclass callback_memory_resource(device_memory_resource):
-        callback_memory_resource(
-            allocate_callback_t allocate_callback,
-            deallocate_callback_t deallocate_callback,
-            void* allocate_callback_arg,
-            void* deallocate_callback_arg
-        ) except +
-
-cdef extern from "rmm/mr/device/binning_memory_resource.hpp" \
-        namespace "rmm::mr" nogil:
-    cdef cppclass binning_memory_resource[Upstream](device_memory_resource):
-        binning_memory_resource(Upstream* upstream_mr) except +
-        binning_memory_resource(
-            Upstream* upstream_mr,
-            int8_t min_size_exponent,
-            int8_t max_size_exponent) except +
-
-        void add_bin(size_t allocation_size) except +
-        void add_bin(
-            size_t allocation_size,
-            device_memory_resource* bin_resource) except +
-
-cdef extern from "rmm/mr/device/limiting_resource_adaptor.hpp" \
-        namespace "rmm::mr" nogil:
-    cdef cppclass limiting_resource_adaptor[Upstream](device_memory_resource):
-        limiting_resource_adaptor(
-            Upstream* upstream_mr,
-            size_t allocation_limit) except +
-
-        size_t get_allocated_bytes() except +
-        size_t get_allocation_limit() except +
-
-cdef extern from "rmm/mr/device/logging_resource_adaptor.hpp" \
-        namespace "rmm::mr" nogil:
-    cdef cppclass logging_resource_adaptor[Upstream](device_memory_resource):
-        logging_resource_adaptor(
-            Upstream* upstream_mr,
-            string filename) except +
-
-        void flush() except +
-
-cdef extern from "rmm/mr/device/statistics_resource_adaptor.hpp" \
-        namespace "rmm::mr" nogil:
-    cdef cppclass statistics_resource_adaptor[Upstream](device_memory_resource):
-        struct counter:
-            counter()
-
-            int64_t value
-            int64_t peak
-            int64_t total
-
-        statistics_resource_adaptor(Upstream* upstream_mr) except +
-
-        counter get_bytes_counter() except +
-        counter get_allocations_counter() except +
-        pair[counter, counter] pop_counters() except +
-        pair[counter, counter] push_counters() except +
-
-cdef extern from "rmm/mr/device/tracking_resource_adaptor.hpp" \
-        namespace "rmm::mr" nogil:
-    cdef cppclass tracking_resource_adaptor[Upstream](device_memory_resource):
-        tracking_resource_adaptor(
-            Upstream* upstream_mr,
-            bool capture_stacks) except +
-
-        size_t get_allocated_bytes() except +
-        string get_outstanding_allocations_str() except +
-        void log_outstanding_allocations() except +
-
-cdef extern from "rmm/mr/device/failure_callback_resource_adaptor.hpp" \
-        namespace "rmm::mr" nogil:
-    ctypedef bool (*failure_callback_t)(size_t, void*)
-    cdef cppclass failure_callback_resource_adaptor[Upstream](
-        device_memory_resource
-    ):
-        failure_callback_resource_adaptor(
-            Upstream* upstream_mr,
-            failure_callback_t callback,
-            void* callback_arg
-        ) except +
-
-cdef extern from "rmm/mr/device/prefetch_resource_adaptor.hpp" \
-        namespace "rmm::mr" nogil:
-    cdef cppclass prefetch_resource_adaptor[Upstream](device_memory_resource):
-        prefetch_resource_adaptor(Upstream* upstream_mr) except +
+from rmm.librmm.memory_resource cimport (
+    CppExcept,
+    allocate_callback_t,
+    allocation_handle_type,
+    available_device_memory as c_available_device_memory,
+    binning_memory_resource,
+    callback_memory_resource,
+    cuda_async_memory_resource,
+    cuda_memory_resource,
+    deallocate_callback_t,
+    device_memory_resource,
+    failure_callback_resource_adaptor,
+    failure_callback_t,
+    fixed_size_memory_resource,
+    limiting_resource_adaptor,
+    logging_resource_adaptor,
+    managed_memory_resource,
+    percent_of_free_device_memory as c_percent_of_free_device_memory,
+    pool_memory_resource,
+    posix_file_descriptor,
+    prefetch_resource_adaptor,
+    sam_headroom_memory_resource,
+    statistics_resource_adaptor,
+    system_memory_resource,
+    throw_cpp_except,
+    tracking_resource_adaptor,
+    translate_python_except_to_cpp,
+)


 cdef class DeviceMemoryResource:
diff --git a/python/rmm/rmm/_lib/tests/__init__.py b/python/rmm/rmm/pylibrmm/tests/__init__.py
similarity index 100%
rename from python/rmm/rmm/_lib/tests/__init__.py
rename to python/rmm/rmm/pylibrmm/tests/__init__.py
diff --git a/python/rmm/rmm/_lib/tests/test_device_buffer.pyx b/python/rmm/rmm/pylibrmm/tests/test_device_buffer.pyx
similarity index 83%
rename from python/rmm/rmm/_lib/tests/test_device_buffer.pyx
rename to python/rmm/rmm/pylibrmm/tests/test_device_buffer.pyx
index 733383827..ec2ff4def 100644
--- a/python/rmm/rmm/_lib/tests/test_device_buffer.pyx
+++ b/python/rmm/rmm/pylibrmm/tests/test_device_buffer.pyx
@@ -1,4 +1,4 @@
-# Copyright (c) 2020, NVIDIA CORPORATION.
+# Copyright (c) 2020-2024, NVIDIA CORPORATION.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,8 +16,9 @@ import numpy as np

 from libcpp.memory cimport make_unique

-from rmm._lib.cuda_stream_view cimport cuda_stream_default
-from rmm._lib.device_buffer cimport DeviceBuffer, device_buffer
+from rmm.librmm.cuda_stream_view cimport cuda_stream_default
+from rmm.librmm.device_buffer cimport device_buffer
+from rmm.pylibrmm.device_buffer cimport DeviceBuffer


 def test_release():
diff --git a/python/rmm/rmm/tests/test_cython.py b/python/rmm/rmm/tests/test_cython.py
index 82eba2451..5df933435 100644
--- a/python/rmm/rmm/tests/test_cython.py
+++ b/python/rmm/rmm/tests/test_cython.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020-2021, NVIDIA CORPORATION.
+# Copyright (c) 2020-2024, NVIDIA CORPORATION.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -29,7 +29,7 @@ def wrapped(*args, **kwargs):
         return wrapped


-cython_test_modules = ["rmm._lib.tests.test_device_buffer"]
+cython_test_modules = ["rmm.pylibrmm.tests.test_device_buffer"]


 for mod in cython_test_modules:
diff --git a/python/rmm/rmm/tests/test_rmm.py b/python/rmm/rmm/tests/test_rmm.py
index c88d21b38..c03b9e501 100644
--- a/python/rmm/rmm/tests/test_rmm.py
+++ b/python/rmm/rmm/tests/test_rmm.py
@@ -354,7 +354,7 @@ def test_rmm_pool_numba_stream(stream):
     rmm.reinitialize(pool_allocator=True)

     stream = rmm._cuda.stream.Stream(stream)
-    a = rmm._lib.device_buffer.DeviceBuffer(size=3, stream=stream)
+    a = rmm.pylibrmm.device_buffer.DeviceBuffer(size=3, stream=stream)

     assert a.size == 3
     assert a.ptr != 0
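
Reviewer note (not part of the patch): the snippet below is a minimal sketch of how downstream Python code exercises the relocated modules after this rename. It assumes RMM built from this branch and an available CUDA device, and simply mirrors the docstring examples updated in the `device_buffer.pyx` hunks above; Cython code instead cimports the C++ declarations from `rmm.librmm.*` and the wrapper classes from `rmm.pylibrmm.*`, matching the import changes in these hunks.

```python
# Minimal sketch: exercises rmm.pylibrmm.device_buffer (formerly rmm._lib.device_buffer).
import rmm
from rmm.pylibrmm.device_buffer import DeviceBuffer, copy_ptr_to_host, to_device

db = to_device(b"abc")          # copy host bytes into a new DeviceBuffer
hb = bytearray(db.nbytes)
copy_ptr_to_host(db.ptr, hb)    # copy the device data back into host memory
assert bytes(hb) == b"abc"

buf = DeviceBuffer(size=5)      # uninitialized 5-byte device allocation
assert buf.size == 5 and buf.ptr != 0
```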