[Improvement] Reorganize Cython to separate C++ bindings and make Cython classes public #1676

Merged 19 commits on Oct 3, 2024
11 changes: 7 additions & 4 deletions .gitignore
@@ -22,10 +22,13 @@ rmm.egg-info/
python/build
python/*/build
python/rmm/docs/_build
python/rmm/**/_lib/**/*.cpp
!python/rmm/_lib/_torch_allocator.cpp
python/rmm/**/_lib/**/*.h
python/rmm/**/_lib/.nfs*
python/rmm/**/librmm/**/*.cpp
!python/rmm/librmm/_torch_allocator.cpp
python/rmm/**/librmm/**/*.h
python/rmm/**/librmm/.nfs*
python/rmm/**/pylibrmm/**/*.cpp
python/rmm/**/pylibrmm/**/*.h
python/rmm/**/pylibrmm/.nfs*
python/rmm/_cuda/*.cpp
python/rmm/tests/*.cpp
python/rmm/*.ipynb
3 changes: 2 additions & 1 deletion python/rmm/CMakeLists.txt
@@ -30,4 +30,5 @@ rapids_cython_init()
add_compile_definitions("SPDLOG_ACTIVE_LEVEL=SPDLOG_LEVEL_${RMM_LOGGING_LEVEL}")

add_subdirectory(rmm/_cuda)
add_subdirectory(rmm/_lib)
add_subdirectory(rmm/librmm)
add_subdirectory(rmm/pylibrmm)
6 changes: 3 additions & 3 deletions python/rmm/docs/guide.md
@@ -236,17 +236,17 @@ Common to both usages is that they modify the currently active RMM memory resource

>>> # We start with the default cuda memory resource
>>> rmm.mr.get_current_device_resource()
<rmm._lib.memory_resource.CudaMemoryResource at 0x7f7e6c0a1ce0>
<rmm.pylibrmm.memory_resource.CudaMemoryResource object at 0x7fa0da48a8e0>

>>> # When using statistics, we get a StatisticsResourceAdaptor with the context
>>> with rmm.statistics.statistics():
... rmm.mr.get_current_device_resource()
<rmm._lib.memory_resource.StatisticsResourceAdaptor at 0x7f7e6c524900>
<rmm.pylibrmm.memory_resource.StatisticsResourceAdaptor object at 0x7fa0dd6e4a40>

>>> # We can also enable statistics globally
>>> rmm.statistics.enable_statistics()
>>> print(rmm.mr.get_current_device_resource())
<rmm._lib.memory_resource.StatisticsResourceAdaptor at 0x7f662c2bb3c0>
<rmm.pylibrmm.memory_resource.StatisticsResourceAdaptor object at 0x7f9a11340a40>
```

With statistics enabled, you can query statistics of the current and peak bytes and number of allocations performed by the current RMM memory resource:
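
As the guide text above notes, enabling statistics wraps the active resource in a `StatisticsResourceAdaptor` that tracks current and peak bytes and allocation counts. A minimal sketch of querying those numbers (the `get_statistics()` helper and its `current_bytes`/`current_count` fields are assumptions about the `rmm.statistics` module, not shown in this diff):

```python
# Sketch only: assumes rmm.statistics.get_statistics() returns an object with
# current_bytes / current_count fields; adjust to the API of your RMM version.
import rmm
import rmm.statistics

rmm.statistics.enable_statistics()   # wrap the current MR in a StatisticsResourceAdaptor
buf = rmm.DeviceBuffer(size=1024)    # a single 1 KiB device allocation

stats = rmm.statistics.get_statistics()
print(stats.current_bytes, stats.current_count)  # expected: 1024 1
```
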
20 changes: 15 additions & 5 deletions python/rmm/rmm/__init__.py
@@ -1,4 +1,4 @@
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
# Copyright (c) 2018-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,8 +13,10 @@
# limitations under the License.

from rmm import mr
from rmm._lib.device_buffer import DeviceBuffer
from rmm._lib.logger import (
from rmm._version import __git_commit__, __version__
from rmm.mr import disable_logging, enable_logging, get_log_filenames
from rmm.pylibrmm.device_buffer import DeviceBuffer
from rmm.pylibrmm.logger import (
flush_logger,
get_flush_level,
get_logging_level,
@@ -23,8 +25,6 @@
set_logging_level,
should_log,
)
from rmm._version import __git_commit__, __version__
from rmm.mr import disable_logging, enable_logging, get_log_filenames
from rmm.rmm import (
RMMError,
is_initialized,
@@ -52,3 +52,13 @@
"should_log",
"unregister_reinitialize_hook",
]


def __getattr__(name):
if name == "_lib":
import importlib

module = importlib.import_module("rmm.pylibrmm")
return module
else:
raise AttributeError(f"Module '{__name__}' has no attribute '{name}'")
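
The module-level `__getattr__` added above keeps the old `rmm._lib` attribute working by lazily handing back `rmm.pylibrmm`. An illustrative check of that behaviour, assuming nothing has imported the real `rmm._lib` shim subpackage first:

```python
# Illustrative only: if rmm._lib has not been imported as a submodule, the
# attribute lookup falls through to rmm.__getattr__ and yields rmm.pylibrmm.
import rmm

legacy = rmm._lib
print(legacy.__name__)  # expected: "rmm.pylibrmm"
```
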
4 changes: 2 additions & 2 deletions python/rmm/rmm/_cuda/stream.pxd
@@ -1,4 +1,4 @@
# Copyright (c) 2020, NVIDIA CORPORATION.
# Copyright (c) 2020-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,7 +16,7 @@ from cuda.ccudart cimport cudaStream_t
from libc.stdint cimport uintptr_t
from libcpp cimport bool

from rmm._lib.cuda_stream_view cimport cuda_stream_view
from rmm.librmm.cuda_stream_view cimport cuda_stream_view


cdef class Stream:
4 changes: 2 additions & 2 deletions python/rmm/rmm/_cuda/stream.pyx
@@ -16,13 +16,13 @@ from cuda.ccudart cimport cudaStream_t
from libc.stdint cimport uintptr_t
from libcpp cimport bool

from rmm._lib.cuda_stream cimport CudaStream
from rmm._lib.cuda_stream_view cimport (
from rmm.librmm.cuda_stream_view cimport (
cuda_stream_default,
cuda_stream_legacy,
cuda_stream_per_thread,
cuda_stream_view,
)
from rmm.pylibrmm.cuda_stream cimport CudaStream


cdef class Stream:
4 changes: 2 additions & 2 deletions python/rmm/rmm/_lib/__init__.py
@@ -1,4 +1,4 @@
# Copyright (c) 2019-2021, NVIDIA CORPORATION.
# Copyright (c) 2018-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,4 +12,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from .device_buffer import DeviceBuffer
from rmm.pylibrmm import *
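
With the wildcard re-export above, legacy imports from `rmm._lib` keep resolving while new code targets the split packages directly. A hedged sketch of the two spellings, assuming `rmm.pylibrmm` exposes `DeviceBuffer` at package level the way `rmm._lib` previously did:

```python
# Legacy path (kept alive only by the wildcard re-export in rmm/_lib/__init__.py)
from rmm._lib import DeviceBuffer as LegacyDeviceBuffer

# New canonical path after this reorganization
from rmm.pylibrmm.device_buffer import DeviceBuffer

assert LegacyDeviceBuffer is DeviceBuffer  # same class behind both import paths
```
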
27 changes: 3 additions & 24 deletions python/rmm/rmm/_lib/cuda_stream.pxd
@@ -1,4 +1,4 @@
# Copyright (c) 2020, NVIDIA CORPORATION.
# Copyright (c) 2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,26 +12,5 @@
# See the License for the specific language governing permissions and
# limitations under the License.

cimport cython
from cuda.ccudart cimport cudaStream_t
from libcpp cimport bool
from libcpp.memory cimport unique_ptr

from rmm._lib.cuda_stream_view cimport cuda_stream_view


cdef extern from "rmm/cuda_stream.hpp" namespace "rmm" nogil:
cdef cppclass cuda_stream:
cuda_stream() except +
bool is_valid() except +
cudaStream_t value() except +
cuda_stream_view view() except +
void synchronize() except +
void synchronize_no_throw()


@cython.final
cdef class CudaStream:
cdef unique_ptr[cuda_stream] c_obj
cdef cudaStream_t value(self) except * nogil
cdef bool is_valid(self) except * nogil
from rmm.librmm.cuda_stream cimport cuda_stream
from rmm.pylibrmm.cuda_stream cimport CudaStream
14 changes: 2 additions & 12 deletions python/rmm/rmm/_lib/cuda_stream_pool.pxd
@@ -1,4 +1,4 @@
# Copyright (c) 2021, NVIDIA CORPORATION.
# Copyright (c) 2021-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,14 +12,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.

cimport cython

from rmm._lib.cuda_stream_view cimport cuda_stream_view


cdef extern from "rmm/cuda_stream_pool.hpp" namespace "rmm" nogil:
cdef cppclass cuda_stream_pool:
cuda_stream_pool(size_t pool_size)
cuda_stream_view get_stream()
cuda_stream_view get_stream(size_t stream_id) except +
size_t get_pool_size()
from rmm.librmm.cuda_stream_pool cimport cuda_stream_pool
26 changes: 7 additions & 19 deletions python/rmm/rmm/_lib/cuda_stream_view.pxd
@@ -1,4 +1,4 @@
# Copyright (c) 2020, NVIDIA CORPORATION.
# Copyright (c) 2020-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,21 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from cuda.ccudart cimport cudaStream_t
from libcpp cimport bool


cdef extern from "rmm/cuda_stream_view.hpp" namespace "rmm" nogil:
cdef cppclass cuda_stream_view:
cuda_stream_view()
cuda_stream_view(cudaStream_t)
cudaStream_t value()
bool is_default()
bool is_per_thread_default()
void synchronize() except +

cdef bool operator==(cuda_stream_view const, cuda_stream_view const)

const cuda_stream_view cuda_stream_default
const cuda_stream_view cuda_stream_legacy
const cuda_stream_view cuda_stream_per_thread
from rmm.librmm.cuda_stream_view cimport (
cuda_stream_default,
cuda_stream_legacy,
cuda_stream_per_thread,
cuda_stream_view,
)
115 changes: 13 additions & 102 deletions python/rmm/rmm/_lib/device_buffer.pxd
@@ -1,4 +1,4 @@
# Copyright (c) 2019-2024, NVIDIA CORPORATION.
# Copyright (c) 2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,105 +12,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from libc.stdint cimport uintptr_t
from libcpp.memory cimport unique_ptr

from rmm._cuda.stream cimport Stream
from rmm._lib.cuda_stream_view cimport cuda_stream_view
from rmm._lib.memory_resource cimport (
DeviceMemoryResource,
device_memory_resource,
from rmm.librmm.device_buffer cimport (
cuda_device_id,
device_buffer,
get_current_cuda_device,
prefetch,
)
from rmm.pylibrmm.device_buffer cimport (
DeviceBuffer,
copy_device_to_ptr,
copy_host_to_ptr,
copy_ptr_to_host,
to_device,
)


cdef extern from "rmm/mr/device/per_device_resource.hpp" namespace "rmm" nogil:
cdef cppclass cuda_device_id:
ctypedef int value_type
cuda_device_id()
cuda_device_id(value_type id)
value_type value()

cdef cuda_device_id get_current_cuda_device()

cdef extern from "rmm/prefetch.hpp" namespace "rmm" nogil:
cdef void prefetch(const void* ptr,
size_t bytes,
cuda_device_id device,
cuda_stream_view stream) except +

cdef extern from "rmm/device_buffer.hpp" namespace "rmm" nogil:
cdef cppclass device_buffer:
device_buffer()
device_buffer(
size_t size,
cuda_stream_view stream,
device_memory_resource *
) except +
device_buffer(
const void* source_data,
size_t size,
cuda_stream_view stream,
device_memory_resource *
) except +
device_buffer(
const device_buffer buf,
cuda_stream_view stream,
device_memory_resource *
) except +
void reserve(size_t new_capacity, cuda_stream_view stream) except +
void resize(size_t new_size, cuda_stream_view stream) except +
void shrink_to_fit(cuda_stream_view stream) except +
void* data()
size_t size()
size_t capacity()


cdef class DeviceBuffer:
cdef unique_ptr[device_buffer] c_obj

# Holds a reference to the DeviceMemoryResource used for allocation.
# Ensures the MR does not get destroyed before this DeviceBuffer. `mr` is
# needed for deallocation
cdef DeviceMemoryResource mr

# Holds a reference to the stream used by the underlying `device_buffer`.
# Ensures the stream does not get destroyed before this DeviceBuffer
cdef Stream stream

@staticmethod
cdef DeviceBuffer c_from_unique_ptr(
unique_ptr[device_buffer] ptr,
Stream stream=*,
DeviceMemoryResource mr=*,
)

@staticmethod
cdef DeviceBuffer c_to_device(const unsigned char[::1] b,
Stream stream=*) except *
cpdef copy_to_host(self, ary=*, Stream stream=*)
cpdef copy_from_host(self, ary, Stream stream=*)
cpdef copy_from_device(self, cuda_ary, Stream stream=*)
cpdef bytes tobytes(self, Stream stream=*)

cdef size_t c_size(self) except *
cpdef void reserve(self, size_t new_capacity, Stream stream=*) except *
cpdef void resize(self, size_t new_size, Stream stream=*) except *
cpdef size_t capacity(self) except *
cdef void* c_data(self) except *

cdef device_buffer c_release(self) except *

cpdef DeviceBuffer to_device(const unsigned char[::1] b,
Stream stream=*)
cpdef void copy_ptr_to_host(uintptr_t db,
unsigned char[::1] hb,
Stream stream=*) except *

cpdef void copy_host_to_ptr(const unsigned char[::1] hb,
uintptr_t db,
Stream stream=*) except *

cpdef void copy_device_to_ptr(uintptr_t d_src,
uintptr_t d_dst,
size_t count,
Stream stream=*) except *
28 changes: 2 additions & 26 deletions python/rmm/rmm/_lib/device_uvector.pxd
@@ -1,4 +1,4 @@
# Copyright (c) 2021, NVIDIA CORPORATION.
# Copyright (c) 2021-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,28 +12,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from rmm._lib.cuda_stream_view cimport cuda_stream_view
from rmm._lib.device_buffer cimport device_buffer
from rmm._lib.memory_resource cimport device_memory_resource


cdef extern from "rmm/device_buffer.hpp" namespace "rmm" nogil:
cdef cppclass device_uvector[T]:
device_uvector(size_t size, cuda_stream_view stream) except +
T* element_ptr(size_t index)
void set_element(size_t element_index, const T& v, cuda_stream_view s)
void set_element_async(
size_t element_index,
const T& v,
cuda_stream_view s
) except +
T front_element(cuda_stream_view s) except +
T back_element(cuda_stream_view s) except +
void reserve(size_t new_capacity, cuda_stream_view stream) except +
void resize(size_t new_size, cuda_stream_view stream) except +
void shrink_to_fit(cuda_stream_view stream) except +
device_buffer release()
size_t capacity()
T* data()
size_t size()
device_memory_resource* memory_resource()
from rmm.librmm.device_uvector cimport device_uvector
3 changes: 1 addition & 2 deletions python/rmm/rmm/_lib/helper.pxd
@@ -12,5 +12,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.


cdef object parse_bytes(object s) except *
from rmm.pylibrmm.helper cimport parse_bytes