[Improvement] Reorganize Cython to separate C++ bindings and make Cython classes public #1676

Merged: 19 commits, Oct 3, 2024
Changes from 7 commits
3 changes: 2 additions & 1 deletion python/rmm/CMakeLists.txt
@@ -30,4 +30,5 @@ rapids_cython_init()
add_compile_definitions("SPDLOG_ACTIVE_LEVEL=SPDLOG_LEVEL_${RMM_LOGGING_LEVEL}")

add_subdirectory(rmm/_cuda)
add_subdirectory(rmm/_lib)
add_subdirectory(rmm/cpp)
add_subdirectory(rmm/python)
25 changes: 20 additions & 5 deletions python/rmm/rmm/__init__.py
@@ -1,4 +1,4 @@
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
# Copyright (c) 2018-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,8 +13,10 @@
# limitations under the License.

from rmm import mr
from rmm._lib.device_buffer import DeviceBuffer
from rmm._lib.logger import (
from rmm._version import __git_commit__, __version__
from rmm.mr import disable_logging, enable_logging, get_log_filenames
from rmm.python.device_buffer import DeviceBuffer
from rmm.python.logger import (
flush_logger,
get_flush_level,
get_logging_level,
@@ -23,8 +25,6 @@
set_logging_level,
should_log,
)
from rmm._version import __git_commit__, __version__
from rmm.mr import disable_logging, enable_logging, get_log_filenames
from rmm.rmm import (
RMMError,
is_initialized,
@@ -52,3 +52,18 @@
"should_log",
"unregister_reinitialize_hook",
]


def __getattr__(name):
if name == "_lib":
import importlib
import warnings

warnings.warn(
"The `rmm._lib` module is deprecated in will be removed in a future release. Use `rmm.python` instead.",
FutureWarning,
)
module = importlib.import_module("rmm.python")
return module
else:
raise AttributeError(f"Module '{__name__}' has no attribute '{name}'")
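
A short sketch of how this shim behaves for downstream code (hypothetical session, assuming a build of this branch): accessing the attribute `rmm._lib` falls through to `__getattr__`, which warns and hands back `rmm.python`:

import warnings

import rmm

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    legacy = rmm._lib  # no longer imported eagerly, so module __getattr__ runs
assert legacy.DeviceBuffer is rmm.DeviceBuffer
assert any(issubclass(w.category, FutureWarning) for w in caught)

Note that a plain `import rmm._lib` bypasses this shim; that path is covered by the warning added to `rmm/_lib/__init__.py` further down.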
4 changes: 2 additions & 2 deletions python/rmm/rmm/_cuda/stream.pxd
@@ -1,4 +1,4 @@
# Copyright (c) 2020, NVIDIA CORPORATION.
# Copyright (c) 2020-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,7 +16,7 @@ from cuda.ccudart cimport cudaStream_t
from libc.stdint cimport uintptr_t
from libcpp cimport bool

from rmm._lib.cuda_stream_view cimport cuda_stream_view
from rmm.cpp.cuda_stream_view cimport cuda_stream_view


cdef class Stream:
4 changes: 2 additions & 2 deletions python/rmm/rmm/_cuda/stream.pyx
@@ -16,13 +16,13 @@ from cuda.ccudart cimport cudaStream_t
from libc.stdint cimport uintptr_t
from libcpp cimport bool

from rmm._lib.cuda_stream cimport CudaStream
from rmm._lib.cuda_stream_view cimport (
from rmm.cpp.cuda_stream_view cimport (
cuda_stream_default,
cuda_stream_legacy,
cuda_stream_per_thread,
cuda_stream_view,
)
from rmm.python.cuda_stream cimport CudaStream


cdef class Stream:
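
The public `Stream` wrapper keeps its behavior; only the cimport paths change. A small usage sketch (assuming a CUDA-capable environment and that `Stream()` with no arguments still wraps a newly created stream, as in the existing implementation):

import rmm
from rmm._cuda.stream import Stream

stream = Stream()                                 # owns a new rmm::cuda_stream
buf = rmm.DeviceBuffer(size=1024, stream=stream)  # allocate on that stream
stream.synchronize()                              # wait for the allocation work to finish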
11 changes: 9 additions & 2 deletions python/rmm/rmm/_lib/__init__.py
@@ -1,4 +1,4 @@
# Copyright (c) 2019-2021, NVIDIA CORPORATION.
# Copyright (c) 2018-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,4 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from .device_buffer import DeviceBuffer
import warnings

warnings.warn(
"The `rmm._lib` module is deprecated in will be removed in a future release. Use `rmm.python` instead.",
FutureWarning,
)

from rmm.python import *
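
For code that imports the package directly, the warning above fires at import time and the star import keeps the old names resolvable (a sketch for a fresh interpreter, assuming `rmm.python` re-exports the same public names the old `rmm._lib` did):

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    from rmm._lib import DeviceBuffer  # still resolves, but emits a FutureWarning on first import

assert any(issubclass(w.category, FutureWarning) for w in caught)
buf = DeviceBuffer(size=32)  # same class object as rmm.DeviceBuffer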
6 changes: 3 additions & 3 deletions python/rmm/rmm/allocators/cupy.py
@@ -1,4 +1,4 @@
# Copyright (c) 2023, NVIDIA CORPORATION.
# Copyright (c) 2023-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rmm import _lib as librmm
from rmm import python as pylibrmm
from rmm._cuda.stream import Stream

try:
@@ -34,7 +34,7 @@ def rmm_cupy_allocator(nbytes):
raise ModuleNotFoundError("No module named 'cupy'")

stream = Stream(obj=cupy.cuda.get_current_stream())
buf = librmm.device_buffer.DeviceBuffer(size=nbytes, stream=stream)
buf = pylibrmm.device_buffer.DeviceBuffer(size=nbytes, stream=stream)
dev_id = -1 if buf.ptr else cupy.cuda.device.get_device_id()
mem = cupy.cuda.UnownedMemory(
ptr=buf.ptr, size=buf.size, owner=buf, device_id=dev_id
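
The rename from `librmm` to `pylibrmm` does not change how the allocator is wired up; typical usage (assuming CuPy and a visible CUDA device) stays the same:

import cupy
import rmm
from rmm.allocators.cupy import rmm_cupy_allocator

rmm.reinitialize(pool_allocator=True)        # optional: back allocations with an RMM pool
cupy.cuda.set_allocator(rmm_cupy_allocator)  # route CuPy device allocations through RMM

x = cupy.arange(1_000_000)                   # this array's memory now comes from RMM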
6 changes: 3 additions & 3 deletions python/rmm/rmm/allocators/numba.py
@@ -1,4 +1,4 @@
# Copyright (c) 2023, NVIDIA CORPORATION.
# Copyright (c) 2023-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@
from numba import config, cuda
from numba.cuda import HostOnlyCUDAMemoryManager, IpcHandle, MemoryPointer

from rmm import _lib as librmm
from rmm import python as pylibrmm


def _make_emm_plugin_finalizer(handle, allocations):
@@ -70,7 +70,7 @@ def memalloc(self, size):
"""
Allocate an on-device array from the RMM pool.
"""
buf = librmm.DeviceBuffer(size=size)
buf = pylibrmm.DeviceBuffer(size=size)
ctx = self.context

if config.CUDA_USE_NVIDIA_BINDING:
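
As with the CuPy allocator, the public entry point is unchanged; a usage sketch (assuming Numba and a CUDA device are available):

import numpy as np
from numba import cuda
from rmm.allocators.numba import RMMNumbaManager

cuda.set_memory_manager(RMMNumbaManager)  # Numba device arrays now allocate via RMM

arr = cuda.to_device(np.arange(10))       # backed by an RMM DeviceBuffer
print(arr.copy_to_host())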
8 changes: 4 additions & 4 deletions python/rmm/rmm/allocators/torch.py
@@ -1,4 +1,4 @@
# Copyright (c) 2023, NVIDIA CORPORATION.
# Copyright (c) 2023-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -28,10 +28,10 @@
# allocator .so relative to the current file because the current file
# is pure Python and will therefore be in the source directory.
# Instead, we search relative to an arbitrary file in the compiled
# package. We use the _lib.lib module because it is small.
from rmm._lib import lib
# package. We use the cpp._logger module because it is small.
from rmm.cpp import _logger

sofile = pathlib.Path(lib.__file__).parent / "_torch_allocator.so"
sofile = pathlib.Path(_logger.__file__).parent / "_torch_allocator.so"
rmm_torch_allocator = CUDAPluggableAllocator(
str(sofile.absolute()),
alloc_fn_name="allocate",
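
Locating the shared library next to a compiled module (`cpp._logger` instead of the removed `_lib.lib`) is an implementation detail; consumers still opt in the same way (a sketch, assuming a CUDA-enabled build of PyTorch):

import torch
from rmm.allocators.torch import rmm_torch_allocator

torch.cuda.memory.change_current_allocator(rmm_torch_allocator)
t = torch.zeros(1024, device="cuda")  # allocated through RMM's allocate/deallocate hooks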
@@ -12,8 +12,7 @@
# the License.
# =============================================================================

set(cython_sources device_buffer.pyx lib.pyx logger.pyx memory_resource.pyx cuda_stream.pyx
helper.pyx)
set(cython_sources _logger.pyx)
set(linked_libraries rmm::rmm)

# Build all of the Cython targets
File renamed without changes.
File renamed without changes.
66 changes: 66 additions & 0 deletions python/rmm/rmm/cpp/_logger.pxd
@@ -0,0 +1,66 @@
# Copyright (c) 2023-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from libcpp cimport bool


cdef extern from "spdlog/common.h" namespace "spdlog::level" nogil:
cpdef enum logging_level "spdlog::level::level_enum":
"""
The debug logging level for RMM.

Debug logging prints messages to a log file. See
`Debug Logging <https://github.com/rapidsai/rmm#debug-logging>`_
for more information.

Valid levels, in decreasing order of verbosity, are TRACE, DEBUG,
INFO, WARN, ERR, CRITICAL, and OFF. Default is INFO.

Examples
--------
>>> import rmm
>>> rmm.logging_level.DEBUG
<logging_level.DEBUG: 1>
>>> rmm.logging_level.DEBUG.value
1
>>> rmm.logging_level.DEBUG.name
'DEBUG'

See Also
--------
set_logging_level : Set the debug logging level
get_logging_level : Get the current debug logging level
"""
TRACE "spdlog::level::trace"
DEBUG "spdlog::level::debug"
INFO "spdlog::level::info"
WARN "spdlog::level::warn"
ERR "spdlog::level::err"
CRITICAL "spdlog::level::critical"
OFF "spdlog::level::off"


cdef extern from "spdlog/spdlog.h" namespace "spdlog" nogil:
cdef cppclass spdlog_logger "spdlog::logger":
spdlog_logger() except +
void set_level(logging_level level)
logging_level level()
void flush() except +
void flush_on(logging_level level)
logging_level flush_level()
bool should_log(logging_level msg_level)


cdef extern from "rmm/logger.hpp" namespace "rmm" nogil:
cdef spdlog_logger& logger() except +
15 changes: 15 additions & 0 deletions python/rmm/rmm/cpp/_logger.pyx
@@ -0,0 +1,15 @@
# Copyright (c) 2023-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ._logger cimport logging_level # no-cython-lint
@@ -1,4 +1,4 @@
# Copyright (c) 2020, NVIDIA CORPORATION.
# Copyright (c) 2020-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,12 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.

cimport cython
from cuda.ccudart cimport cudaStream_t
from libcpp cimport bool
from libcpp.memory cimport unique_ptr

from rmm._lib.cuda_stream_view cimport cuda_stream_view
from rmm.cpp.cuda_stream_view cimport cuda_stream_view


cdef extern from "rmm/cuda_stream.hpp" namespace "rmm" nogil:
@@ -28,10 +26,3 @@ cdef extern from "rmm/cuda_stream.hpp" namespace "rmm" nogil:
cuda_stream_view view() except +
void synchronize() except +
void synchronize_no_throw()


@cython.final
cdef class CudaStream:
cdef unique_ptr[cuda_stream] c_obj
cdef cudaStream_t value(self) except * nogil
cdef bool is_valid(self) except * nogil
@@ -1,4 +1,4 @@
# Copyright (c) 2021, NVIDIA CORPORATION.
# Copyright (c) 2021-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,9 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

cimport cython

from rmm._lib.cuda_stream_view cimport cuda_stream_view
from rmm.cpp.cuda_stream_view cimport cuda_stream_view


cdef extern from "rmm/cuda_stream_pool.hpp" namespace "rmm" nogil:
58 changes: 58 additions & 0 deletions python/rmm/rmm/cpp/device_buffer.pxd
@@ -0,0 +1,58 @@
# Copyright (c) 2019-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from rmm.cpp.cuda_stream_view cimport cuda_stream_view
from rmm.cpp.memory_resource cimport device_memory_resource


cdef extern from "rmm/mr/device/per_device_resource.hpp" namespace "rmm" nogil:
cdef cppclass cuda_device_id:
ctypedef int value_type
cuda_device_id()
cuda_device_id(value_type id)
value_type value()

cdef cuda_device_id get_current_cuda_device()

cdef extern from "rmm/prefetch.hpp" namespace "rmm" nogil:
cdef void prefetch(const void* ptr,
size_t bytes,
cuda_device_id device,
cuda_stream_view stream) except +

cdef extern from "rmm/device_buffer.hpp" namespace "rmm" nogil:
cdef cppclass device_buffer:
device_buffer()
device_buffer(
size_t size,
cuda_stream_view stream,
device_memory_resource *
) except +
device_buffer(
const void* source_data,
size_t size,
cuda_stream_view stream,
device_memory_resource *
) except +
device_buffer(
const device_buffer buf,
cuda_stream_view stream,
device_memory_resource *
) except +
void reserve(size_t new_capacity, cuda_stream_view stream) except +
void resize(size_t new_size, cuda_stream_view stream) except +
void shrink_to_fit(cuda_stream_view stream) except +
void* data()
size_t size()
size_t capacity()
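
The Python-facing `DeviceBuffer` (now under `rmm.python.device_buffer`, re-exported as `rmm.DeviceBuffer`) sits on top of these declarations; a small host/device round trip (assuming a CUDA device is present):

import rmm

src = b"hello rmm!"
buf = rmm.DeviceBuffer.to_device(src)  # copy host bytes into newly allocated device memory
assert buf.size == len(src)
assert buf.tobytes() == src            # copy back to host and compare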
@@ -1,4 +1,4 @@
# Copyright (c) 2021, NVIDIA CORPORATION.
# Copyright (c) 2021-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
Expand All @@ -12,9 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from rmm._lib.cuda_stream_view cimport cuda_stream_view
from rmm._lib.device_buffer cimport device_buffer
from rmm._lib.memory_resource cimport device_memory_resource
from rmm.cpp.cuda_stream_view cimport cuda_stream_view
from rmm.cpp.device_buffer cimport device_buffer
from rmm.cpp.memory_resource cimport device_memory_resource


cdef extern from "rmm/device_buffer.hpp" namespace "rmm" nogil: