Skip to content

Commit

Permalink
Move init doc to class with a cleanup
Browse files Browse the repository at this point in the history
  • Loading branch information
vzhurba01 committed Nov 4, 2024
1 parent cc3b2fd commit f3e7469
Show file tree
Hide file tree
Showing 7 changed files with 72 additions and 94 deletions.
31 changes: 14 additions & 17 deletions cuda_core/cuda/core/experimental/_device.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@


class Device:
"""Represents a GPU and acts as an entry point for cuda.core features.
"""Represent a GPU and act as an entry point for cuda.core features.
This is a singleton object that helps ensure interoperability
across multiple libraries imported in the process to both see
Expand All @@ -30,26 +30,23 @@ class Device:
resource created through this device will continue to refer to
this device's context.
"""
__slots__ = ("_id", "_mr", "_has_inited")

def __new__(cls, device_id=None):
"""Create and return a singleton :obj:`Device` object.
Newly returned :obj:`Device` objects are thread-local singletons
for a specified device.
Creates and returns a thread-local singleton :obj:`Device` object
corresponding to a specific device.
Note
----
Will not initialize the GPU.
Note
----
Will not initialize the GPU.
Parameters
----------
device_id : int, optional
Device ordinal to return a :obj:`Device` object for.
Default value of `None` returns the currently used device.
Parameters
----------
device_id : int, optional
Device ordinal to return a :obj:`Device` object for.
Default value of `None` returns the currently used device.
"""
__slots__ = ("_id", "_mr", "_has_inited")

"""
def __new__(cls, device_id=None):
# important: creating a Device instance does not initialize the GPU!
if device_id is None:
device_id = handle_return(cudart.cudaGetDevice())
Expand Down
10 changes: 4 additions & 6 deletions cuda_core/cuda/core/experimental/_event.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ class EventOptions:


class Event:
"""Represents a record of a specific point of execution within a CUDA stream.
"""Represent a record at a specific point of execution within a CUDA stream.
Applications can asynchronously record events at any point in
the program. An event keeps a record of all previous work within
Expand All @@ -46,15 +46,13 @@ class Event:
of work up to event's record, and help establish dependencies
between GPU work submissions.
Directly creating an :obj:`Event` is not supported due to ambiguity,
and they should instead be created through a :obj:`Stream` object.
"""
__slots__ = ("_handle", "_timing_disabled", "_busy_waited")

def __init__(self):
"""Unsupported function due to ambiguity.
New events should instead be created through a :obj:`Stream` object.
"""
self._handle = None
raise NotImplementedError(
"directly creating an Event object can be ambiguous. Please call "
Expand Down
2 changes: 2 additions & 0 deletions cuda_core/cuda/core/experimental/_launcher.py
Original file line number Diff line number Diff line change
Expand Up @@ -110,6 +110,8 @@ def launch(kernel, config, *kernel_args):
Parameters
----------
kernel : :obj:`Kernel`
Kernel to launch.
config : Any
Launch configurations inline with options provided by
:obj:`LaunchConfig` dataclass.
Expand Down
23 changes: 10 additions & 13 deletions cuda_core/cuda/core/experimental/_memory.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@


class Buffer:
"""Represents a handle to allocated memory.
"""Represent a handle to allocated memory.
This generic object provides a unified representation for how
different memory resources are to give access to their memory
Expand All @@ -32,24 +32,21 @@ class Buffer:
establishing both the DLPack and the Python-level buffer
protocols.
Parameters
----------
ptr : Any
Allocated buffer handle object
size : Any
Memory size of the buffer
mr : :obj:`MemoryResource`, optional
Memory resource associated with the buffer
"""

# TODO: handle ownership? (_mr could be None)
__slots__ = ("_ptr", "_size", "_mr",)

def __init__(self, ptr, size, mr: MemoryResource=None):
"""Initialize a new buffer object.
Parameters
----------
ptr : Any
Allocated buffer handle object
size : Any
Memory size of the buffer
mr : :obj:`MemoryResource`, optional
Memory resource associated with the buffer
"""
self._ptr = ptr
self._size = size
self._mr = mr
Expand Down
62 changes: 28 additions & 34 deletions cuda_core/cuda/core/experimental/_module.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,17 +46,19 @@ def _lazy_init():


class Kernel:
"""Represents a compiled kernel that had been loaded onto the device.
"""Represent a compiled kernel that has been loaded onto the device.
Kernel instances can be executed when passed directly into a
launch function.
Directly creating a :obj:`Kernel` is not supported, and they
should instead be created through a :obj:`ObjectCode` object.
"""

__slots__ = ("_handle", "_module",)

def __init__(self):
"""Unsupported function whose creation is intended through an :obj:`ObjectCode` object."""
raise NotImplementedError("directly constructing a Kernel instance is not supported")

@staticmethod
Expand All @@ -72,49 +74,41 @@ def _from_obj(obj, mod):


class ObjectCode:
"""Represents the compiled program loaded onto the device.
"""Represent a compiled program that was loaded onto the device.
This object provides a unified interface for different types of
compiled programs that are loaded onto the device.
Loads the module library with specified module code and JIT options.
Note
----
Usage under CUDA 11.x will only load to the current device
context.
Parameters
----------
module : Union[bytes, str]
Either a bytes object containing the module to load, or
a file path string containing that module for loading.
code_type : Any
String of the compiled type.
Supported options are "ptx", "cubin" and "fatbin".
jit_options : Optional
Mapping of JIT options to use during module loading.
(Default to no options)
symbol_mapping : Optional
Keyword argument dictionary specifying how symbol names
should be mapped before trying to retrieve them.
(Default to no mappings)
"""

__slots__ = ("_handle", "_code_type", "_module", "_loader", "_sym_map")
_supported_code_type = ("cubin", "ptx", "fatbin")

def __init__(self, module, code_type, jit_options=None, *,
symbol_mapping=None):
"""Create and return a compiled program as an instance of an :obj:`ObjectCode`.
Loads the module library with specified module code and JIT options.
Note
----
Usage under CUDA 11.x will only load to the current device
context.
Parameters
----------
module : Union[bytes, str]
Either a bytes object containing the module to load, or
a file path string containing that module for loading.
code_type : Any
String of the compiled type.
Supported options are "ptx", "cubin" and "fatbin".
jit_options : Optional
Mapping of JIT options to use during module loading.
(Default to no options)
symbol_mapping : Optional
Keyword argument dictionary specifying how symbol names
should be mapped before trying to retrieve them.
(Default to no mappings)
Returns
-------
:obj:`ObjectCode`
Newly created :obj:`ObjectCode`.
"""
if code_type not in self._supported_code_type:
raise ValueError
_lazy_init()
Expand Down
24 changes: 8 additions & 16 deletions cuda_core/cuda/core/experimental/_program.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,34 +8,26 @@


class Program:
"""Represents the compilation machinery for processing programs into :obj:`ObjectCode`.
"""Represent a compilation machinery to process programs into :obj:`ObjectCode`.
This object provides a unified interface to multiple underlying
compiler libraries. Compilation support is enabled for a wide
range of code types and compilation types.
Parameters
----------
code : Any
String of the CUDA Runtime Compilation program.
code_type : Any
String of the code type. Only "c++" is currently supported.
"""

__slots__ = ("_handle", "_backend", )
_supported_code_type = ("c++", )
_supported_target_type = ("ptx", "cubin", "ltoir", )

def __init__(self, code, code_type):
"""Create an instance of a :obj:`Program` object.
Parameters
----------
code : Any
String of the CUDA Runtime Compilation program.
code_type : Any
String of the code type. Only "c++" is currently supported.
Returns
-------
:obj:`Program`
Newly created program object.
"""
self._handle = None
if code_type not in self._supported_code_type:
raise NotImplementedError
Expand Down
14 changes: 6 additions & 8 deletions cuda_core/cuda/core/experimental/_stream.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ class StreamOptions:


class Stream:
"""Represents a queue of GPU operations that are executed in a specific order.
"""Represent a queue of GPU operations that are executed in a specific order.
Applications use streams to control the order of execution for
GPU work. Work within a single stream is executed sequentially.
Expand All @@ -46,19 +46,17 @@ class Stream:
Advanced users can utilize default streams to enforce complex
implicit synchronization behaviors.
Directly creating a :obj:`Stream` is not supported due to ambiguity.
New streams should instead be created through a :obj:`Device`
object, or created directly from an existing handle
using Stream.from_handle().
"""

__slots__ = ("_handle", "_nonblocking", "_priority", "_owner", "_builtin",
"_device_id", "_ctx_handle")

def __init__(self):
"""Unsupported function due to ambiguity.
New streams should instead be created through a :obj:`Device`
object, or created directly from an existing handle
using Stream.from_handle()
"""
# minimal requirements for the destructor
self._handle = None
self._owner = None
Expand Down

0 comments on commit f3e7469

Please sign in to comment.