[CUDAX] make uninitialized_buffer usable with launch (#2342)
* make `cudax::uninitialized_buffer` usable with `cudax::launch`

* test passing a `const` `uninitialized_buffer` to `launch`
ericniebler authored Sep 3, 2024
1 parent 498251c commit c6b777b
Showing 2 changed files with 62 additions and 0 deletions.
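In short: an `uninitialized_buffer` whose properties include `cuda::mr::device_accessible` can now be passed directly to `cudax::launch`, and the kernel receives it as a `cuda::std::span` over the buffer's storage, with no manual pointer/size plumbing. A minimal usage sketch follows; the kernel `fill_kernel`, its body, and the trailing synchronization are illustrative and not part of this commit, while the cudax calls mirror the test added below (includes abridged).

#include <cuda/std/span>

#include <cuda/experimental/buffer.cuh>
#include <cuda/experimental/launch.cuh>
#include <cuda/experimental/stream.cuh>

namespace cudax = cuda::experimental;

// Illustrative kernel: receives the buffer as a span and writes to it.
__global__ void fill_kernel(cuda::std::span<int> data)
{
  const unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < data.size())
  {
    data[idx] = static_cast<int>(idx);
  }
}

void example()
{
  // 1024 uninitialized ints in device-accessible memory.
  cudax::uninitialized_buffer<int, cuda::mr::device_accessible> buffer{cuda::mr::device_memory_resource{}, 1024};
  cudax::stream stream;
  auto dims = cudax::make_hierarchy(cudax::grid_dims(4), cudax::block_dims<256>());

  // The buffer is passed as-is; launch hands the kernel a cuda::std::span<int>
  // covering the 1024 elements.
  cudax::launch(stream, dims, fill_kernel, buffer);

  // Wait for the kernel before the buffer goes out of scope.
  cudaDeviceSynchronize();
}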
@@ -27,6 +27,7 @@
#include <cuda/std/__new/launder.h>
#include <cuda/std/__utility/move.h>
#include <cuda/std/__utility/swap.h>
#include <cuda/std/span>

#include <cuda/experimental/__memory_resource/any_resource.cuh>

@@ -82,6 +83,26 @@ private:
reinterpret_cast<_Tp*>(_CUDA_VSTD::align(__alignment, __count_ * sizeof(_Tp), __ptr, __space)));
}

  //! @brief Causes the buffer to be treated as a span when passed to cudax::launch.
  //! @pre The buffer must have the cuda::mr::device_accessible property.
  _CCCL_NODISCARD_FRIEND _CUDA_VSTD::span<_Tp>
  __cudax_launch_transform(stream_ref, uninitialized_buffer& __self) noexcept
  {
    static_assert(_CUDA_VSTD::_One_of<_CUDA_VMR::device_accessible, _Properties...>,
                  "The buffer must be device accessible to be passed to `launch`");
    return {__self.__get_data(), __self.size()};
  }

  //! @brief Causes the buffer to be treated as a span when passed to cudax::launch.
  //! @pre The buffer must have the cuda::mr::device_accessible property.
  _CCCL_NODISCARD_FRIEND _CUDA_VSTD::span<const _Tp>
  __cudax_launch_transform(stream_ref, const uninitialized_buffer& __self) noexcept
  {
    static_assert(_CUDA_VSTD::_One_of<_CUDA_VMR::device_accessible, _Properties...>,
                  "The buffer must be device accessible to be passed to `launch`");
    return {__self.__get_data(), __self.size()};
  }

public:
using value_type = _Tp;
using reference = _Tp&;
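The two hidden friends above are the whole opt-in: `cudax::launch` finds `__cudax_launch_transform` by argument-dependent lookup on each kernel argument and, when present, passes its result to the kernel in place of the original argument, so a non-const buffer arrives as `span<_Tp>` and a const buffer as `span<const _Tp>`. The sketch below shows the shape of that hook on a hypothetical user-owned type; `device_ints` is not part of cudax, double-underscore names are reserved for the implementation, and the `stream_ref` parameter simply mirrors the overloads above, so treat this as illustration rather than a supported extension point.

#include <cuda/std/cstddef>
#include <cuda/std/span>
#include <cuda/stream_ref>

// Hypothetical type that owns device memory it wants launch to see as a span.
struct device_ints
{
  int* ptr;
  cuda::std::size_t count;

  // Hidden friend found by ADL; launch would receive the returned span
  // instead of the device_ints object itself.
  friend cuda::std::span<int> __cudax_launch_transform(cuda::stream_ref, device_ints& self) noexcept
  {
    return {self.ptr, self.count};
  }
};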
41 changes: 41 additions & 0 deletions cudax/test/containers/uninitialized_buffer.cu
@@ -19,6 +19,8 @@
#include <cuda/std/utility>

#include <cuda/experimental/buffer.cuh>
#include <cuda/experimental/launch.cuh>
#include <cuda/experimental/stream.cuh>

#include "testing.cuh"

@@ -158,3 +160,42 @@ TEMPLATE_TEST_CASE(
}
}
}

__global__ void kernel(_CUDA_VSTD::span<int> data)
{
  // Touch the memory to be sure it's accessible
  CUDAX_CHECK(data.size() == 1024);
  data[0] = 42;
}

__global__ void const_kernel(_CUDA_VSTD::span<const int> data)
{
  // Touch the memory to be sure it's accessible
  CUDAX_CHECK(data.size() == 1024);
}

TEST_CASE("uninitialized_buffer is usable with cudax::launch", "[container]")
{
  SECTION("non-const")
  {
    const int grid_size = 4;
    cudax::uninitialized_buffer<int, ::cuda::mr::device_accessible> buffer{cuda::mr::device_memory_resource{}, 1024};
    auto dimensions = cudax::make_hierarchy(cudax::grid_dims(grid_size), cudax::block_dims<256>());

    cudax::stream stream;

    cudax::launch(stream, dimensions, kernel, buffer);
  }

  SECTION("const")
  {
    const int grid_size = 4;
    const cudax::uninitialized_buffer<int, ::cuda::mr::device_accessible> buffer{
      cuda::mr::device_memory_resource{}, 1024};
    auto dimensions = cudax::make_hierarchy(cudax::grid_dims(grid_size), cudax::block_dims<256>());

    cudax::stream stream;

    cudax::launch(stream, dimensions, const_kernel, buffer);
  }
}
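Not part of this commit, but a possible follow-up inside the non-const SECTION above: wait for the launch and read back the first element to confirm the kernel's write of 42 is visible on the host. This assumes only the plain CUDA runtime API, `uninitialized_buffer::data()`, and the `CUDAX_CHECK` macro from testing.cuh.

    cudax::launch(stream, dimensions, kernel, buffer);

    // Wait for the launch to finish, then read back data[0].
    cudaDeviceSynchronize();
    int first = 0;
    cudaMemcpy(&first, buffer.data(), sizeof(int), cudaMemcpyDeviceToHost);
    CUDAX_CHECK(first == 42);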
