Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Removing cuda stream view include from mdarray #1429

Merged
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 6 additions & 6 deletions cpp/include/raft/core/mdarray.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,6 @@
#include <raft/core/mdspan.hpp>
#include <raft/core/mdspan_types.hpp>
#include <raft/core/memory_type.hpp>
#include <rmm/cuda_stream_view.hpp>

namespace raft {
/**
Expand All @@ -45,11 +44,11 @@ namespace raft {
template <typename Base>
class array_interface {
/**
* @brief Get a mdspan that can be passed down to CUDA kernels.
* @brief Get an mdspan
*/
auto view() noexcept { return static_cast<Base*>(this)->view(); }
/**
* @brief Get a mdspan that can be passed down to CUDA kernels.
* @brief Get an mdspan<const T>
*/
auto view() const noexcept { return static_cast<Base*>(this)->view(); }
};
Expand Down Expand Up @@ -108,7 +107,8 @@ inline constexpr bool is_array_interface_v = is_array_interface<Tn...>::value;
* template.
*
 * - Most of the constructors from the reference implementation are removed to make sure
* CUDA stream is honorred.
* CUDA stream is honored. Note that this class is not coupled to CUDA and therefore
* will only be used in the case where the device variant is used.
*
 * - unique_size is not implemented, which is still a work in progress in the proposal
*
Expand Down Expand Up @@ -220,11 +220,11 @@ class mdarray
#undef RAFT_MDARRAY_CTOR_CONSTEXPR

/**
* @brief Get a mdspan that can be passed down to CUDA kernels.
* @brief Get an mdspan
*/
auto view() noexcept { return view_type(c_.data(), map_, cp_.make_accessor_policy()); }
/**
* @brief Get a mdspan that can be passed down to CUDA kernels.
* @brief Get an mdspan<const T>
*/
auto view() const noexcept
{
Expand Down