Skip to content

Commit

Permalink
Enable indexalator for device code (#14206)
Browse files Browse the repository at this point in the history
Enables indexalator to be instantiated from device code.
Also add gtests for the output indexalator.
This change helps enable the offset-normalizing-iterator (#14234).

Authors:
  - David Wendt (https://github.com/davidwendt)

Approvers:
  - Bradley Dice (https://github.com/bdice)
  - Yunsong Wang (https://github.com/PointKernel)

URL: #14206
  • Loading branch information
davidwendt authored Oct 17, 2023
1 parent c47546e commit 5f05c18
Show file tree
Hide file tree
Showing 2 changed files with 131 additions and 9 deletions.
40 changes: 31 additions & 9 deletions cpp/include/cudf/detail/normalizing_iterator.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ namespace detail {
*/
template <class Derived, typename Integer>
struct base_normalator {
static_assert(std::is_integral_v<Integer>);
static_assert(cudf::is_index_type<Integer>());
using difference_type = std::ptrdiff_t;
using value_type = Integer;
using pointer = Integer*;
Expand Down Expand Up @@ -202,13 +202,34 @@ struct base_normalator {
return static_cast<Derived const&>(*this).p_ >= rhs.p_;
}

private:
/**
 * @brief Type-dispatcher functor that returns the byte width of an index type.
 *
 * Used with cudf::type_dispatcher to resolve sizeof(T) for a runtime
 * data_type. Callable from both host and device code.
 */
struct integer_sizeof_fn {
  // Non-index types are invalid: throw on the host, trap on the device.
  // CUDF_FAIL is host-only, so the device compilation pass must use
  // CUDF_UNREACHABLE instead — hence the __CUDA_ARCH__ split.
  template <typename T, std::enable_if_t<not cudf::is_index_type<T>()>* = nullptr>
  CUDF_HOST_DEVICE constexpr std::size_t operator()() const
  {
#ifndef __CUDA_ARCH__
    CUDF_FAIL("only integral types are supported");
#else
    CUDF_UNREACHABLE("only integral types are supported");
#endif
  }
  // Index types simply report their size in bytes.
  template <typename T, std::enable_if_t<cudf::is_index_type<T>()>* = nullptr>
  CUDF_HOST_DEVICE constexpr std::size_t operator()() const noexcept
  {
    return sizeof(T);
  }
};

protected:
/**
* @brief Constructor assigns width and type member variables for base class.
*/
explicit base_normalator(data_type dtype) : width_(size_of(dtype)), dtype_(dtype) {}
explicit CUDF_HOST_DEVICE base_normalator(data_type dtype) : dtype_(dtype)
{
width_ = static_cast<int32_t>(type_dispatcher(dtype, integer_sizeof_fn{}));
}

int width_; /// integer type width = 1,2,4, or 8
int32_t width_; /// integer type width = 1,2,4, or 8
data_type dtype_; /// for type-dispatcher calls
};

Expand Down Expand Up @@ -244,12 +265,12 @@ struct input_normalator : base_normalator<input_normalator<Integer>, Integer> {
* @brief Dispatch functor for resolving a Integer value from any integer type
*/
struct normalize_type {
template <typename T, std::enable_if_t<cuda::std::is_integral_v<T>>* = nullptr>
template <typename T, std::enable_if_t<cudf::is_index_type<T>()>* = nullptr>
__device__ Integer operator()(void const* tp)
{
return static_cast<Integer>(*static_cast<T const*>(tp));
}
template <typename T, std::enable_if_t<not cuda::std::is_integral_v<T>>* = nullptr>
template <typename T, std::enable_if_t<not cudf::is_index_type<T>()>* = nullptr>
__device__ Integer operator()(void const*)
{
CUDF_UNREACHABLE("only integral types are supported");
Expand All @@ -274,9 +295,10 @@ struct input_normalator : base_normalator<input_normalator<Integer>, Integer> {
* @param data Pointer to an integer array in device memory.
* @param dtype Type of the integer data
* @param offset Element offset (in units of dtype width) applied to the data pointer
*/
input_normalator(void const* data, data_type dtype)
CUDF_HOST_DEVICE input_normalator(void const* data, data_type dtype, cudf::size_type offset = 0)
: base_normalator<input_normalator<Integer>, Integer>(dtype), p_{static_cast<char const*>(data)}
{
p_ += offset * this->width_;
}

char const* p_; /// pointer to the integer data in device memory
Expand Down Expand Up @@ -327,12 +349,12 @@ struct output_normalator : base_normalator<output_normalator<Integer>, Integer>
* @brief Dispatch functor for setting the index value from a size_type value.
*/
struct normalize_type {
template <typename T, std::enable_if_t<std::is_integral_v<T>>* = nullptr>
template <typename T, std::enable_if_t<cudf::is_index_type<T>()>* = nullptr>
__device__ void operator()(void* tp, Integer const value)
{
(*static_cast<T*>(tp)) = static_cast<T>(value);
}
template <typename T, std::enable_if_t<not std::is_integral_v<T>>* = nullptr>
template <typename T, std::enable_if_t<not cudf::is_index_type<T>()>* = nullptr>
__device__ void operator()(void*, Integer const)
{
CUDF_UNREACHABLE("only index types are supported");
Expand All @@ -355,7 +377,7 @@ struct output_normalator : base_normalator<output_normalator<Integer>, Integer>
* @param data Pointer to an integer array in device memory.
* @param dtype Type of the integer data
*/
output_normalator(void* data, data_type dtype)
CUDF_HOST_DEVICE output_normalator(void* data, data_type dtype)
: base_normalator<output_normalator<Integer>, Integer>(dtype), p_{static_cast<char*>(data)}
{
}
Expand Down
100 changes: 100 additions & 0 deletions cpp/tests/iterator/indexalator_test.cu
Original file line number Diff line number Diff line change
Expand Up @@ -20,9 +20,13 @@

#include <cudf/detail/indexalator.cuh>

#include <thrust/binary_search.h>
#include <thrust/gather.h>
#include <thrust/host_vector.h>
#include <thrust/optional.h>
#include <thrust/pair.h>
#include <thrust/scatter.h>
#include <thrust/sequence.h>

using TestingTypes = cudf::test::IntegralTypesNotBool;

Expand Down Expand Up @@ -94,3 +98,99 @@ TYPED_TEST(IndexalatorTest, optional_iterator)
auto it_dev = cudf::detail::indexalator_factory::make_input_optional_iterator(d_col);
this->iterator_test_thrust(expected_values, it_dev, host_values.size());
}

/**
 * @brief Device functor that widens an integer value to size_type and doubles it.
 *
 * Used by the output-iterator test to produce a deterministic transform result.
 */
template <typename Integer>
struct transform_fn {
  __device__ cudf::size_type operator()(Integer v)
  {
    // Widen first so the addition happens in size_type, exactly as the
    // two separate casts in the original expression did.
    auto const widened = static_cast<cudf::size_type>(v);
    return widened + widened;
  }
};

TYPED_TEST(IndexalatorTest, output_iterator)
{
  using T = TypeParam;

  // Source integer column (type T) and a zeroed size_type destination column;
  // the output indexalator writes size_type values into d_col2 regardless of T.
  auto d_col1 =
    cudf::test::fixed_width_column_wrapper<T, int32_t>({0, 6, 7, 14, 23, 33, 43, 45, 63});
  auto d_col2 =
    cudf::test::fixed_width_column_wrapper<cudf::size_type>({0, 0, 0, 0, 0, 0, 0, 0, 0});
  auto itr    = cudf::detail::indexalator_factory::make_output_iterator(d_col2);
  auto input  = cudf::column_view(d_col1);
  auto stream = cudf::get_default_stream();

  auto map   = cudf::test::fixed_width_column_wrapper<int>({0, 2, 4, 6, 8, 1, 3, 5, 7});
  auto d_map = cudf::column_view(map);
  thrust::gather(
    rmm::exec_policy_nosync(stream), d_map.begin<int>(), d_map.end<int>(), input.begin<T>(), itr);
  auto expected =
    cudf::test::fixed_width_column_wrapper<cudf::size_type>({0, 7, 23, 43, 63, 6, 14, 33, 45});
  // BUGFIX: the gather result was computed into `expected` but never verified
  // before being overwritten below; assert it here.
  CUDF_TEST_EXPECT_COLUMNS_EQUAL(d_col2, expected);

  thrust::scatter(
    rmm::exec_policy_nosync(stream), input.begin<T>(), input.end<T>(), d_map.begin<int>(), itr);
  expected =
    cudf::test::fixed_width_column_wrapper<cudf::size_type>({0, 33, 6, 43, 7, 45, 14, 63, 23});
  // BUGFIX: likewise, the scatter result was never asserted.
  CUDF_TEST_EXPECT_COLUMNS_EQUAL(d_col2, expected);

  thrust::transform(
    rmm::exec_policy(stream), input.begin<T>(), input.end<T>(), itr, transform_fn<T>{});
  expected =
    cudf::test::fixed_width_column_wrapper<cudf::size_type>({0, 12, 14, 28, 46, 66, 86, 90, 126});
  CUDF_TEST_EXPECT_COLUMNS_EQUAL(d_col2, expected);

  thrust::fill(rmm::exec_policy(stream), itr, itr + input.size(), 77);
  expected =
    cudf::test::fixed_width_column_wrapper<cudf::size_type>({77, 77, 77, 77, 77, 77, 77, 77, 77});
  CUDF_TEST_EXPECT_COLUMNS_EQUAL(d_col2, expected);

  thrust::sequence(rmm::exec_policy(stream), itr, itr + input.size());
  expected = cudf::test::fixed_width_column_wrapper<cudf::size_type>({0, 1, 2, 3, 4, 5, 6, 7, 8});
  CUDF_TEST_EXPECT_COLUMNS_EQUAL(d_col2, expected);

  // lower_bound writes search-result positions through the output indexalator.
  auto indices =
    cudf::test::fixed_width_column_wrapper<T, int32_t>({0, 10, 20, 30, 40, 50, 60, 70, 80});
  auto d_indices = cudf::column_view(indices);
  thrust::lower_bound(rmm::exec_policy(stream),
                      d_indices.begin<T>(),
                      d_indices.end<T>(),
                      input.begin<T>(),
                      input.end<T>(),
                      itr);
  expected = cudf::test::fixed_width_column_wrapper<cudf::size_type>({0, 1, 1, 2, 3, 4, 5, 5, 7});
  CUDF_TEST_EXPECT_COLUMNS_EQUAL(d_col2, expected);
}

/**
* For testing creating and using the indexalator in device code.
*/
/**
 * @brief Functor that constructs an input indexalator in device code.
 *
 * Exercises the device-side indexalator constructor: builds the iterator
 * per call from the column's head pointer and type, reads element `idx`,
 * and returns it tripled.
 */
struct device_functor_fn {
  cudf::column_device_view const d_col;
  __device__ cudf::size_type operator()(cudf::size_type idx)
  {
    // Build the normalizing iterator on the fly and read through it.
    auto const value = cudf::detail::input_indexalator(d_col.head(), d_col.type())[idx];
    return value * 3;
  }
};

TYPED_TEST(IndexalatorTest, device_indexalator)
{
  using T = TypeParam;

  // Source data in type T; results land in a size_type column.
  auto source_col =
    cudf::test::fixed_width_column_wrapper<T, int32_t>({0, 6, 7, 14, 23, 33, 43, 45, 63});
  auto result_col =
    cudf::test::fixed_width_column_wrapper<cudf::size_type>({0, 0, 0, 0, 0, 0, 0, 0, 0});
  auto in_view  = cudf::column_view(source_col);
  auto out_view = cudf::mutable_column_view(result_col);
  auto stream   = cudf::get_default_stream();

  auto d_source = cudf::column_device_view::create(in_view, stream);

  // Each element is read through an indexalator constructed inside the
  // device functor and tripled.
  thrust::transform(rmm::exec_policy(stream),
                    thrust::counting_iterator<int>(0),
                    thrust::counting_iterator<int>(in_view.size()),
                    out_view.begin<cudf::size_type>(),
                    device_functor_fn{*d_source});

  auto expected =
    cudf::test::fixed_width_column_wrapper<cudf::size_type>({0, 18, 21, 42, 69, 99, 129, 135, 189});
  CUDF_TEST_EXPECT_COLUMNS_EQUAL(result_col, expected);
}

0 comments on commit 5f05c18

Please sign in to comment.