
Use east const in include files #13494

Merged
merged 8 commits on Jun 14, 2023
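For context, "east const" places the const qualifier to the right of whatever it qualifies, so every declaration reads consistently right to left. The rewrite is purely stylistic: the old and new spellings name identical types. A minimal sketch of the two styles (illustrative only, not taken from the diff below):

```cpp
// West const: qualifier written to the left of the type.
const char* name = "cudf";        // pointer to const char

// East const: qualifier written to the right of what it modifies,
// so each declaration reads right to left with no special cases.
char const* alias = "cudf";       // pointer to const char (same type as above)
char* const fixed_ptr = nullptr;  // const pointer to mutable char
char const* const both = "cudf";  // const pointer to const char
```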
4 changes: 2 additions & 2 deletions cpp/include/cudf/column/column_device_view.cuh
@@ -442,8 +442,8 @@ class alignas(16) column_device_view : public detail::column_device_view_base {
__device__ T element(size_type element_index) const noexcept
{
size_type index = element_index + offset(); // account for this view's _offset
- const auto* d_offsets = d_children[strings_column_view::offsets_column_index].data<int32_t>();
- const char* d_strings = d_children[strings_column_view::chars_column_index].data<char>();
+ auto const* d_offsets = d_children[strings_column_view::offsets_column_index].data<int32_t>();
+ char const* d_strings = d_children[strings_column_view::chars_column_index].data<char>();
size_type offset = d_offsets[index];
return string_view{d_strings + offset, d_offsets[index + 1] - offset};
}
2 changes: 1 addition & 1 deletion cpp/include/cudf/column/column_factories.hpp
@@ -374,7 +374,7 @@ std::unique_ptr<column> make_fixed_width_column(
* @return Constructed strings column
*/
std::unique_ptr<column> make_strings_column(
- cudf::device_span<thrust::pair<const char*, size_type> const> strings,
+ cudf::device_span<thrust::pair<char const*, size_type> const> strings,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());

4 changes: 2 additions & 2 deletions cpp/include/cudf/copying.hpp
@@ -193,7 +193,7 @@ std::unique_ptr<table> scatter(
* @return Result of scattering values from source to target
*/
std::unique_ptr<table> scatter(
- std::vector<std::reference_wrapper<const scalar>> const& source,
+ std::vector<std::reference_wrapper<scalar const>> const& source,
column_view const& indices,
table_view const& target,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
@@ -716,7 +716,7 @@ std::unique_ptr<table> boolean_mask_scatter(
* @returns Returns a table by scattering `input` into `target` as per `boolean_mask`
*/
std::unique_ptr<table> boolean_mask_scatter(
- std::vector<std::reference_wrapper<const scalar>> const& input,
+ std::vector<std::reference_wrapper<scalar const>> const& input,
table_view const& target,
column_view const& boolean_mask,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
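Because const placement never changes the type being named, signatures such as the scatter overloads above are identical before and after this PR from the compiler's point of view. A small self-contained check (using a hypothetical stand-in struct, since this sketch does not pull in the cudf headers):

```cpp
#include <functional>
#include <type_traits>

struct scalar {};  // hypothetical stand-in for cudf::scalar

// reference_wrapper<const scalar> and reference_wrapper<scalar const>
// are one and the same type, so no caller needs to change.
static_assert(std::is_same_v<std::reference_wrapper<const scalar>,
                             std::reference_wrapper<scalar const>>,
              "east vs. west const does not change the type");

int main() { return 0; }
```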
2 changes: 1 addition & 1 deletion cpp/include/cudf/detail/aggregation/result_cache.hpp
@@ -45,7 +45,7 @@ class result_cache {
result_cache() = delete;
~result_cache() = default;
result_cache(result_cache const&) = delete;
- result_cache& operator=(const result_cache& other) = delete;
+ result_cache& operator=(result_cache const& other) = delete;

result_cache(size_t num_columns) : _cache(num_columns) {}

8 changes: 4 additions & 4 deletions cpp/include/cudf/detail/copy_if.cuh
@@ -161,10 +161,10 @@ __launch_bounds__(block_size) __global__

constexpr int num_warps = block_size / cudf::detail::warp_size;
// account for partial blocks with non-warp-aligned offsets
- const int last_index = tmp_block_sum + (block_offset % cudf::detail::warp_size) - 1;
- const int last_warp = min(num_warps, last_index / cudf::detail::warp_size);
- const int wid = threadIdx.x / cudf::detail::warp_size;
- const int lane = threadIdx.x % cudf::detail::warp_size;
+ int const last_index = tmp_block_sum + (block_offset % cudf::detail::warp_size) - 1;
+ int const last_warp = min(num_warps, last_index / cudf::detail::warp_size);
+ int const wid = threadIdx.x / cudf::detail::warp_size;
+ int const lane = threadIdx.x % cudf::detail::warp_size;

cudf::size_type tmp_warp_valid_counts{0};

4 changes: 2 additions & 2 deletions cpp/include/cudf/detail/copy_if_else.cuh
@@ -45,7 +45,7 @@ __launch_bounds__(block_size) __global__
size_type* __restrict__ const valid_count)
{
const size_type tid = threadIdx.x + blockIdx.x * block_size;
- const int warp_id = tid / warp_size;
+ int const warp_id = tid / warp_size;
const size_type warps_per_grid = gridDim.x * block_size / warp_size;

// begin/end indices for the column data
@@ -59,7 +59,7 @@

// lane id within the current warp
constexpr size_type leader_lane{0};
- const int lane_id = threadIdx.x % warp_size;
+ int const lane_id = threadIdx.x % warp_size;

size_type warp_valid_count{0};

12 changes: 6 additions & 6 deletions cpp/include/cudf/detail/copy_range.cuh
@@ -54,10 +54,10 @@ __global__ void copy_range_kernel(SourceValueIterator source_value_begin,
"copy_range_kernel assumes bitmask element size in bits == warp size");

constexpr cudf::size_type leader_lane{0};
- const int lane_id = threadIdx.x % warp_size;
+ int const lane_id = threadIdx.x % warp_size;

const cudf::size_type tid = threadIdx.x + blockIdx.x * blockDim.x;
- const int warp_id = tid / warp_size;
+ int const warp_id = tid / warp_size;

const cudf::size_type offset = target.offset();
const cudf::size_type begin_mask_idx = cudf::word_index(offset + target_begin);
@@ -79,10 +79,10 @@ __global__ void copy_range_kernel(SourceValueIterator source_value_begin,
if (in_range) target.element<T>(index) = *(source_value_begin + source_idx);

if (has_validity) { // update bitmask
- const bool valid = in_range && *(source_validity_begin + source_idx);
- const int active_mask = __ballot_sync(0xFFFF'FFFFu, in_range);
- const int valid_mask = __ballot_sync(0xFFFF'FFFFu, valid);
- const int warp_mask = active_mask & valid_mask;
+ bool const valid = in_range && *(source_validity_begin + source_idx);
+ int const active_mask = __ballot_sync(0xFFFF'FFFFu, in_range);
+ int const valid_mask = __ballot_sync(0xFFFF'FFFFu, valid);
+ int const warp_mask = active_mask & valid_mask;

cudf::bitmask_type old_mask = target.get_mask_word(mask_idx);
if (lane_id == leader_lane) {
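The hunk above builds a warp-wide validity word with __ballot_sync: every lane votes, the votes land one bit per lane, and only the leader lane writes the combined word. A compact standalone illustration of that voting pattern (a hypothetical kernel, not code from this PR):

```cuda
__global__ void warp_valid_words(int const* data, int n, unsigned int* words)
{
  int const tid       = threadIdx.x + blockIdx.x * blockDim.x;
  bool const in_range = tid < n;
  bool const valid    = in_range && data[tid] != 0;

  // Lane i's predicate lands in bit i of the returned 32-bit word.
  unsigned int const active_mask = __ballot_sync(0xFFFF'FFFFu, in_range);
  unsigned int const valid_mask  = __ballot_sync(0xFFFF'FFFFu, valid);

  // The leader lane writes the combined word, as copy_range_kernel does above.
  if (threadIdx.x % 32 == 0) { words[tid / 32] = active_mask & valid_mask; }
}
```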
6 changes: 3 additions & 3 deletions cpp/include/cudf/detail/null_mask.cuh
@@ -263,7 +263,7 @@ __global__ void subtract_set_bits_range_boundaries_kernel(bitmask_type const* bi
*/
struct bit_to_word_index {
bit_to_word_index(bool inclusive) : inclusive(inclusive) {}
- __device__ inline size_type operator()(const size_type& bit_index) const
+ __device__ inline size_type operator()(size_type const& bit_index) const
{
return word_index(bit_index) + ((inclusive || intra_word_index(bit_index) == 0) ? 0 : 1);
}
@@ -379,13 +379,13 @@ size_type validate_segmented_indices(IndexIterator indices_begin, IndexIterator
}

struct index_alternator {
- __device__ inline size_type operator()(const size_type& i) const
+ __device__ inline size_type operator()(size_type const& i) const
{
return *(d_indices + 2 * i + (is_end ? 1 : 0));
}

bool const is_end = false;
- const size_type* d_indices;
+ size_type const* d_indices;
};

/**
4 changes: 2 additions & 2 deletions cpp/include/cudf/detail/scatter.hpp
@@ -106,7 +106,7 @@ std::unique_ptr<table> scatter(table_view const& source,
* @param mr Device memory resource used to allocate the returned table's device memory
* @return Result of scattering values from source to target
*/
- std::unique_ptr<table> scatter(std::vector<std::reference_wrapper<const scalar>> const& source,
+ std::unique_ptr<table> scatter(std::vector<std::reference_wrapper<scalar const>> const& source,
column_view const& indices,
table_view const& target,
rmm::cuda_stream_view stream,
@@ -136,7 +136,7 @@ std::unique_ptr<table> boolean_mask_scatter(table_view const& source,
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<table> boolean_mask_scatter(
- std::vector<std::reference_wrapper<const scalar>> const& source,
+ std::vector<std::reference_wrapper<scalar const>> const& source,
table_view const& target,
column_view const& boolean_mask,
rmm::cuda_stream_view stream,
4 changes: 2 additions & 2 deletions cpp/include/cudf/detail/utilities/cuda.cuh
@@ -41,8 +41,8 @@ static constexpr size_type warp_size{32};
*/
class grid_1d {
public:
- const int num_threads_per_block;
- const int num_blocks;
+ int const num_threads_per_block;
+ int const num_blocks;
/**
* @param overall_num_elements The number of elements the kernel needs to
* handle/process, in its main, one-dimensional/linear input (e.g. one or more
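The two const members above are grid_1d's whole public surface in this hunk: a block size plus a block count derived from the element count. A hedged sketch of the launch pattern such a helper supports (the two-argument constructor is assumed from the documented parameters, not confirmed by this diff):

```cuda
#include <cudf/detail/utilities/cuda.cuh>

__global__ void fill_ones(int* out, int n)
{
  int const i = blockIdx.x * blockDim.x + threadIdx.x;  // east const, per this PR
  if (i < n) { out[i] = 1; }
}

void launch_fill(int* out, int n)
{
  // Assumed signature: grid_1d(overall_num_elements, num_threads_per_block).
  cudf::detail::grid_1d const config(n, 256);
  fill_ones<<<config.num_blocks, config.num_threads_per_block>>>(out, n);
}
```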
20 changes: 10 additions & 10 deletions cpp/include/cudf/detail/utilities/device_operators.cuh
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2022, NVIDIA CORPORATION.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -62,7 +62,7 @@ CUDF_HOST_DEVICE inline auto max(LHS const& lhs, RHS const& rhs)
*/
struct DeviceSum {
template <typename T, std::enable_if_t<!cudf::is_timestamp<T>()>* = nullptr>
- CUDF_HOST_DEVICE inline auto operator()(const T& lhs, const T& rhs) -> decltype(lhs + rhs)
+ CUDF_HOST_DEVICE inline auto operator()(T const& lhs, T const& rhs) -> decltype(lhs + rhs)
{
return lhs + rhs;
}
@@ -93,13 +93,13 @@ struct DeviceSum {
*/
struct DeviceCount {
template <typename T, std::enable_if_t<cudf::is_timestamp<T>()>* = nullptr>
- CUDF_HOST_DEVICE inline T operator()(const T& lhs, const T& rhs)
+ CUDF_HOST_DEVICE inline T operator()(T const& lhs, T const& rhs)
{
return T{DeviceCount{}(lhs.time_since_epoch(), rhs.time_since_epoch())};
}

template <typename T, std::enable_if_t<!cudf::is_timestamp<T>()>* = nullptr>
- CUDF_HOST_DEVICE inline T operator()(const T&, const T& rhs)
+ CUDF_HOST_DEVICE inline T operator()(T const&, T const& rhs)
{
return rhs + T{1};
}
@@ -116,7 +116,7 @@ struct DeviceCount {
*/
struct DeviceMin {
template <typename T>
- CUDF_HOST_DEVICE inline auto operator()(const T& lhs, const T& rhs)
+ CUDF_HOST_DEVICE inline auto operator()(T const& lhs, T const& rhs)
-> decltype(cudf::detail::min(lhs, rhs))
{
return numeric::detail::min(lhs, rhs);
@@ -164,7 +164,7 @@ struct DeviceMin {
*/
struct DeviceMax {
template <typename T>
- CUDF_HOST_DEVICE inline auto operator()(const T& lhs, const T& rhs)
+ CUDF_HOST_DEVICE inline auto operator()(T const& lhs, T const& rhs)
-> decltype(cudf::detail::max(lhs, rhs))
{
return numeric::detail::max(lhs, rhs);
@@ -211,7 +211,7 @@ struct DeviceMax {
*/
struct DeviceProduct {
template <typename T, std::enable_if_t<!cudf::is_timestamp<T>()>* = nullptr>
- CUDF_HOST_DEVICE inline auto operator()(const T& lhs, const T& rhs) -> decltype(lhs * rhs)
+ CUDF_HOST_DEVICE inline auto operator()(T const& lhs, T const& rhs) -> decltype(lhs * rhs)
{
return lhs * rhs;
}
@@ -235,7 +235,7 @@ struct DeviceProduct {
*/
struct DeviceAnd {
template <typename T, std::enable_if_t<std::is_integral_v<T>>* = nullptr>
- CUDF_HOST_DEVICE inline auto operator()(const T& lhs, const T& rhs) -> decltype(lhs & rhs)
+ CUDF_HOST_DEVICE inline auto operator()(T const& lhs, T const& rhs) -> decltype(lhs & rhs)
{
return (lhs & rhs);
}
@@ -246,7 +246,7 @@ struct DeviceOr {
*/
struct DeviceOr {
template <typename T, std::enable_if_t<std::is_integral_v<T>>* = nullptr>
- CUDF_HOST_DEVICE inline auto operator()(const T& lhs, const T& rhs) -> decltype(lhs | rhs)
+ CUDF_HOST_DEVICE inline auto operator()(T const& lhs, T const& rhs) -> decltype(lhs | rhs)
{
return (lhs | rhs);
}
@@ -257,7 +257,7 @@ struct DeviceXor {
*/
struct DeviceXor {
template <typename T, std::enable_if_t<std::is_integral_v<T>>* = nullptr>
- CUDF_HOST_DEVICE inline auto operator()(const T& lhs, const T& rhs) -> decltype(lhs ^ rhs)
+ CUDF_HOST_DEVICE inline auto operator()(T const& lhs, T const& rhs) -> decltype(lhs ^ rhs)
{
return (lhs ^ rhs);
}
4 changes: 2 additions & 2 deletions cpp/include/cudf/detail/utilities/hash_functions.cuh
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2022, NVIDIA CORPORATION.
+ * Copyright (c) 2017-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -140,7 +140,7 @@ struct hash_circular_buffer {
available_space = space_to_leave;
}

- __device__ inline const uint8_t& operator[](int idx) const { return storage[idx]; }
+ __device__ inline uint8_t const& operator[](int idx) const { return storage[idx]; }
};

// Get a uint8_t pointer to a column element and its size as a pair.
36 changes: 18 additions & 18 deletions cpp/include/cudf/detail/utilities/int_fastdiv.h
Reviewer comment (Member): Not for this PR, but this should be a .cuh file instead of a C-style .h file.

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2022, NVIDIA CORPORATION.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Copyright 2014 Maxim Milakov
*
@@ -58,7 +58,7 @@ class int_fastdiv {

int p;
unsigned int ad, anc, delta, q1, r1, q2, r2, t;
- const unsigned two31 = 0x8000'0000u;
+ unsigned const two31 = 0x8000'0000u;
ad = (d == 0) ? 1 : abs(d);
t = two31 + ((unsigned int)d >> 31);
anc = t - 1 - t % ad;
@@ -95,11 +95,11 @@ class int_fastdiv {
n_add_sign = 0;
}

- __host__ __device__ __forceinline__ friend int operator/(const int divident,
- const int_fastdiv& divisor);
+ __host__ __device__ __forceinline__ friend int operator/(int const divident,
+ int_fastdiv const& divisor);
};

- __host__ __device__ __forceinline__ int operator/(const int n, const int_fastdiv& divisor)
+ __host__ __device__ __forceinline__ int operator/(int const n, int_fastdiv const& divisor)
{
int q;
#ifdef __CUDA_ARCH__
@@ -115,61 +115,61 @@ __host__ __device__ __forceinline__ int operator/(const int n, const int_fastdiv
return q;
}

- __host__ __device__ __forceinline__ int operator%(const int n, const int_fastdiv& divisor)
+ __host__ __device__ __forceinline__ int operator%(int const n, int_fastdiv const& divisor)
{
int quotient = n / divisor;
int remainder = n - quotient * divisor;
return remainder;
}

- __host__ __device__ __forceinline__ int operator/(const unsigned int n, const int_fastdiv& divisor)
+ __host__ __device__ __forceinline__ int operator/(unsigned int const n, int_fastdiv const& divisor)
{
return ((int)n) / divisor;
}

- __host__ __device__ __forceinline__ int operator%(const unsigned int n, const int_fastdiv& divisor)
+ __host__ __device__ __forceinline__ int operator%(unsigned int const n, int_fastdiv const& divisor)
{
return ((int)n) % divisor;
}

- __host__ __device__ __forceinline__ int operator/(const short n, const int_fastdiv& divisor)
+ __host__ __device__ __forceinline__ int operator/(short const n, int_fastdiv const& divisor)
{
return ((int)n) / divisor;
}

- __host__ __device__ __forceinline__ int operator%(const short n, const int_fastdiv& divisor)
+ __host__ __device__ __forceinline__ int operator%(short const n, int_fastdiv const& divisor)
{
return ((int)n) % divisor;
}

- __host__ __device__ __forceinline__ int operator/(const unsigned short n,
- const int_fastdiv& divisor)
+ __host__ __device__ __forceinline__ int operator/(unsigned short const n,
+ int_fastdiv const& divisor)
{
return ((int)n) / divisor;
}

- __host__ __device__ __forceinline__ int operator%(const unsigned short n,
- const int_fastdiv& divisor)
+ __host__ __device__ __forceinline__ int operator%(unsigned short const n,
+ int_fastdiv const& divisor)
{
return ((int)n) % divisor;
}

- __host__ __device__ __forceinline__ int operator/(const char n, const int_fastdiv& divisor)
+ __host__ __device__ __forceinline__ int operator/(char const n, int_fastdiv const& divisor)
{
return ((int)n) / divisor;
}

- __host__ __device__ __forceinline__ int operator%(const char n, const int_fastdiv& divisor)
+ __host__ __device__ __forceinline__ int operator%(char const n, int_fastdiv const& divisor)
{
return ((int)n) % divisor;
}

- __host__ __device__ __forceinline__ int operator/(const unsigned char n, const int_fastdiv& divisor)
+ __host__ __device__ __forceinline__ int operator/(unsigned char const n, int_fastdiv const& divisor)
{
return ((int)n) / divisor;
}

- __host__ __device__ __forceinline__ int operator%(const unsigned char n, const int_fastdiv& divisor)
+ __host__ __device__ __forceinline__ int operator%(unsigned char const n, int_fastdiv const& divisor)
{
return ((int)n) % divisor;
}
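The overloads above let an int_fastdiv value stand in for a plain int divisor at every call site. A hedged usage sketch (the converting constructor from int is assumed from the magic-number setup earlier in this file):

```cpp
#include <cudf/detail/utilities/int_fastdiv.h>

int main()
{
  int_fastdiv const divisor(7);  // precompute magic numbers once (assumed ctor)

  // The / and % below resolve to the operators above, trading a hardware
  // divide for a multiply-high plus shifts on both host and device.
  int const q = 100 / divisor;   // 14
  int const r = 100 % divisor;   // 2
  return q + r;
}
```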
2 changes: 1 addition & 1 deletion cpp/include/cudf/detail/utilities/integer_utils.hpp
@@ -107,7 +107,7 @@ constexpr S round_up_unsafe(S number_to_round, S modulus) noexcept
* the result will be incorrect
*/
template <typename S, typename T>
- constexpr S div_rounding_up_unsafe(const S& dividend, const T& divisor) noexcept
+ constexpr S div_rounding_up_unsafe(S const& dividend, T const& divisor) noexcept
{
return (dividend + divisor - 1) / divisor;
}
6 changes: 3 additions & 3 deletions cpp/include/cudf/detail/utilities/pinned_host_vector.hpp
@@ -50,7 +50,7 @@ class pinned_allocator<void> {
public:
using value_type = void; ///< The type of the elements in the allocator
using pointer = void*; ///< The type returned by address() / allocate()
- using const_pointer = const void*; ///< The type returned by address()
+ using const_pointer = void const*; ///< The type returned by address()
using size_type = std::size_t; ///< The type used for the size of the allocation
using difference_type = std::ptrdiff_t; ///< The type of the distance between two pointers

@@ -76,9 +76,9 @@ class pinned_allocator {
public:
using value_type = T; ///< The type of the elements in the allocator
using pointer = T*; ///< The type returned by address() / allocate()
- using const_pointer = const T*; ///< The type returned by address()
+ using const_pointer = T const*; ///< The type returned by address()
using reference = T&; ///< The parameter type for address()
- using const_reference = const T&; ///< The parameter type for address()
+ using const_reference = T const&; ///< The parameter type for address()
using size_type = std::size_t; ///< The type used for the size of the allocation
using difference_type = std::ptrdiff_t; ///< The type of the distance between two pointers
