Use east const in src files (#13493)
Use east const in `cpp/src/` files.

Used clang-format with `QualifierAlignment: Right` to do the cleanup.

The option is not added to `.clang-format` itself, because https://clang.llvm.org/docs/ClangFormatStyleOptions.html#qualifieralignment warns:

> Setting QualifierAlignment to something other than Leave, COULD lead to incorrect code formatting due to incorrect decisions made due to clang-formats lack of complete semantic information. As such extra care should be taken to review code changes made by the use of this option.
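For reference, a rough sketch of how such a one-off pass can be scripted without committing the option (the file globs and commands here are illustrative, not necessarily the exact invocation used for this PR; `QualifierAlignment` requires clang-format 14 or newer):

```sh
# Temporarily enable east const in the repo's clang-format config.
echo 'QualifierAlignment: Right' >> .clang-format

# Reformat the C++/CUDA sources under cpp/src in place.
find cpp/src \( -name '*.cpp' -o -name '*.cu' -o -name '*.cuh' \) -print0 |
  xargs -0 clang-format -i

# Drop the config change again, so the risky option is never committed.
git checkout -- .clang-format
```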

Split into multiple PRs for easier review: #13491, #13492, #13493, #13494

Authors:
  - Karthikeyan (https://github.com/karthikeyann)

Approvers:
  - Nghia Truong (https://github.com/ttnghia)
  - Vukasin Milovanovic (https://github.com/vuule)

URL: #13493
karthikeyann authored Jun 10, 2023
1 parent c733cc3 commit 5724204
Showing 147 changed files with 1,275 additions and 1,275 deletions.
cpp/src/binaryop/binaryop.cpp (2 changes: 1 addition & 1 deletion)

```diff
@@ -136,7 +136,7 @@ namespace jit {
 void binary_operation(mutable_column_view& out,
                       column_view const& lhs,
                       column_view const& rhs,
-                      const std::string& ptx,
+                      std::string const& ptx,
                       rmm::cuda_stream_view stream)
 {
   std::string const output_type_name = cudf::type_to_name(out.type());
```
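The change is purely a spelling change: `const std::string&` and `std::string const&` name the same type, because `const` qualifies whatever is on its left, or, when nothing is on its left, whatever is on its right. A minimal standalone check (the variable names are ours, for illustration):

```cpp
#include <string>
#include <type_traits>

int main()
{
  std::string s = "east";

  const std::string& west = s;  // west const
  std::string const& east = s;  // east const

  // Both declarations have exactly the same type.
  static_assert(std::is_same_v<decltype(west), decltype(east)>);
}
```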
cpp/src/binaryop/compiled/binary_ops.cuh (4 changes: 2 additions & 2 deletions)

```diff
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021-2022, NVIDIA CORPORATION.
+ * Copyright (c) 2021-2023, NVIDIA CORPORATION.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -269,7 +269,7 @@ void for_each(rmm::cuda_stream_view stream, cudf::size_type size, Functor f)
   CUDF_CUDA_TRY(
     cudaOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, for_each_kernel<decltype(f)>));
   // 2 elements per thread.
-  const int grid_size = util::div_rounding_up_safe(size, 2 * block_size);
+  int const grid_size = util::div_rounding_up_safe(size, 2 * block_size);
   for_each_kernel<<<grid_size, block_size, 0, stream.value()>>>(size, std::forward<Functor&&>(f));
 }
```
cpp/src/bitmask/null_mask.cu (18 changes: 9 additions & 9 deletions)

```diff
@@ -105,7 +105,7 @@ __global__ void set_null_mask_kernel(bitmask_type* __restrict__ destination,
                                      size_type number_of_mask_words)
 {
   auto x = destination + word_index(begin_bit);
-  const auto last_word = word_index(end_bit) - word_index(begin_bit);
+  auto const last_word = word_index(end_bit) - word_index(begin_bit);
   bitmask_type fill_value = valid ? 0xffff'ffff : 0;
 
   for (size_type destination_word_index = threadIdx.x + blockIdx.x * blockDim.x;
@@ -372,32 +372,32 @@ cudf::size_type null_count(bitmask_type const* bitmask,
 }
 
 // Count non-zero bits in the specified ranges of a bitmask
-std::vector<size_type> segmented_count_set_bits(const bitmask_type* bitmask,
-                                                host_span<const size_type> indices,
+std::vector<size_type> segmented_count_set_bits(bitmask_type const* bitmask,
+                                                host_span<size_type const> indices,
                                                 rmm::cuda_stream_view stream)
 {
   return detail::segmented_count_set_bits(bitmask, indices.begin(), indices.end(), stream);
 }
 
 // Count zero bits in the specified ranges of a bitmask
-std::vector<size_type> segmented_count_unset_bits(const bitmask_type* bitmask,
-                                                  host_span<const size_type> indices,
+std::vector<size_type> segmented_count_unset_bits(bitmask_type const* bitmask,
+                                                  host_span<size_type const> indices,
                                                   rmm::cuda_stream_view stream)
 {
   return detail::segmented_count_unset_bits(bitmask, indices.begin(), indices.end(), stream);
 }
 
 // Count valid elements in the specified ranges of a validity bitmask
-std::vector<size_type> segmented_valid_count(const bitmask_type* bitmask,
-                                             host_span<const size_type> indices,
+std::vector<size_type> segmented_valid_count(bitmask_type const* bitmask,
+                                             host_span<size_type const> indices,
                                              rmm::cuda_stream_view stream)
 {
   return detail::segmented_valid_count(bitmask, indices.begin(), indices.end(), stream);
 }
 
 // Count null elements in the specified ranges of a validity bitmask
-std::vector<size_type> segmented_null_count(const bitmask_type* bitmask,
-                                            host_span<const size_type> indices,
+std::vector<size_type> segmented_null_count(bitmask_type const* bitmask,
+                                            host_span<size_type const> indices,
                                             rmm::cuda_stream_view stream)
 {
   return detail::segmented_null_count(bitmask, indices.begin(), indices.end(), stream);
```
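The same equivalence holds for const inside template arguments, as in the `host_span<const size_type>` to `host_span<size_type const>` changes above: both spellings produce the same instantiation. A small sketch, using C++20 `std::span` as a stand-in for `cudf::host_span`:

```cpp
#include <cstdint>
#include <span>
#include <type_traits>

// West-const and east-const element types name the same specialization.
static_assert(
  std::is_same_v<std::span<const std::int32_t>, std::span<std::int32_t const>>);

int main() {}
```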
cpp/src/column/column.cu (2 changes: 1 addition & 1 deletion)

```diff
@@ -205,7 +205,7 @@ struct create_column_from_view {
       view.type(),
       view.size(),
       rmm::device_buffer{
-        static_cast<const char*>(view.head()) + (view.offset() * cudf::size_of(view.type())),
+        static_cast<char const*>(view.head()) + (view.offset() * cudf::size_of(view.type())),
         view.size() * cudf::size_of(view.type()),
         stream,
         mr},
```
cpp/src/copying/concatenate.cu (2 changes: 1 addition & 1 deletion)

```diff
@@ -553,7 +553,7 @@ rmm::device_buffer concatenate_masks(host_span<column_view const> views,
                                      rmm::mr::device_memory_resource* mr)
 {
   bool const has_nulls =
-    std::any_of(views.begin(), views.end(), [](const column_view col) { return col.has_nulls(); });
+    std::any_of(views.begin(), views.end(), [](column_view const col) { return col.has_nulls(); });
   if (has_nulls) {
     size_type const total_element_count =
       std::accumulate(views.begin(), views.end(), 0, [](auto accumulator, auto const& v) {
```
cpp/src/copying/contiguous_split.cu (6 changes: 3 additions & 3 deletions)

```diff
@@ -71,7 +71,7 @@ static constexpr std::size_t desired_batch_size = 1 * 1024 * 1024;
  */
 struct src_buf_info {
   src_buf_info(cudf::type_id _type,
-               const int* _offsets,
+               int const* _offsets,
                int _offset_stack_pos,
                int _parent_offsets_index,
                bool _is_validity,
@@ -86,7 +86,7 @@ struct src_buf_info {
   }
 
   cudf::type_id type;
-  const int* offsets;        // a pointer to device memory offsets if I am an offset buffer
+  int const* offsets;        // a pointer to device memory offsets if I am an offset buffer
   int offset_stack_pos;      // position in the offset stack buffer
   int parent_offsets_index;  // immediate parent that has offsets, or -1 if none
   bool is_validity;          // if I am a validity buffer
@@ -172,7 +172,7 @@ __device__ void copy_buffer(uint8_t* __restrict__ dst,
     stride *= 16;
     while (pos + 20 <= num_bytes) {
       // read from the nearest aligned address.
-      const uint32_t* in32 = reinterpret_cast<const uint32_t*>((src + pos) - ofs);
+      uint32_t const* in32 = reinterpret_cast<uint32_t const*>((src + pos) - ofs);
       uint4 v = uint4{in32[0], in32[1], in32[2], in32[3]};
       if (ofs || bit_shift) {
         v.x = __funnelshift_r(v.x, v.y, ofs * 8 + bit_shift);
```
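For pointers, east const makes declarations read consistently from right to left: `uint32_t const*` is a pointer to const `uint32_t` (the same type as `const uint32_t*`), while `uint32_t* const` is a const pointer to mutable `uint32_t`. A standalone sketch of the distinction:

```cpp
#include <cstdint>

int main()
{
  std::uint32_t value = 42;

  std::uint32_t const* ptr_to_const = &value;  // same type as const std::uint32_t*
  std::uint32_t* const const_ptr    = &value;  // the pointer itself is const

  // *ptr_to_const = 0;    // error: the pointee is const
  ptr_to_const = nullptr;  // ok: the pointer is mutable

  *const_ptr = 0;          // ok: the pointee is mutable
  // const_ptr = nullptr;  // error: the pointer is const
}
```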
cpp/src/copying/copy.cu (2 changes: 1 addition & 1 deletion)

```diff
@@ -204,7 +204,7 @@ std::unique_ptr<column> scatter_gather_based_if_else(cudf::scalar const& lhs,
     is_left);
 
   auto const scatter_map_size = std::distance(scatter_map.begin(), scatter_map_end);
-  auto scatter_source = std::vector<std::reference_wrapper<const scalar>>{std::ref(lhs)};
+  auto scatter_source = std::vector<std::reference_wrapper<scalar const>>{std::ref(lhs)};
   auto scatter_map_column_view = cudf::column_view{cudf::data_type{cudf::type_id::INT32},
                                                    static_cast<cudf::size_type>(scatter_map_size),
                                                    scatter_map.begin(),
```
cpp/src/copying/scatter.cu (26 changes: 13 additions & 13 deletions)

```diff
@@ -69,7 +69,7 @@ __global__ void marking_bitmask_kernel(mutable_column_device_view destination,
 }
 
 template <typename MapIterator>
-void scatter_scalar_bitmask_inplace(std::reference_wrapper<const scalar> const& source,
+void scatter_scalar_bitmask_inplace(std::reference_wrapper<scalar const> const& source,
                                     MapIterator scatter_map,
                                     size_type num_scatter_rows,
                                     column& target,
@@ -101,7 +101,7 @@ void scatter_scalar_bitmask_inplace(std::reference_wrapper<const scalar> const&
 
 template <typename Element, typename MapIterator>
 struct column_scalar_scatterer_impl {
-  std::unique_ptr<column> operator()(std::reference_wrapper<const scalar> const& source,
+  std::unique_ptr<column> operator()(std::reference_wrapper<scalar const> const& source,
                                      MapIterator scatter_iter,
                                      size_type scatter_rows,
                                      column_view const& target,
@@ -115,7 +115,7 @@ struct column_scalar_scatterer_impl {
     auto result_view = result->mutable_view();
 
     // Use permutation iterator with constant index to dereference scalar data
-    auto scalar_impl = static_cast<const scalar_type_t<Element>*>(&source.get());
+    auto scalar_impl = static_cast<scalar_type_t<Element> const*>(&source.get());
     auto scalar_iter =
       thrust::make_permutation_iterator(scalar_impl->data(), thrust::make_constant_iterator(0));
 
@@ -132,7 +132,7 @@
 
 template <typename MapIterator>
 struct column_scalar_scatterer_impl<string_view, MapIterator> {
-  std::unique_ptr<column> operator()(std::reference_wrapper<const scalar> const& source,
+  std::unique_ptr<column> operator()(std::reference_wrapper<scalar const> const& source,
                                      MapIterator scatter_iter,
                                      size_type scatter_rows,
                                      column_view const& target,
@@ -141,7 +141,7 @@ struct column_scalar_scatterer_impl<string_view, MapIterator> {
   {
     CUDF_EXPECTS(source.get().type() == target.type(), "scalar and column types must match");
 
-    auto const scalar_impl = static_cast<const string_scalar*>(&source.get());
+    auto const scalar_impl = static_cast<string_scalar const*>(&source.get());
     auto const source_view = string_view(scalar_impl->data(), scalar_impl->size());
     auto const begin = thrust::make_constant_iterator(source_view);
     auto const end = begin + scatter_rows;
@@ -154,7 +154,7 @@ struct column_scalar_scatterer_impl<string_view, MapIterator> {
 
 template <typename MapIterator>
 struct column_scalar_scatterer_impl<list_view, MapIterator> {
-  std::unique_ptr<column> operator()(std::reference_wrapper<const scalar> const& source,
+  std::unique_ptr<column> operator()(std::reference_wrapper<scalar const> const& source,
                                      MapIterator scatter_iter,
                                      size_type scatter_rows,
                                      column_view const& target,
@@ -171,7 +171,7 @@
 
 template <typename MapIterator>
 struct column_scalar_scatterer_impl<dictionary32, MapIterator> {
-  std::unique_ptr<column> operator()(std::reference_wrapper<const scalar> const& source,
+  std::unique_ptr<column> operator()(std::reference_wrapper<scalar const> const& source,
                                      MapIterator scatter_iter,
                                      size_type scatter_rows,
                                      column_view const& target,
@@ -223,7 +223,7 @@ struct column_scalar_scatterer_impl<dictionary32, MapIterator> {
 template <typename MapIterator>
 struct column_scalar_scatterer {
   template <typename Element>
-  std::unique_ptr<column> operator()(std::reference_wrapper<const scalar> const& source,
+  std::unique_ptr<column> operator()(std::reference_wrapper<scalar const> const& source,
                                      MapIterator scatter_iter,
                                      size_type scatter_rows,
                                      column_view const& target,
@@ -237,7 +237,7 @@
 
 template <typename MapIterator>
 struct column_scalar_scatterer_impl<struct_view, MapIterator> {
-  std::unique_ptr<column> operator()(std::reference_wrapper<const scalar> const& source,
+  std::unique_ptr<column> operator()(std::reference_wrapper<scalar const> const& source,
                                      MapIterator scatter_iter,
                                      size_type scatter_rows,
                                      column_view const& target,
@@ -331,7 +331,7 @@ std::unique_ptr<table> scatter(table_view const& source,
   return scatter(source, map_col, target, stream, mr);
 }
 
-std::unique_ptr<table> scatter(std::vector<std::reference_wrapper<const scalar>> const& source,
+std::unique_ptr<table> scatter(std::vector<std::reference_wrapper<scalar const>> const& source,
                                column_view const& indices,
                                table_view const& target,
                                rmm::cuda_stream_view stream,
@@ -452,7 +452,7 @@
 }
 
 std::unique_ptr<table> boolean_mask_scatter(
-  std::vector<std::reference_wrapper<const scalar>> const& input,
+  std::vector<std::reference_wrapper<scalar const>> const& input,
   table_view const& target,
   column_view const& boolean_mask,
   rmm::cuda_stream_view stream,
@@ -501,7 +501,7 @@ std::unique_ptr<table> scatter(table_view const& source,
   return detail::scatter(source, scatter_map, target, cudf::get_default_stream(), mr);
 }
 
-std::unique_ptr<table> scatter(std::vector<std::reference_wrapper<const scalar>> const& source,
+std::unique_ptr<table> scatter(std::vector<std::reference_wrapper<scalar const>> const& source,
                                column_view const& indices,
                                table_view const& target,
                                rmm::mr::device_memory_resource* mr)
@@ -520,7 +520,7 @@ std::unique_ptr<table> boolean_mask_scatter(table_view const& input,
 }
 
 std::unique_ptr<table> boolean_mask_scatter(
-  std::vector<std::reference_wrapper<const scalar>> const& input,
+  std::vector<std::reference_wrapper<scalar const>> const& input,
   table_view const& target,
   column_view const& boolean_mask,
   rmm::mr::device_memory_resource* mr)
```
cpp/src/datetime/datetime_ops.cu (2 changes: 1 addition & 1 deletion)

```diff
@@ -181,7 +181,7 @@ struct extract_last_day_of_month {
   __device__ inline timestamp_D operator()(Timestamp const ts) const
   {
     using namespace cuda::std::chrono;
-    const year_month_day ymd(floor<days>(ts));
+    year_month_day const ymd(floor<days>(ts));
     auto const ymdl = year_month_day_last{ymd.year() / ymd.month() / last};
     return timestamp_D{sys_days{ymdl}};
   }
```
cpp/src/groupby/groupby.cu (2 changes: 1 addition & 1 deletion)

```diff
@@ -286,7 +286,7 @@ detail::sort::sort_groupby_helper& groupby::helper()
 std::pair<std::unique_ptr<table>, std::unique_ptr<table>> groupby::shift(
   table_view const& values,
   host_span<size_type const> offsets,
-  std::vector<std::reference_wrapper<const scalar>> const& fill_values,
+  std::vector<std::reference_wrapper<scalar const>> const& fill_values,
   rmm::mr::device_memory_resource* mr)
 {
   CUDF_FUNC_RANGE();
```
cpp/src/groupby/sort/scan.cpp (2 changes: 1 addition & 1 deletion)

```diff
@@ -126,7 +126,7 @@ void scan_result_functor::operator()<aggregation::RANK>(aggregation const& agg)
                "Unsupported list type in grouped rank scan.");
   auto const& rank_agg = dynamic_cast<cudf::detail::rank_aggregation const&>(agg);
   auto const& group_labels = helper.group_labels(stream);
-  auto const group_labels_view = column_view(cudf::device_span<const size_type>(group_labels));
+  auto const group_labels_view = column_view(cudf::device_span<size_type const>(group_labels));
   auto const gather_map = [&]() {
     if (is_presorted()) {  // assumes both keys and values are sorted, Spark does this.
       return cudf::detail::sequence(group_labels.size(),
```
cpp/src/hash/concurrent_unordered_map.cuh (28 changes: 14 additions & 14 deletions)

```diff
@@ -127,7 +127,7 @@ class concurrent_unordered_map {
   using mapped_type    = Element;
   using value_type     = thrust::pair<Key, Element>;
   using iterator       = cycle_iterator_adapter<value_type*>;
-  using const_iterator = const cycle_iterator_adapter<value_type*>;
+  using const_iterator = cycle_iterator_adapter<value_type*> const;
 
  public:
   /**
@@ -160,11 +160,11 @@
    */
   static auto create(size_type capacity,
                      rmm::cuda_stream_view stream,
-                     const mapped_type unused_element = std::numeric_limits<mapped_type>::max(),
-                     const key_type unused_key = std::numeric_limits<key_type>::max(),
-                     const Hasher& hash_function = hasher(),
-                     const Equality& equal = key_equal(),
-                     const allocator_type& allocator = allocator_type())
+                     mapped_type const unused_element = std::numeric_limits<mapped_type>::max(),
+                     key_type const unused_key = std::numeric_limits<key_type>::max(),
+                     Hasher const& hash_function = hasher(),
+                     Equality const& equal = key_equal(),
+                     allocator_type const& allocator = allocator_type())
   {
     CUDF_FUNC_RANGE();
     using Self = concurrent_unordered_map<Key, Element, Hasher, Equality, Allocator>;
@@ -327,7 +327,7 @@
    */
   __device__ thrust::pair<iterator, bool> insert(value_type const& insert_pair)
   {
-    const size_type key_hash{m_hf(insert_pair.first)};
+    size_type const key_hash{m_hf(insert_pair.first)};
     size_type index{key_hash % m_capacity};
 
     insert_result status{insert_result::CONTINUE};
@@ -421,7 +421,7 @@
     }
   }
 
-  void assign_async(const concurrent_unordered_map& other, rmm::cuda_stream_view stream)
+  void assign_async(concurrent_unordered_map const& other, rmm::cuda_stream_view stream)
   {
     if (other.m_capacity <= m_capacity) {
       m_capacity = other.m_capacity;
@@ -454,7 +454,7 @@
     }
   }
 
-  void prefetch(const int dev_id, rmm::cuda_stream_view stream)
+  void prefetch(int const dev_id, rmm::cuda_stream_view stream)
   {
     cudaPointerAttributes hashtbl_values_ptr_attributes;
     cudaError_t status = cudaPointerGetAttributes(&hashtbl_values_ptr_attributes, m_hashtbl_values);
@@ -510,11 +510,11 @@
    * @param stream CUDA stream used for device memory operations and kernel launches.
    */
   concurrent_unordered_map(size_type capacity,
-                           const mapped_type unused_element,
-                           const key_type unused_key,
-                           const Hasher& hash_function,
-                           const Equality& equal,
-                           const allocator_type& allocator,
+                           mapped_type const unused_element,
+                           key_type const unused_key,
+                           Hasher const& hash_function,
+                           Equality const& equal,
+                           allocator_type const& allocator,
                            rmm::cuda_stream_view stream)
     : m_hf(hash_function),
       m_equal(equal),
```
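One subtlety in the `const_iterator` alias above: the qualifier is top-level, so it const-qualifies the whole adapter type, and both spellings are identical. A hypothetical sketch (the adapter here is a stand-in, not the real `cycle_iterator_adapter`):

```cpp
#include <type_traits>

template <typename T>
struct cycle_iterator_adapter {};  // stand-in for the real adapter

using value_type     = int*;
using iterator       = cycle_iterator_adapter<value_type>;
using const_iterator = cycle_iterator_adapter<value_type> const;  // east const

// West and east spellings of the alias name the same type...
static_assert(std::is_same_v<const cycle_iterator_adapter<value_type>, const_iterator>);
// ...and the const is top-level: removing it gives back the plain iterator.
static_assert(std::is_same_v<std::remove_const_t<const_iterator>, iterator>);

int main() {}
```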