Use east const in benchmarks files (#13491)
Use east const in _cpp/benchmarks/_ files
Used clang-format with `QualifierAlignment: Right` to do the cleanup.

Since https://clang.llvm.org/docs/ClangFormatStyleOptions.html#qualifieralignment carries the warning
`Setting QualifierAlignment to something other than Leave, COULD lead to incorrect code formatting due to incorrect decisions made due to clang-formats lack of complete semantic information. As such extra care should be taken to review code changes made by the use of this option.`
the option is not added to `.clang-format`.
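For illustration (not part of the diff), a minimal self-contained sketch of what the conversion looks like. The variable names below are invented for this example; in each pair the two declarations mean exactly the same thing, only the placement of `const` changes.

```cpp
#include <cstdint>
#include <vector>

int main()
{
  // West const (the style being replaced): the qualifier sits to the left of the type.
  const std::int64_t string_len_west = 8;
  std::vector<const char*> h_strings_west{"aaaa", "bbbb"};

  // East const (the style this change converts to): const sits immediately to the
  // right of what it qualifies. `const char*` and `char const*` are the same type.
  std::int64_t const string_len_east = 8;
  std::vector<char const*> h_strings_east{"aaaa", "bbbb"};

  return 0;
}
```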

Split into multiple PRs for easier review: #13491, #13492, #13493, #13494

Authors:
  - Karthikeyan (https://github.com/karthikeyann)
  - Nghia Truong (https://github.com/ttnghia)

Approvers:
  - Nghia Truong (https://github.com/ttnghia)
  - Bradley Dice (https://github.com/bdice)

URL: #13491
karthikeyann authored Jul 13, 2023 · 1 parent e1aec7b · commit 3bacb12
Showing 36 changed files with 97 additions and 97 deletions.
2 changes: 1 addition & 1 deletion cpp/benchmarks/copying/contiguous_split.cu
@@ -126,7 +126,7 @@ void BM_contiguous_split_strings(benchmark::State& state, ContiguousSplitImpl& i
bool const include_validity = state.range(3) != 0;

constexpr int64_t string_len = 8;
std::vector<const char*> h_strings{
std::vector<char const*> h_strings{
"aaaaaaaa", "bbbbbbbb", "cccccccc", "dddddddd", "eeeeeeee", "ffffffff", "gggggggg", "hhhhhhhh"};

int64_t const col_len_bytes = total_desired_bytes / num_cols;
4 changes: 2 additions & 2 deletions cpp/benchmarks/copying/gather.cu
@@ -31,8 +31,8 @@ class Gather : public cudf::benchmark {};
template <class TypeParam, bool coalesce>
void BM_gather(benchmark::State& state)
{
const cudf::size_type source_size{(cudf::size_type)state.range(0)};
const auto n_cols = (cudf::size_type)state.range(1);
cudf::size_type const source_size{(cudf::size_type)state.range(0)};
auto const n_cols = (cudf::size_type)state.range(1);

// Gather indices
auto gather_map_table =
12 changes: 6 additions & 6 deletions cpp/benchmarks/fixture/benchmark_fixture.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -73,28 +73,28 @@ class benchmark : public ::benchmark::Fixture {
public:
benchmark() : ::benchmark::Fixture()
{
const char* env_iterations = std::getenv("CUDF_BENCHMARK_ITERATIONS");
char const* env_iterations = std::getenv("CUDF_BENCHMARK_ITERATIONS");
if (env_iterations != nullptr) { this->Iterations(std::max(0L, atol(env_iterations))); }
}

void SetUp(const ::benchmark::State& state) override
void SetUp(::benchmark::State const& state) override
{
mr = make_pool_instance();
rmm::mr::set_current_device_resource(mr.get()); // set default resource to pool
}

void TearDown(const ::benchmark::State& state) override
void TearDown(::benchmark::State const& state) override
{
// reset default resource to the initial resource
rmm::mr::set_current_device_resource(nullptr);
mr.reset();
}

// eliminate partial override warnings (see benchmark/benchmark.h)
void SetUp(::benchmark::State& st) override { SetUp(const_cast<const ::benchmark::State&>(st)); }
void SetUp(::benchmark::State& st) override { SetUp(const_cast<::benchmark::State const&>(st)); }
void TearDown(::benchmark::State& st) override
{
TearDown(const_cast<const ::benchmark::State&>(st));
TearDown(const_cast<::benchmark::State const&>(st));
}

std::shared_ptr<rmm::mr::device_memory_resource> mr;
4 changes: 2 additions & 2 deletions cpp/benchmarks/fixture/templated_benchmark_fixture.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -45,7 +45,7 @@ namespace cudf {
template <class Fixture>
class FunctionTemplateBenchmark : public Fixture {
public:
FunctionTemplateBenchmark(const char* name, ::benchmark::internal::Function* func)
FunctionTemplateBenchmark(char const* name, ::benchmark::internal::Function* func)
: Fixture(), func_(func)
{
this->SetName(name);
2 changes: 1 addition & 1 deletion cpp/benchmarks/groupby/group_max.cpp
@@ -23,7 +23,7 @@
template <typename Type>
void bench_groupby_max(nvbench::state& state, nvbench::type_list<Type>)
{
const auto size = static_cast<cudf::size_type>(state.get_int64("num_rows"));
auto const size = static_cast<cudf::size_type>(state.get_int64("num_rows"));

auto const keys = [&] {
data_profile const profile = data_profile_builder().cardinality(0).no_validity().distribution(
4 changes: 2 additions & 2 deletions cpp/benchmarks/groupby/group_no_requests.cpp
@@ -28,7 +28,7 @@ class Groupby : public cudf::benchmark {};

void BM_basic_no_requests(benchmark::State& state)
{
const cudf::size_type column_size{(cudf::size_type)state.range(0)};
cudf::size_type const column_size{(cudf::size_type)state.range(0)};

data_profile const profile = data_profile_builder().cardinality(0).no_validity().distribution(
cudf::type_to_id<int64_t>(), distribution_id::UNIFORM, 0, 100);
@@ -59,7 +59,7 @@ BENCHMARK_REGISTER_F(Groupby, BasicNoRequest)

void BM_pre_sorted_no_requests(benchmark::State& state)
{
const cudf::size_type column_size{(cudf::size_type)state.range(0)};
cudf::size_type const column_size{(cudf::size_type)state.range(0)};

data_profile const profile = data_profile_builder().cardinality(0).no_validity().distribution(
cudf::type_to_id<int64_t>(), distribution_id::UNIFORM, 0, 100);
2 changes: 1 addition & 1 deletion cpp/benchmarks/groupby/group_nth.cpp
@@ -29,7 +29,7 @@ class Groupby : public cudf::benchmark {};
void BM_pre_sorted_nth(benchmark::State& state)
{
// const cudf::size_type num_columns{(cudf::size_type)state.range(0)};
const cudf::size_type column_size{(cudf::size_type)state.range(0)};
cudf::size_type const column_size{(cudf::size_type)state.range(0)};

data_profile const profile = data_profile_builder().cardinality(0).no_validity().distribution(
cudf::type_to_id<int64_t>(), distribution_id::UNIFORM, 0, 100);
2 changes: 1 addition & 1 deletion cpp/benchmarks/groupby/group_nunique.cpp
@@ -39,7 +39,7 @@ auto make_aggregation_request_vector(cudf::column_view const& values, Args&&...
template <typename Type>
void bench_groupby_nunique(nvbench::state& state, nvbench::type_list<Type>)
{
const auto size = static_cast<cudf::size_type>(state.get_int64("num_rows"));
auto const size = static_cast<cudf::size_type>(state.get_int64("num_rows"));

auto const keys = [&] {
data_profile profile = data_profile_builder().cardinality(0).no_validity().distribution(
4 changes: 2 additions & 2 deletions cpp/benchmarks/groupby/group_scan.cpp
@@ -29,7 +29,7 @@ class Groupby : public cudf::benchmark {};

void BM_basic_sum_scan(benchmark::State& state)
{
const cudf::size_type column_size{(cudf::size_type)state.range(0)};
cudf::size_type const column_size{(cudf::size_type)state.range(0)};

data_profile const profile = data_profile_builder().cardinality(0).no_validity().distribution(
cudf::type_to_id<int64_t>(), distribution_id::UNIFORM, 0, 100);
@@ -61,7 +61,7 @@ BENCHMARK_REGISTER_F(Groupby, BasicSumScan)

void BM_pre_sorted_sum_scan(benchmark::State& state)
{
const cudf::size_type column_size{(cudf::size_type)state.range(0)};
cudf::size_type const column_size{(cudf::size_type)state.range(0)};

data_profile profile = data_profile_builder().cardinality(0).no_validity().distribution(
cudf::type_to_id<int64_t>(), distribution_id::UNIFORM, 0, 100);
4 changes: 2 additions & 2 deletions cpp/benchmarks/groupby/group_shift.cpp
@@ -28,8 +28,8 @@ class Groupby : public cudf::benchmark {};

void BM_group_shift(benchmark::State& state)
{
const cudf::size_type column_size{(cudf::size_type)state.range(0)};
const int num_groups = 100;
cudf::size_type const column_size{(cudf::size_type)state.range(0)};
int const num_groups = 100;

data_profile const profile =
data_profile_builder().cardinality(0).null_probability(0.01).distribution(
8 changes: 4 additions & 4 deletions cpp/benchmarks/groupby/group_struct_keys.cpp
@@ -33,10 +33,10 @@ void bench_groupby_struct_keys(nvbench::state& state)
std::default_random_engine generator;
std::uniform_int_distribution<int> distribution(0, 100);

const cudf::size_type n_rows{static_cast<cudf::size_type>(state.get_int64("NumRows"))};
const cudf::size_type n_cols{1};
const cudf::size_type depth{static_cast<cudf::size_type>(state.get_int64("Depth"))};
const bool nulls{static_cast<bool>(state.get_int64("Nulls"))};
cudf::size_type const n_rows{static_cast<cudf::size_type>(state.get_int64("NumRows"))};
cudf::size_type const n_cols{1};
cudf::size_type const depth{static_cast<cudf::size_type>(state.get_int64("Depth"))};
bool const nulls{static_cast<bool>(state.get_int64("Nulls"))};

// Create columns with values in the range [0,100)
std::vector<column_wrapper> columns;
4 changes: 2 additions & 2 deletions cpp/benchmarks/groupby/group_sum.cpp
@@ -28,7 +28,7 @@ class Groupby : public cudf::benchmark {};

void BM_basic_sum(benchmark::State& state)
{
const cudf::size_type column_size{(cudf::size_type)state.range(0)};
cudf::size_type const column_size{(cudf::size_type)state.range(0)};

data_profile const profile = data_profile_builder().cardinality(0).no_validity().distribution(
cudf::type_to_id<int64_t>(), distribution_id::UNIFORM, 0, 100);
@@ -61,7 +61,7 @@ BENCHMARK_REGISTER_F(Groupby, Basic)

void BM_pre_sorted_sum(benchmark::State& state)
{
const cudf::size_type column_size{(cudf::size_type)state.range(0)};
cudf::size_type const column_size{(cudf::size_type)state.range(0)};

data_profile profile = data_profile_builder().cardinality(0).no_validity().distribution(
cudf::type_to_id<int64_t>(), distribution_id::UNIFORM, 0, 100);
2 changes: 1 addition & 1 deletion cpp/benchmarks/io/fst.cu
@@ -60,7 +60,7 @@ auto make_test_json_data(nvbench::state& state)

auto d_input_scalar = cudf::make_string_scalar(input);
auto& d_string_scalar = static_cast<cudf::string_scalar&>(*d_input_scalar);
const cudf::size_type repeat_times = string_size / input.size();
cudf::size_type const repeat_times = string_size / input.size();
return cudf::strings::repeat_string(d_string_scalar, repeat_times);
}

6 changes: 3 additions & 3 deletions cpp/benchmarks/io/json/nested_json.cpp
@@ -77,7 +77,7 @@ std::string generate_row(
int num_columns, int max_depth, int max_list_size, int max_struct_size, size_t max_bytes)
{
std::string s = "{";
const std::vector<std::string> elems{
std::vector<std::string> const elems{
R"(1)", R"(-2)", R"(3.4)", R"("5")", R"("abcdefghij")", R"(true)", R"(null)"};
for (int i = 0; i < num_columns; i++) {
s += R"("col)" + num_to_string(i) + R"(": )";
@@ -140,7 +140,7 @@ auto make_test_json_data(cudf::size_type string_size, rmm::cuda_stream_view stre
{"a":1,"b":Infinity,"c":[null], "d": {"year":-600,"author": "Kaniyan"}},
{"a": 1, "b": 8.0, "d": { "author": "Jean-Jacques Rousseau"}},)";

const cudf::size_type repeat_times = string_size / input.size();
cudf::size_type const repeat_times = string_size / input.size();

auto d_input_scalar = cudf::make_string_scalar(input, stream);
auto& d_string_scalar = static_cast<cudf::string_scalar&>(*d_input_scalar);
@@ -191,7 +191,7 @@ void BM_NESTED_JSON_DEPTH(nvbench::state& state)

auto d_scalar = cudf::string_scalar(
generate_json(100'000'000, 10, depth, 10, 10, string_size), true, cudf::get_default_stream());
auto input = cudf::device_span<const char>(d_scalar.data(), d_scalar.size());
auto input = cudf::device_span<char const>(d_scalar.data(), d_scalar.size());

state.add_element_count(input.size());
auto const default_options = cudf::io::json_reader_options{};
4 changes: 2 additions & 2 deletions cpp/benchmarks/iterator/iterator.cu
@@ -131,7 +131,7 @@ class Iterator : public cudf::benchmark {};
template <class TypeParam, bool cub_or_thrust, bool raw_or_iterator>
void BM_iterator(benchmark::State& state)
{
const cudf::size_type column_size{(cudf::size_type)state.range(0)};
cudf::size_type const column_size{(cudf::size_type)state.range(0)};
using T = TypeParam;
auto num_gen = thrust::counting_iterator<cudf::size_type>(0);

@@ -195,7 +195,7 @@ void pair_iterator_bench_thrust(cudf::column_view& col,
template <class TypeParam, bool cub_or_thrust>
void BM_pair_iterator(benchmark::State& state)
{
const cudf::size_type column_size{(cudf::size_type)state.range(0)};
cudf::size_type const column_size{(cudf::size_type)state.range(0)};
using T = TypeParam;
auto num_gen = thrust::counting_iterator<cudf::size_type>(0);
auto null_gen =
34 changes: 17 additions & 17 deletions cpp/benchmarks/join/generate_input_tables.cuh
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -31,7 +31,7 @@

#include <cassert>

__global__ static void init_curand(curandState* state, const int nstates)
__global__ static void init_curand(curandState* state, int const nstates)
{
int ithread = threadIdx.x + blockIdx.x * blockDim.x;

@@ -40,10 +40,10 @@ __global__ static void init_curand(curandState* state, const int nstates)

template <typename key_type, typename size_type>
__global__ static void init_build_tbl(key_type* const build_tbl,
const size_type build_tbl_size,
const int multiplicity,
size_type const build_tbl_size,
int const multiplicity,
curandState* state,
const int num_states)
int const num_states)
{
auto const start_idx = blockIdx.x * blockDim.x + threadIdx.x;
auto const stride = blockDim.x * gridDim.x;
Expand All @@ -52,7 +52,7 @@ __global__ static void init_build_tbl(key_type* const build_tbl,
curandState localState = state[start_idx];

for (size_type idx = start_idx; idx < build_tbl_size; idx += stride) {
const double x = curand_uniform_double(&localState);
double const x = curand_uniform_double(&localState);

build_tbl[idx] = static_cast<key_type>(x * (build_tbl_size / multiplicity));
}
@@ -62,13 +62,13 @@ __global__ static void init_build_tbl(key_type* const build_tbl,

template <typename key_type, typename size_type>
__global__ void init_probe_tbl(key_type* const probe_tbl,
const size_type probe_tbl_size,
const size_type build_tbl_size,
const key_type rand_max,
const double selectivity,
const int multiplicity,
size_type const probe_tbl_size,
size_type const build_tbl_size,
key_type const rand_max,
double const selectivity,
int const multiplicity,
curandState* state,
const int num_states)
int const num_states)
{
auto const start_idx = blockIdx.x * blockDim.x + threadIdx.x;
auto const stride = blockDim.x * gridDim.x;
@@ -123,11 +123,11 @@ __global__ void init_probe_tbl(key_type* const probe_tbl,
*/
template <typename key_type, typename size_type>
void generate_input_tables(key_type* const build_tbl,
const size_type build_tbl_size,
size_type const build_tbl_size,
key_type* const probe_tbl,
const size_type probe_tbl_size,
const double selectivity,
const int multiplicity)
size_type const probe_tbl_size,
double const selectivity,
int const multiplicity)
{
// With large values of rand_max the a lot of temporary storage is needed for the lottery. At the
// expense of not being that accurate with applying the selectivity an especially more memory
@@ -152,7 +152,7 @@ void generate_input_tables(key_type* const build_tbl,
int num_sms{-1};
CUDF_CUDA_TRY(cudaDeviceGetAttribute(&num_sms, cudaDevAttrMultiProcessorCount, dev_id));

const int num_states =
int const num_states =
num_sms * std::max(num_blocks_init_build_tbl, num_blocks_init_probe_tbl) * block_size;
rmm::device_uvector<curandState> devStates(num_states, cudf::get_default_stream());

4 changes: 2 additions & 2 deletions cpp/benchmarks/join/join_common.hpp
@@ -96,8 +96,8 @@ void BM_join(state_type& state, Join JoinFunc)
}
}();

const double selectivity = 0.3;
const int multiplicity = 1;
double const selectivity = 0.3;
int const multiplicity = 1;

// Generate build and probe tables
auto build_random_null_mask = [](int size) {
6 changes: 3 additions & 3 deletions cpp/benchmarks/lists/copying/scatter_lists.cu
@@ -40,9 +40,9 @@ void BM_lists_scatter(::benchmark::State& state)
auto stream = cudf::get_default_stream();
auto mr = rmm::mr::get_current_device_resource();

const cudf::size_type base_size{(cudf::size_type)state.range(0)};
const cudf::size_type num_elements_per_row{(cudf::size_type)state.range(1)};
const auto num_rows = (cudf::size_type)ceil(double(base_size) / num_elements_per_row);
cudf::size_type const base_size{(cudf::size_type)state.range(0)};
cudf::size_type const num_elements_per_row{(cudf::size_type)state.range(1)};
auto const num_rows = (cudf::size_type)ceil(double(base_size) / num_elements_per_row);

auto source_base_col = make_fixed_width_column(cudf::data_type{cudf::type_to_id<TypeParam>()},
base_size,
2 changes: 1 addition & 1 deletion cpp/benchmarks/null_mask/set_null_mask.cpp
@@ -23,7 +23,7 @@ class SetNullmask : public cudf::benchmark {};

void BM_setnullmask(benchmark::State& state)
{
const cudf::size_type size{(cudf::size_type)state.range(0)};
cudf::size_type const size{(cudf::size_type)state.range(0)};
rmm::device_buffer mask = cudf::create_null_mask(size, cudf::mask_state::UNINITIALIZED);
auto begin = 0, end = size;

6 changes: 3 additions & 3 deletions cpp/benchmarks/quantiles/quantiles.cpp
@@ -30,9 +30,9 @@ static void BM_quantiles(benchmark::State& state, bool nulls)
{
using Type = int;

const cudf::size_type n_rows{(cudf::size_type)state.range(0)};
const cudf::size_type n_cols{(cudf::size_type)state.range(1)};
const cudf::size_type n_quantiles{(cudf::size_type)state.range(2)};
cudf::size_type const n_rows{(cudf::size_type)state.range(0)};
cudf::size_type const n_cols{(cudf::size_type)state.range(1)};
cudf::size_type const n_quantiles{(cudf::size_type)state.range(2)};

// Create columns with values in the range [0,100)
data_profile profile = data_profile_builder().cardinality(0).distribution(
2 changes: 1 addition & 1 deletion cpp/benchmarks/reduction/anyall.cpp
@@ -30,7 +30,7 @@ template <typename type>
void BM_reduction_anyall(benchmark::State& state,
std::unique_ptr<cudf::reduce_aggregation> const& agg)
{
const cudf::size_type column_size{static_cast<cudf::size_type>(state.range(0))};
cudf::size_type const column_size{static_cast<cudf::size_type>(state.range(0))};
auto const dtype = cudf::type_to_id<type>();
data_profile const profile = data_profile_builder().no_validity().distribution(
dtype, distribution_id::UNIFORM, 0, agg->kind == cudf::aggregation::ANY ? 0 : 100);
2 changes: 1 addition & 1 deletion cpp/benchmarks/reduction/dictionary.cpp
@@ -29,7 +29,7 @@ template <typename T>
void BM_reduction_dictionary(benchmark::State& state,
std::unique_ptr<cudf::reduce_aggregation> const& agg)
{
const cudf::size_type column_size{static_cast<cudf::size_type>(state.range(0))};
cudf::size_type const column_size{static_cast<cudf::size_type>(state.range(0))};

// int column and encoded dictionary column
data_profile const profile = data_profile_builder().cardinality(0).no_validity().distribution(
2 changes: 1 addition & 1 deletion cpp/benchmarks/reduction/minmax.cpp
@@ -27,7 +27,7 @@ class Reduction : public cudf::benchmark {};
template <typename type>
void BM_reduction(benchmark::State& state)
{
const cudf::size_type column_size{(cudf::size_type)state.range(0)};
cudf::size_type const column_size{(cudf::size_type)state.range(0)};
auto const dtype = cudf::type_to_id<type>();
auto const input_column =
create_random_column(dtype, row_count{column_size}, data_profile_builder().no_validity());