Add nvbench environment class for initializing RMM in benchmarks #12728

Merged 12 commits on Feb 10, 2023

Changes from 4 commits
13 changes: 12 additions & 1 deletion cpp/benchmarks/fixture/rmm_pool_raii.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -69,4 +69,15 @@ class rmm_pool_raii {
std::shared_ptr<rmm::mr::device_memory_resource> mr;
};

/**
* Base fixture for cudf benchmarks using nvbench.
*
* Initializes the default memory resource to use the RMM pool device resource.
*/
struct nvbench_base_fixture {
rmm_pool_raii _mr;
};

} // namespace cudf

#define NVBENCH_ENVIRONMENT cudf::nvbench_base_fixture
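For context, a minimal sketch of how a benchmark translation unit is expected to look once it relies on this environment fixture instead of a per-function `cudf::rmm_pool_raii`. This is not code from the PR: the benchmark name, axis, and include path are hypothetical, and it assumes the `NVBENCH_ENVIRONMENT` type is constructed once by the nvbench-driven `main()` before any benchmark runs.

```cpp
// Hypothetical benchmark under the new scheme: no explicit RMM pool guard in
// the function body. The NVBENCH_ENVIRONMENT type (cudf::nvbench_base_fixture)
// is assumed to be instantiated by the benchmark executable's main() before
// any benchmark executes, so device allocations below come from the RMM pool.
#include <benchmarks/fixture/rmm_pool_raii.hpp>  // defines NVBENCH_ENVIRONMENT (assumed include path)

#include <cudf/column/column_factories.hpp>
#include <cudf/types.hpp>

#include <nvbench/nvbench.cuh>

static void bench_example(nvbench::state& state)
{
  auto const num_rows = static_cast<cudf::size_type>(state.get_int64("num_rows"));

  state.exec(nvbench::exec_tag::sync, [&](nvbench::launch&) {
    // Allocates from the pool resource installed by the environment fixture,
    // not from a guard constructed inside this function.
    auto col = cudf::make_numeric_column(cudf::data_type{cudf::type_id::INT32}, num_rows);
  });
}

NVBENCH_BENCH(bench_example).set_name("example").add_int64_axis("num_rows", {1'000'000});
```

Compare this with the per-benchmark `cudf::rmm_pool_raii pool_raii;` lines removed in the files below.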
3 changes: 1 addition & 2 deletions cpp/benchmarks/groupby/group_max.cpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -24,7 +24,6 @@
template <typename Type>
void bench_groupby_max(nvbench::state& state, nvbench::type_list<Type>)
{
cudf::rmm_pool_raii pool_raii;
const auto size = static_cast<cudf::size_type>(state.get_int64("num_rows"));

auto const keys = [&] {
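The same one-line removal repeats across every benchmark source in this diff. For reference, here is a rough, illustrative sketch of what such an RAII pool guard typically does (this is not the actual `cudf::rmm_pool_raii` implementation; the class name and the 1 GiB initial size are invented): build a pool resource on top of the default CUDA resource, install it as the current device resource, and restore the previous resource on destruction. The environment fixture now performs this setup once per process rather than once per benchmark function.

```cpp
// Illustrative RAII pool guard, assuming RMM's pool_memory_resource and
// set_current_device_resource APIs; not cudf's rmm_pool_raii.
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>

#include <cstddef>

class pool_guard {
 public:
  explicit pool_guard(std::size_t initial_pool_size = std::size_t{1} << 30)  // 1 GiB, arbitrary
    : pool_{&cuda_mr_, initial_pool_size},
      previous_{rmm::mr::set_current_device_resource(&pool_)}  // install pool, remember old resource
  {
  }

  ~pool_guard() { rmm::mr::set_current_device_resource(previous_); }  // restore previous resource

  pool_guard(pool_guard const&)            = delete;
  pool_guard& operator=(pool_guard const&) = delete;

 private:
  rmm::mr::cuda_memory_resource cuda_mr_{};
  rmm::mr::pool_memory_resource<rmm::mr::cuda_memory_resource> pool_;
  rmm::mr::device_memory_resource* previous_{};
};
```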
3 changes: 1 addition & 2 deletions cpp/benchmarks/groupby/group_nunique.cpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -40,7 +40,6 @@ auto make_aggregation_request_vector(cudf::column_view const& values, Args&&...
template <typename Type>
void bench_groupby_nunique(nvbench::state& state, nvbench::type_list<Type>)
{
cudf::rmm_pool_raii pool_raii;
const auto size = static_cast<cudf::size_type>(state.get_int64("num_rows"));

auto const keys = [&] {
3 changes: 1 addition & 2 deletions cpp/benchmarks/groupby/group_rank.cpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -30,7 +30,6 @@ static void nvbench_groupby_rank(nvbench::state& state,
{
using namespace cudf;
constexpr auto dtype = type_to_id<int64_t>();
cudf::rmm_pool_raii pool_raii;

bool const is_sorted = state.get_int64("is_sorted");
cudf::size_type const column_size = state.get_int64("data_size");
4 changes: 1 addition & 3 deletions cpp/benchmarks/groupby/group_struct_keys.cpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -29,8 +29,6 @@

void bench_groupby_struct_keys(nvbench::state& state)
{
cudf::rmm_pool_raii pool_raii;

using Type = int;
using column_wrapper = cudf::test::fixed_width_column_wrapper<Type>;
std::default_random_engine generator;
6 changes: 1 addition & 5 deletions cpp/benchmarks/io/csv/csv_reader_input.cpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -67,8 +67,6 @@ void csv_read_common(DataType const& data_types,
template <data_type DataType>
void BM_csv_read_input(nvbench::state& state, nvbench::type_list<nvbench::enum_type<DataType>>)
{
cudf::rmm_pool_raii rmm_pool;

auto const d_type = get_type_or_group(static_cast<int32_t>(DataType));
auto const source_type = io_type::FILEPATH;

@@ -78,8 +76,6 @@ void BM_csv_read_input(nvbench::state& state, nvbench::type_list<nvbench::enum_t
template <cudf::io::io_type IO>
void BM_csv_read_io(nvbench::state& state, nvbench::type_list<nvbench::enum_type<IO>>)
{
cudf::rmm_pool_raii rmm_pool;

auto const d_type = get_type_or_group({static_cast<int32_t>(data_type::INTEGRAL),
static_cast<int32_t>(data_type::FLOAT),
static_cast<int32_t>(data_type::DECIMAL),
4 changes: 1 addition & 3 deletions cpp/benchmarks/io/csv/csv_reader_options.cpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -32,8 +32,6 @@ void BM_csv_read_varying_options(
nvbench::state& state,
nvbench::type_list<nvbench::enum_type<ColSelection>, nvbench::enum_type<RowSelection>>)
{
cudf::rmm_pool_raii rmm_pool;

auto const data_types =
dtypes_for_column_selection(get_type_or_group({static_cast<int32_t>(data_type::INTEGRAL),
static_cast<int32_t>(data_type::FLOAT),
14 changes: 1 addition & 13 deletions cpp/benchmarks/io/fst.cu
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -78,9 +78,6 @@ constexpr std::size_t single_item = 1;

void BM_FST_JSON(nvbench::state& state)
{
// TODO: to be replaced by nvbench fixture once it's ready
cudf::rmm_pool_raii rmm_pool;

CUDF_EXPECTS(state.get_int64("string_size") <= std::numeric_limits<size_type>::max(),
"Benchmarks only support up to size_type's maximum number of items");
auto const string_size{size_type(state.get_int64("string_size"))};
@@ -116,9 +113,6 @@ void BM_FST_JSON(nvbench::state& state)

void BM_FST_JSON_no_outidx(nvbench::state& state)
{
// TODO: to be replaced by nvbench fixture once it's ready
cudf::rmm_pool_raii rmm_pool;

CUDF_EXPECTS(state.get_int64("string_size") <= std::numeric_limits<size_type>::max(),
"Benchmarks only support up to size_type's maximum number of items");
auto const string_size{size_type(state.get_int64("string_size"))};
@@ -154,9 +148,6 @@ void BM_FST_JSON_no_outidx(nvbench::state& state)

void BM_FST_JSON_no_out(nvbench::state& state)
{
// TODO: to be replaced by nvbench fixture once it's ready
cudf::rmm_pool_raii rmm_pool;

CUDF_EXPECTS(state.get_int64("string_size") <= std::numeric_limits<size_type>::max(),
"Benchmarks only support up to size_type's maximum number of items");
auto const string_size{size_type(state.get_int64("string_size"))};
@@ -190,9 +181,6 @@ void BM_FST_JSON_no_out(nvbench::state& state)

void BM_FST_JSON_no_str(nvbench::state& state)
{
// TODO: to be replaced by nvbench fixture once it's ready
cudf::rmm_pool_raii rmm_pool;

CUDF_EXPECTS(state.get_int64("string_size") <= std::numeric_limits<size_type>::max(),
"Benchmarks only support up to size_type's maximum number of items");
auto const string_size{size_type(state.get_int64("string_size"))};
6 changes: 0 additions & 6 deletions cpp/benchmarks/io/json/nested_json.cpp
@@ -157,9 +157,6 @@ auto make_test_json_data(cudf::size_type string_size, rmm::cuda_stream_view stre

void BM_NESTED_JSON(nvbench::state& state)
{
// TODO: to be replaced by nvbench fixture once it's ready
cudf::rmm_pool_raii rmm_pool;

auto const string_size{cudf::size_type(state.get_int64("string_size"))};
auto const default_options = cudf::io::json_reader_options{};

@@ -189,9 +186,6 @@ NVBENCH_BENCH(BM_NESTED_JSON)

void BM_NESTED_JSON_DEPTH(nvbench::state& state)
{
// TODO: to be replaced by nvbench fixture once it's ready
cudf::rmm_pool_raii rmm_pool;

auto const string_size{cudf::size_type(state.get_int64("string_size"))};
auto const depth{cudf::size_type(state.get_int64("depth"))};

4 changes: 0 additions & 4 deletions cpp/benchmarks/io/orc/orc_reader_input.cpp
@@ -60,8 +60,6 @@ void orc_read_common(cudf::io::orc_writer_options const& opts,
template <data_type DataType>
void BM_orc_read_data(nvbench::state& state, nvbench::type_list<nvbench::enum_type<DataType>>)
{
cudf::rmm_pool_raii rmm_pool;

auto const d_type = get_type_or_group(static_cast<int32_t>(DataType));
cudf::size_type const cardinality = state.get_int64("cardinality");
cudf::size_type const run_length = state.get_int64("run_length");
@@ -84,8 +82,6 @@ void BM_orc_read_io_compression(
nvbench::state& state,
nvbench::type_list<nvbench::enum_type<IO>, nvbench::enum_type<Compression>>)
{
cudf::rmm_pool_raii rmm_pool;

auto const d_type = get_type_or_group({static_cast<int32_t>(data_type::INTEGRAL_SIGNED),
static_cast<int32_t>(data_type::FLOAT),
static_cast<int32_t>(data_type::DECIMAL),
4 changes: 1 addition & 3 deletions cpp/benchmarks/io/orc/orc_reader_options.cpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -56,8 +56,6 @@ void BM_orc_read_varying_options(nvbench::state& state,
nvbench::enum_type<UsesNumpyDType>,
nvbench::enum_type<Timestamp>>)
{
cudf::rmm_pool_raii rmm_pool;

auto const num_chunks = RowSelection == row_selection::ALL ? 1 : chunked_read_num_chunks;

auto const use_index = UsesIndex == uses_index::YES;
8 changes: 1 addition & 7 deletions cpp/benchmarks/io/orc/orc_writer.cpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -46,8 +46,6 @@ constexpr cudf::size_type num_cols = 64;
template <data_type DataType>
void BM_orc_write_encode(nvbench::state& state, nvbench::type_list<nvbench::enum_type<DataType>>)
{
cudf::rmm_pool_raii rmm_pool;

auto const d_type = get_type_or_group(static_cast<int32_t>(DataType));
cudf::size_type const cardinality = state.get_int64("cardinality");
cudf::size_type const run_length = state.get_int64("run_length");
@@ -90,8 +88,6 @@ void BM_orc_write_io_compression(
nvbench::state& state,
nvbench::type_list<nvbench::enum_type<IO>, nvbench::enum_type<Compression>>)
{
cudf::rmm_pool_raii rmm_pool;

auto const d_type = get_type_or_group({static_cast<int32_t>(data_type::INTEGRAL_SIGNED),
static_cast<int32_t>(data_type::FLOAT),
static_cast<int32_t>(data_type::DECIMAL),
@@ -141,8 +137,6 @@ void BM_orc_write_statistics(
nvbench::state& state,
nvbench::type_list<nvbench::enum_type<Statistics>, nvbench::enum_type<Compression>>)
{
cudf::rmm_pool_raii rmm_pool;

auto const d_type = get_type_or_group({static_cast<int32_t>(data_type::INTEGRAL_SIGNED),
static_cast<int32_t>(data_type::FLOAT),
static_cast<int32_t>(data_type::DECIMAL),
6 changes: 1 addition & 5 deletions cpp/benchmarks/io/orc/orc_writer_chunks.cpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -35,8 +35,6 @@ constexpr int64_t data_size = 512 << 20;

void nvbench_orc_write(nvbench::state& state)
{
cudf::rmm_pool_raii rmm_pool;

cudf::size_type num_cols = state.get_int64("num_columns");

auto tbl = create_random_table(
@@ -79,8 +77,6 @@ void nvbench_orc_write(nvbench::state& state)

void nvbench_orc_chunked_write(nvbench::state& state)
{
cudf::rmm_pool_raii rmm_pool;

cudf::size_type num_cols = state.get_int64("num_columns");
cudf::size_type num_tables = state.get_int64("num_chunks");

4 changes: 0 additions & 4 deletions cpp/benchmarks/io/parquet/parquet_reader_input.cpp
@@ -60,8 +60,6 @@ void parquet_read_common(cudf::io::parquet_writer_options const& write_opts,
template <data_type DataType>
void BM_parquet_read_data(nvbench::state& state, nvbench::type_list<nvbench::enum_type<DataType>>)
{
cudf::rmm_pool_raii rmm_pool;

auto const d_type = get_type_or_group(static_cast<int32_t>(DataType));
cudf::size_type const cardinality = state.get_int64("cardinality");
cudf::size_type const run_length = state.get_int64("run_length");
@@ -87,8 +85,6 @@ void BM_parquet_read_io_compression(
nvbench::state& state,
nvbench::type_list<nvbench::enum_type<IO>, nvbench::enum_type<Compression>>)
{
cudf::rmm_pool_raii rmm_pool;

auto const d_type = get_type_or_group({static_cast<int32_t>(data_type::INTEGRAL),
static_cast<int32_t>(data_type::FLOAT),
static_cast<int32_t>(data_type::DECIMAL),
2 changes: 0 additions & 2 deletions cpp/benchmarks/io/parquet/parquet_reader_options.cpp
@@ -57,8 +57,6 @@ void BM_parquet_read_options(nvbench::state& state,
nvbench::enum_type<UsesPandasMetadata>,
nvbench::enum_type<Timestamp>>)
{
cudf::rmm_pool_raii rmm_pool;

auto constexpr str_to_categories = ConvertsStrings == converts_strings::YES;
auto constexpr uses_pd_metadata = UsesPandasMetadata == uses_pandas_metadata::YES;

6 changes: 1 addition & 5 deletions cpp/benchmarks/io/parquet/parquet_writer.cpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -46,8 +46,6 @@ constexpr cudf::size_type num_cols = 64;
template <data_type DataType>
void BM_parq_write_encode(nvbench::state& state, nvbench::type_list<nvbench::enum_type<DataType>>)
{
cudf::rmm_pool_raii rmm_pool;

auto const data_types = get_type_or_group(static_cast<int32_t>(DataType));
cudf::size_type const cardinality = state.get_int64("cardinality");
cudf::size_type const run_length = state.get_int64("run_length");
@@ -90,8 +88,6 @@ void BM_parq_write_io_compression(
nvbench::state& state,
nvbench::type_list<nvbench::enum_type<IO>, nvbench::enum_type<Compression>>)
{
cudf::rmm_pool_raii rmm_pool;

auto const data_types = get_type_or_group({static_cast<int32_t>(data_type::INTEGRAL),
static_cast<int32_t>(data_type::FLOAT),
static_cast<int32_t>(data_type::DECIMAL),
6 changes: 1 addition & 5 deletions cpp/benchmarks/io/parquet/parquet_writer_chunks.cpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -33,8 +33,6 @@ constexpr int64_t data_size = 512 << 20;

void PQ_write(nvbench::state& state)
{
cudf::rmm_pool_raii rmm_pool;

cudf::size_type const num_cols = state.get_int64("num_cols");

auto const tbl = create_random_table(cycle_dtypes({cudf::type_id::INT32}, num_cols),
@@ -67,8 +65,6 @@ void PQ_write(nvbench::state& state)

void PQ_write_chunked(nvbench::state& state)
{
cudf::rmm_pool_raii rmm_pool;

cudf::size_type const num_cols = state.get_int64("num_cols");
cudf::size_type const num_tables = state.get_int64("num_chunks");

2 changes: 0 additions & 2 deletions cpp/benchmarks/io/text/multibyte_split.cpp
@@ -116,8 +116,6 @@ template <data_chunk_source_type source_type>
static void bench_multibyte_split(nvbench::state& state,
nvbench::type_list<nvbench::enum_type<source_type>>)
{
cudf::rmm_pool_raii pool_raii;

auto const delim_size = state.get_int64("delim_size");
auto const delim_percent = state.get_int64("delim_percent");
auto const file_size_approx = state.get_int64("size_approx");