diff --git a/cpp/include/cudf/detail/reshape.hpp b/cpp/include/cudf/detail/reshape.hpp
index ccffcbc61df..5ab53690a23 100644
--- a/cpp/include/cudf/detail/reshape.hpp
+++ b/cpp/include/cudf/detail/reshape.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2022, NVIDIA CORPORATION.
+ * Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -30,21 +30,19 @@ namespace detail {
*
* @param stream CUDA stream used for device memory operations and kernel launches
*/
-std::unique_ptr<table> tile(
- table_view const& input,
- size_type count,
- rmm::cuda_stream_view,
- rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+std::unique_ptr<table> tile(table_view const& input,
+ size_type count,
+ rmm::cuda_stream_view,
+ rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::interleave_columns
*
* @param stream CUDA stream used for device memory operations and kernel launches
*/
-std::unique_ptr<column> interleave_columns(
- table_view const& input,
- rmm::cuda_stream_view,
- rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+std::unique_ptr<column> interleave_columns(table_view const& input,
+ rmm::cuda_stream_view,
+ rmm::mr::device_memory_resource* mr);
} // namespace detail
} // namespace cudf
diff --git a/cpp/include/cudf/io/detail/csv.hpp b/cpp/include/cudf/io/detail/csv.hpp
index 90d730338fc..9fdc7a47fb9 100644
--- a/cpp/include/cudf/io/detail/csv.hpp
+++ b/cpp/include/cudf/io/detail/csv.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2022, NVIDIA CORPORATION.
+ * Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -56,7 +56,7 @@ void write_csv(data_sink* sink,
host_span<std::string const> column_names,
csv_writer_options const& options,
rmm::cuda_stream_view stream,
- rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+ rmm::mr::device_memory_resource* mr);
} // namespace csv
} // namespace detail
diff --git a/cpp/include/cudf/io/detail/tokenize_json.hpp b/cpp/include/cudf/io/detail/tokenize_json.hpp
index b03dbd4fb70..4914f434c98 100644
--- a/cpp/include/cudf/io/detail/tokenize_json.hpp
+++ b/cpp/include/cudf/io/detail/tokenize_json.hpp
@@ -131,7 +131,7 @@ std::pair<rmm::device_uvector<PdaTokenT>, rmm::device_uvector<SymbolOffsetT>> ge
device_span<SymbolT const> json_in,
cudf::io::json_reader_options const& options,
rmm::cuda_stream_view stream,
- rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+ rmm::mr::device_memory_resource* mr);
} // namespace detail
diff --git a/cpp/include/cudf/lists/detail/combine.hpp b/cpp/include/cudf/lists/detail/combine.hpp
index 9f28074173a..4bc45e48a9f 100644
--- a/cpp/include/cudf/lists/detail/combine.hpp
+++ b/cpp/include/cudf/lists/detail/combine.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, NVIDIA CORPORATION.
+ * Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -27,22 +27,20 @@ namespace detail {
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
-std::unique_ptr<column> concatenate_rows(
- table_view const& input,
- concatenate_null_policy null_policy,
- rmm::cuda_stream_view stream,
- rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+std::unique_ptr<column> concatenate_rows(table_view const& input,
+ concatenate_null_policy null_policy,
+ rmm::cuda_stream_view stream,
+ rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::lists::concatenate_list_elements
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
-std::unique_ptr<column> concatenate_list_elements(
- column_view const& input,
- concatenate_null_policy null_policy,
- rmm::cuda_stream_view stream,
- rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+std::unique_ptr<column> concatenate_list_elements(column_view const& input,
+ concatenate_null_policy null_policy,
+ rmm::cuda_stream_view stream,
+ rmm::mr::device_memory_resource* mr);
} // namespace detail
} // namespace lists
diff --git a/cpp/include/cudf/lists/detail/set_operations.hpp b/cpp/include/cudf/lists/detail/set_operations.hpp
index ef4255de430..1411c65448e 100644
--- a/cpp/include/cudf/lists/detail/set_operations.hpp
+++ b/cpp/include/cudf/lists/detail/set_operations.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022, NVIDIA CORPORATION.
+ * Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -30,52 +30,48 @@ namespace cudf::lists::detail {
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
-std::unique_ptr<column> have_overlap(
- lists_column_view const& lhs,
- lists_column_view const& rhs,
- null_equality nulls_equal,
- nan_equality nans_equal,
- rmm::cuda_stream_view stream,
- rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+std::unique_ptr<column> have_overlap(lists_column_view const& lhs,
+ lists_column_view const& rhs,
+ null_equality nulls_equal,
+ nan_equality nans_equal,
+ rmm::cuda_stream_view stream,
+ rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::list::intersect_distinct
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
-std::unique_ptr<column> intersect_distinct(
- lists_column_view const& lhs,
- lists_column_view const& rhs,
- null_equality nulls_equal,
- nan_equality nans_equal,
- rmm::cuda_stream_view stream,
- rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+std::unique_ptr<column> intersect_distinct(lists_column_view const& lhs,
+ lists_column_view const& rhs,
+ null_equality nulls_equal,
+ nan_equality nans_equal,
+ rmm::cuda_stream_view stream,
+ rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::list::union_distinct
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
-std::unique_ptr<column> union_distinct(
- lists_column_view const& lhs,
- lists_column_view const& rhs,
- null_equality nulls_equal,
- nan_equality nans_equal,
- rmm::cuda_stream_view stream,
- rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+std::unique_ptr<column> union_distinct(lists_column_view const& lhs,
+ lists_column_view const& rhs,
+ null_equality nulls_equal,
+ nan_equality nans_equal,
+ rmm::cuda_stream_view stream,
+ rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::list::difference_distinct
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
-std::unique_ptr<column> difference_distinct(
- lists_column_view const& lhs,
- lists_column_view const& rhs,
- null_equality nulls_equal,
- nan_equality nans_equal,
- rmm::cuda_stream_view stream,
- rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+std::unique_ptr<column> difference_distinct(lists_column_view const& lhs,
+ lists_column_view const& rhs,
+ null_equality nulls_equal,
+ nan_equality nans_equal,
+ rmm::cuda_stream_view stream,
+ rmm::mr::device_memory_resource* mr);
/** @} */ // end of group
} // namespace cudf::lists::detail
diff --git a/cpp/src/copying/get_element.cu b/cpp/src/copying/get_element.cu
index 5e76b4adbbe..cc12aaa1382 100644
--- a/cpp/src/copying/get_element.cu
+++ b/cpp/src/copying/get_element.cu
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2022, NVIDIA CORPORATION.
+ * Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -37,11 +37,10 @@ namespace {
struct get_element_functor {
template <typename T, std::enable_if_t<is_fixed_width<T>() && !is_fixed_point<T>()>* p = nullptr>
-  std::unique_ptr<scalar> operator()(
- column_view const& input,
- size_type index,
- rmm::cuda_stream_view stream,
- rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
+  std::unique_ptr<scalar> operator()(column_view const& input,
+ size_type index,
+ rmm::cuda_stream_view stream,
+ rmm::mr::device_memory_resource* mr)
{
auto s = make_fixed_width_scalar(data_type(type_to_id<T>()), stream, mr);
@@ -61,11 +60,10 @@ struct get_element_functor {
}
template <typename T, std::enable_if_t<std::is_same_v<T, string_view>>* p = nullptr>
-  std::unique_ptr<scalar> operator()(
- column_view const& input,
- size_type index,
- rmm::cuda_stream_view stream,
- rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
+  std::unique_ptr<scalar> operator()(column_view const& input,
+ size_type index,
+ rmm::cuda_stream_view stream,
+ rmm::mr::device_memory_resource* mr)
{
auto device_col = column_device_view::create(input, stream);
@@ -86,11 +84,10 @@ struct get_element_functor {
}
template <typename T, std::enable_if_t<std::is_same_v<T, dictionary32>>* p = nullptr>
-  std::unique_ptr<scalar> operator()(
- column_view const& input,
- size_type index,
- rmm::cuda_stream_view stream,
- rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
+  std::unique_ptr<scalar> operator()(column_view const& input,
+ size_type index,
+ rmm::cuda_stream_view stream,
+ rmm::mr::device_memory_resource* mr)
{
auto dict_view = dictionary_column_view(input);
auto indices_iter = detail::indexalator_factory::make_input_iterator(dict_view.indices());
@@ -122,11 +119,10 @@ struct get_element_functor {
}
template <typename T, std::enable_if_t<std::is_same_v<T, list_view>>* p = nullptr>
-  std::unique_ptr<scalar> operator()(
- column_view const& input,
- size_type index,
- rmm::cuda_stream_view stream,
- rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
+  std::unique_ptr<scalar> operator()(column_view const& input,
+ size_type index,
+ rmm::cuda_stream_view stream,
+ rmm::mr::device_memory_resource* mr)
{
bool valid = is_element_valid_sync(input, index, stream);
auto const child_col_idx = lists_column_view::child_column_index;
@@ -147,11 +143,10 @@ struct get_element_functor {
}
template <typename T, std::enable_if_t<cudf::is_fixed_point<T>()>* p = nullptr>
-  std::unique_ptr<scalar> operator()(
- column_view const& input,
- size_type index,
- rmm::cuda_stream_view stream,
- rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
+  std::unique_ptr<scalar> operator()(column_view const& input,
+ size_type index,
+ rmm::cuda_stream_view stream,
+ rmm::mr::device_memory_resource* mr)
{
using Type = typename T::rep;
@@ -178,11 +173,10 @@ struct get_element_functor {
}
template <typename T, std::enable_if_t<std::is_same_v<T, struct_view>>* p = nullptr>
-  std::unique_ptr<scalar> operator()(
- column_view const& input,
- size_type index,
- rmm::cuda_stream_view stream,
- rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
+  std::unique_ptr<scalar> operator()(column_view const& input,
+ size_type index,
+ rmm::cuda_stream_view stream,
+ rmm::mr::device_memory_resource* mr)
{
bool valid = is_element_valid_sync(input, index, stream);
auto row_contents =
diff --git a/cpp/src/filling/fill.cu b/cpp/src/filling/fill.cu
index ecd66f1b0c9..a747cc195ae 100644
--- a/cpp/src/filling/fill.cu
+++ b/cpp/src/filling/fill.cu
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2022, NVIDIA CORPORATION.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -104,11 +104,10 @@ struct out_of_place_fill_range_dispatch {
template <typename T, CUDF_ENABLE_IF(cudf::is_rep_layout_compatible<T>() or cudf::is_fixed_point<T>())>
-  std::unique_ptr<cudf::column> operator()(
- cudf::size_type begin,
- cudf::size_type end,
- rmm::cuda_stream_view stream,
- rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
+  std::unique_ptr<cudf::column> operator()(cudf::size_type begin,
+ cudf::size_type end,
+ rmm::cuda_stream_view stream,
+ rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(input.type() == value.type(), "Data type mismatch.");
auto p_ret = std::make_unique<cudf::column>(input, stream, mr);
diff --git a/cpp/src/filling/sequence.cu b/cpp/src/filling/sequence.cu
index 284e7c46347..b4bab369c61 100644
--- a/cpp/src/filling/sequence.cu
+++ b/cpp/src/filling/sequence.cu
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2022, NVIDIA CORPORATION.
+ * Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -134,11 +134,10 @@ std::unique_ptr<column> sequence(size_type size,
return type_dispatcher(init.type(), sequence_functor{}, size, init, step, stream, mr);
}
-std::unique_ptr<column> sequence(
- size_type size,
- scalar const& init,
- rmm::cuda_stream_view stream,
- rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
+std::unique_ptr<column> sequence(size_type size,
+ scalar const& init,
+ rmm::cuda_stream_view stream,
+ rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(size >= 0, "size must be >= 0");
CUDF_EXPECTS(is_numeric(init.type()), "init scalar type must be numeric");
diff --git a/cpp/src/io/csv/durations.hpp b/cpp/src/io/csv/durations.hpp
index d42ddf3817c..ac925011c58 100644
--- a/cpp/src/io/csv/durations.hpp
+++ b/cpp/src/io/csv/durations.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, NVIDIA CORPORATION.
+ * Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -28,10 +28,9 @@ namespace io {
namespace detail {
namespace csv {
-std::unique_ptr<column> pandas_format_durations(
- column_view const& durations,
- rmm::cuda_stream_view stream,
- rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+std::unique_ptr<column> pandas_format_durations(column_view const& durations,
+ rmm::cuda_stream_view stream,
+ rmm::mr::device_memory_resource* mr);
} // namespace csv
} // namespace detail
diff --git a/cpp/src/io/json/json_column.cu b/cpp/src/io/json/json_column.cu
index 16273b35a11..6e095c6658c 100644
--- a/cpp/src/io/json/json_column.cu
+++ b/cpp/src/io/json/json_column.cu
@@ -891,7 +891,8 @@ table_with_metadata device_parse_nested_json(device_span<SymbolT const> d_input,
auto gpu_tree = [&]() {
// Parse the JSON and get the token stream
- const auto [tokens_gpu, token_indices_gpu] = get_token_stream(d_input, options, stream);
+ const auto [tokens_gpu, token_indices_gpu] =
+ get_token_stream(d_input, options, stream, rmm::mr::get_current_device_resource());
// gpu tree generation
return get_tree_representation(tokens_gpu, token_indices_gpu, stream);
}(); // IILE used to free memory of token data.
diff --git a/cpp/src/lists/set_operations.cu b/cpp/src/lists/set_operations.cu
index a31b7c6e5be..45891cd0e62 100644
--- a/cpp/src/lists/set_operations.cu
+++ b/cpp/src/lists/set_operations.cu
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022, NVIDIA CORPORATION.
+ * Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -191,8 +191,11 @@ std::unique_ptr union_distinct(lists_column_view const& lhs,
// Algorithm: `return distinct(concatenate_rows(lhs, rhs))`.
- auto const union_col = lists::detail::concatenate_rows(
- table_view{{lhs.parent(), rhs.parent()}}, concatenate_null_policy::NULLIFY_OUTPUT_ROW, stream);
+ auto const union_col =
+ lists::detail::concatenate_rows(table_view{{lhs.parent(), rhs.parent()}},
+ concatenate_null_policy::NULLIFY_OUTPUT_ROW,
+ stream,
+ rmm::mr::get_current_device_resource());
return cudf::lists::detail::distinct(
lists_column_view{union_col->view()}, nulls_equal, nans_equal, stream, mr);
diff --git a/cpp/tests/io/json_tree.cpp b/cpp/tests/io/json_tree.cpp
index c6b181fe8a1..46bc1198079 100644
--- a/cpp/tests/io/json_tree.cpp
+++ b/cpp/tests/io/json_tree.cpp
@@ -586,8 +586,8 @@ TEST_F(JsonTest, TreeRepresentation)
cudf::io::json_reader_options const options{};
// Parse the JSON and get the token stream
- const auto [tokens_gpu, token_indices_gpu] =
- cudf::io::json::detail::get_token_stream(d_input, options, stream);
+ const auto [tokens_gpu, token_indices_gpu] = cudf::io::json::detail::get_token_stream(
+ d_input, options, stream, rmm::mr::get_current_device_resource());
// Get the JSON's tree representation
auto gpu_tree = cuio_json::detail::get_tree_representation(tokens_gpu, token_indices_gpu, stream);
@@ -672,8 +672,8 @@ TEST_F(JsonTest, TreeRepresentation2)
cudf::io::json_reader_options const options{};
// Parse the JSON and get the token stream
- const auto [tokens_gpu, token_indices_gpu] =
- cudf::io::json::detail::get_token_stream(d_input, options, stream);
+ const auto [tokens_gpu, token_indices_gpu] = cudf::io::json::detail::get_token_stream(
+ d_input, options, stream, rmm::mr::get_current_device_resource());
// Get the JSON's tree representation
auto gpu_tree = cuio_json::detail::get_tree_representation(tokens_gpu, token_indices_gpu, stream);
@@ -745,8 +745,8 @@ TEST_F(JsonTest, TreeRepresentation3)
options.enable_lines(true);
// Parse the JSON and get the token stream
- const auto [tokens_gpu, token_indices_gpu] =
- cudf::io::json::detail::get_token_stream(d_input, options, stream);
+ const auto [tokens_gpu, token_indices_gpu] = cudf::io::json::detail::get_token_stream(
+ d_input, options, stream, rmm::mr::get_current_device_resource());
// Get the JSON's tree representation
auto gpu_tree = cuio_json::detail::get_tree_representation(tokens_gpu, token_indices_gpu, stream);
@@ -769,8 +769,8 @@ TEST_F(JsonTest, TreeRepresentationError)
cudf::io::json_reader_options const options{};
// Parse the JSON and get the token stream
- const auto [tokens_gpu, token_indices_gpu] =
- cudf::io::json::detail::get_token_stream(d_input, options, stream);
+ const auto [tokens_gpu, token_indices_gpu] = cudf::io::json::detail::get_token_stream(
+ d_input, options, stream, rmm::mr::get_current_device_resource());
// Get the JSON's tree representation
// This JSON is invalid and will raise an exception.
@@ -851,8 +851,8 @@ TEST_P(JsonTreeTraversalTest, CPUvsGPUTraversal)
static_cast<size_t>(d_scalar.size())};
// Parse the JSON and get the token stream
- const auto [tokens_gpu, token_indices_gpu] =
- cudf::io::json::detail::get_token_stream(d_input, options, stream);
+ const auto [tokens_gpu, token_indices_gpu] = cudf::io::json::detail::get_token_stream(
+ d_input, options, stream, rmm::mr::get_current_device_resource());
// host tree generation
auto cpu_tree = get_tree_representation_cpu(tokens_gpu, token_indices_gpu, options, stream);
bool const is_array_of_arrays =
diff --git a/cpp/tests/io/nested_json_test.cpp b/cpp/tests/io/nested_json_test.cpp
index 3c01bd4de25..04dcc994d97 100644
--- a/cpp/tests/io/nested_json_test.cpp
+++ b/cpp/tests/io/nested_json_test.cpp
@@ -262,8 +262,8 @@ TEST_F(JsonTest, TokenStream)
cudf::device_span<SymbolT const>{d_scalar.data(), static_cast<size_t>(d_scalar.size())};
// Parse the JSON and get the token stream
- auto [d_tokens_gpu, d_token_indices_gpu] =
- cuio_json::detail::get_token_stream(d_input, default_options, stream);
+ auto [d_tokens_gpu, d_token_indices_gpu] = cuio_json::detail::get_token_stream(
+ d_input, default_options, stream, rmm::mr::get_current_device_resource());
// Copy back the number of tokens that were written
thrust::host_vector<PdaTokenT> const tokens_gpu =
cudf::detail::make_host_vector_async(d_tokens_gpu, stream);
@@ -398,8 +398,8 @@ TEST_F(JsonTest, TokenStream2)
cudf::device_span<SymbolT const>{d_scalar.data(), static_cast<size_t>(d_scalar.size())};
// Parse the JSON and get the token stream
- auto [d_tokens_gpu, d_token_indices_gpu] =
- cuio_json::detail::get_token_stream(d_input, default_options, stream);
+ auto [d_tokens_gpu, d_token_indices_gpu] = cuio_json::detail::get_token_stream(
+ d_input, default_options, stream, rmm::mr::get_current_device_resource());
// Copy back the number of tokens that were written
thrust::host_vector<PdaTokenT> const tokens_gpu =
cudf::detail::make_host_vector_async(d_tokens_gpu, stream);