Remove default detail mrs: part4 #12967
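All of the changes in this part follow one pattern: detail-namespace declarations previously defaulted their final memory-resource parameter to rmm::mr::get_current_device_resource(), and that default is removed so callers must now pass both the stream and the memory resource explicitly. A minimal sketch of how a hypothetical internal call site reads after the change — the wrapper function below is illustrative only and assumes the existing cudf::detail::copy_bitmask, cudf::get_default_stream(), and rmm::mr::get_current_device_resource() APIs:

#include <cudf/column/column_view.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/utilities/default_stream.hpp>

#include <rmm/device_buffer.hpp>
#include <rmm/mr/device/per_device_resource.hpp>

// Hypothetical call site, not part of this diff: with the default removed,
// the caller spells out the stream and the memory resource that were
// previously supplied by the detail header's default argument.
rmm::device_buffer copy_mask_of(cudf::column_view const& col)
{
  return cudf::detail::copy_bitmask(
    col, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
}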

54 changes: 24 additions & 30 deletions cpp/include/cudf/detail/null_mask.hpp
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2019-2022, NVIDIA CORPORATION.
+* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -31,11 +31,10 @@ namespace detail {
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
-rmm::device_buffer create_null_mask(
-size_type size,
-mask_state state,
-rmm::cuda_stream_view stream,
-rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+rmm::device_buffer create_null_mask(size_type size,
+mask_state state,
+rmm::cuda_stream_view stream,
+rmm::mr::device_memory_resource* mr);

/**
* @copydoc cudf::set_null_mask(bitmask_type*, size_type, size_type, bool)
@@ -209,55 +208,50 @@ std::vector<size_type> segmented_null_count(bitmask_type const* bitmask,
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
-rmm::device_buffer copy_bitmask(
-bitmask_type const* mask,
-size_type begin_bit,
-size_type end_bit,
-rmm::cuda_stream_view stream,
-rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+rmm::device_buffer copy_bitmask(bitmask_type const* mask,
+size_type begin_bit,
+size_type end_bit,
+rmm::cuda_stream_view stream,
+rmm::mr::device_memory_resource* mr);

/**
* @copydoc cudf::copy_bitmask(column_view const& view, rmm::mr::device_memory_resource*)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
-rmm::device_buffer copy_bitmask(
-column_view const& view,
-rmm::cuda_stream_view stream,
-rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+rmm::device_buffer copy_bitmask(column_view const& view,
+rmm::cuda_stream_view stream,
+rmm::mr::device_memory_resource* mr);

/**
* @copydoc bitmask_and(host_span<bitmask_type const* const>, host_span<size_type> const,
* size_type, rmm::mr::device_memory_resource *)
*
* @param stream CUDA stream used for device memory operations and kernel launches
*/
-std::pair<rmm::device_buffer, size_type> bitmask_and(
-host_span<bitmask_type const* const> masks,
-host_span<size_type const> masks_begin_bits,
-size_type mask_size_bits,
-rmm::cuda_stream_view stream,
-rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+std::pair<rmm::device_buffer, size_type> bitmask_and(host_span<bitmask_type const* const> masks,
+host_span<size_type const> masks_begin_bits,
+size_type mask_size_bits,
+rmm::cuda_stream_view stream,
+rmm::mr::device_memory_resource* mr);

/**
* @copydoc cudf::bitmask_and
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
-std::pair<rmm::device_buffer, size_type> bitmask_and(
-table_view const& view,
-rmm::cuda_stream_view stream,
-rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+std::pair<rmm::device_buffer, size_type> bitmask_and(table_view const& view,
+rmm::cuda_stream_view stream,
+rmm::mr::device_memory_resource* mr);

/**
* @copydoc cudf::bitmask_or
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
-std::pair<rmm::device_buffer, size_type> bitmask_or(
-table_view const& view,
-rmm::cuda_stream_view stream,
-rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+std::pair<rmm::device_buffer, size_type> bitmask_or(table_view const& view,
+rmm::cuda_stream_view stream,
+rmm::mr::device_memory_resource* mr);

/**
* @brief Performs a bitwise AND of the specified bitmasks,
10 changes: 3 additions & 7 deletions cpp/include/cudf/detail/structs/utilities.hpp
@@ -175,7 +175,7 @@ class flattened_table {
std::vector<null_order> const& null_precedence,
column_nullability nullability,
rmm::cuda_stream_view stream,
-rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+rmm::mr::device_memory_resource* mr);

/**
* @brief Superimpose nulls from a given null mask into the input column, using bitwise AND.
@@ -222,9 +222,7 @@ class flattened_table {
* to be kept alive.
*/
[[nodiscard]] std::pair<column_view, temporary_nullable_data> push_down_nulls(
-column_view const& input,
-rmm::cuda_stream_view stream,
-rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+column_view const& input, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr);

/**
* @brief Push down nulls from columns of the input table into their children columns, using
@@ -251,9 +249,7 @@
* to be kept alive.
*/
[[nodiscard]] std::pair<table_view, temporary_nullable_data> push_down_nulls(
-table_view const& input,
-rmm::cuda_stream_view stream,
-rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+table_view const& input, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr);

/**
* @brief Checks if a column or any of its children is a struct column with structs that are null.
9 changes: 4 additions & 5 deletions cpp/include/cudf/lists/detail/concatenate.hpp
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2020-2022, NVIDIA CORPORATION.
+* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -43,10 +43,9 @@ namespace detail {
* @param mr Device memory resource used to allocate the returned column's device memory.
* @return New column with concatenated results.
*/
-std::unique_ptr<column> concatenate(
-host_span<column_view const> columns,
-rmm::cuda_stream_view stream,
-rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+std::unique_ptr<column> concatenate(host_span<column_view const> columns,
+rmm::cuda_stream_view stream,
+rmm::mr::device_memory_resource* mr);

} // namespace detail
} // namespace lists
20 changes: 9 additions & 11 deletions cpp/include/cudf/lists/detail/extract.hpp
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2022, NVIDIA CORPORATION.
+* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -27,22 +27,20 @@ namespace detail {
* rmm::mr::device_memory_resource*)
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
-std::unique_ptr<column> extract_list_element(
-lists_column_view lists_column,
-size_type const index,
-rmm::cuda_stream_view stream,
-rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+std::unique_ptr<column> extract_list_element(lists_column_view lists_column,
+size_type const index,
+rmm::cuda_stream_view stream,
+rmm::mr::device_memory_resource* mr);

/**
* @copydoc cudf::lists::extract_list_element(lists_column_view, column_view const&,
* rmm::mr::device_memory_resource*)
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
-std::unique_ptr<column> extract_list_element(
-lists_column_view lists_column,
-column_view const& indices,
-rmm::cuda_stream_view stream,
-rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+std::unique_ptr<column> extract_list_element(lists_column_view lists_column,
+column_view const& indices,
+rmm::cuda_stream_view stream,
+rmm::mr::device_memory_resource* mr);

} // namespace detail
} // namespace lists
11 changes: 5 additions & 6 deletions cpp/include/cudf/lists/detail/interleave_columns.hpp
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2021, NVIDIA CORPORATION.
+* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -44,11 +44,10 @@ namespace detail {
* @param mr Device memory resource used to allocate the returned column's device memory.
* @return The interleaved columns as a single column.
*/
-std::unique_ptr<column> interleave_columns(
-table_view const& input,
-bool has_null_mask,
-rmm::cuda_stream_view stream,
-rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+std::unique_ptr<column> interleave_columns(table_view const& input,
+bool has_null_mask,
+rmm::cuda_stream_view stream,
+rmm::mr::device_memory_resource* mr);

} // namespace detail
} // namespace lists
9 changes: 4 additions & 5 deletions cpp/include/cudf/lists/detail/reverse.hpp
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2022, NVIDIA CORPORATION.
+* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -23,9 +23,8 @@ namespace cudf::lists::detail {
* @copydoc cudf::lists::reverse
* @param stream CUDA stream used for device memory operations and kernel launches
*/
-std::unique_ptr<column> reverse(
-lists_column_view const& input,
-rmm::cuda_stream_view stream,
-rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+std::unique_ptr<column> reverse(lists_column_view const& input,
+rmm::cuda_stream_view stream,
+rmm::mr::device_memory_resource* mr);

} // namespace cudf::lists::detail
43 changes: 20 additions & 23 deletions cpp/include/cudf/lists/detail/scatter.cuh
@@ -89,15 +89,14 @@ rmm::device_uvector<unbound_list_view> list_vector_from_column(
* @return New lists column.
*/
template <typename MapIterator>
-std::unique_ptr<column> scatter_impl(
-rmm::device_uvector<unbound_list_view> const& source_vector,
-rmm::device_uvector<unbound_list_view>& target_vector,
-MapIterator scatter_map_begin,
-MapIterator scatter_map_end,
-column_view const& source,
-column_view const& target,
-rmm::cuda_stream_view stream,
-rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
+std::unique_ptr<column> scatter_impl(rmm::device_uvector<unbound_list_view> const& source_vector,
+rmm::device_uvector<unbound_list_view>& target_vector,
+MapIterator scatter_map_begin,
+MapIterator scatter_map_end,
+column_view const& source,
+column_view const& target,
+rmm::cuda_stream_view stream,
+rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(column_types_equal(source, target), "Mismatched column types.");

@@ -170,13 +169,12 @@ std::unique_ptr<column> scatter_impl(
* @return New lists column.
*/
template <typename MapIterator>
-std::unique_ptr<column> scatter(
-column_view const& source,
-MapIterator scatter_map_begin,
-MapIterator scatter_map_end,
-column_view const& target,
-rmm::cuda_stream_view stream,
-rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
+std::unique_ptr<column> scatter(column_view const& source,
+MapIterator scatter_map_begin,
+MapIterator scatter_map_end,
+column_view const& target,
+rmm::cuda_stream_view stream,
+rmm::mr::device_memory_resource* mr)
{
auto const num_rows = target.size();
if (num_rows == 0) { return cudf::empty_like(target); }
@@ -227,13 +225,12 @@ std::unique_ptr<column> scatter(
* @return New lists column.
*/
template <typename MapIterator>
-std::unique_ptr<column> scatter(
-scalar const& slr,
-MapIterator scatter_map_begin,
-MapIterator scatter_map_end,
-column_view const& target,
-rmm::cuda_stream_view stream,
-rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
+std::unique_ptr<column> scatter(scalar const& slr,
+MapIterator scatter_map_begin,
+MapIterator scatter_map_end,
+column_view const& target,
+rmm::cuda_stream_view stream,
+rmm::mr::device_memory_resource* mr)
{
auto const num_rows = target.size();
if (num_rows == 0) { return cudf::empty_like(target); }
24 changes: 11 additions & 13 deletions cpp/include/cudf/lists/detail/sorting.hpp
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2021, NVIDIA CORPORATION.
+* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -28,24 +28,22 @@ namespace detail {
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
-std::unique_ptr<column> sort_lists(
-lists_column_view const& input,
-order column_order,
-null_order null_precedence,
-rmm::cuda_stream_view stream,
-rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+std::unique_ptr<column> sort_lists(lists_column_view const& input,
+order column_order,
+null_order null_precedence,
+rmm::cuda_stream_view stream,
+rmm::mr::device_memory_resource* mr);

/**
* @copydoc cudf::lists::stable_sort_lists
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
-std::unique_ptr<column> stable_sort_lists(
-lists_column_view const& input,
-order column_order,
-null_order null_precedence,
-rmm::cuda_stream_view stream,
-rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+std::unique_ptr<column> stable_sort_lists(lists_column_view const& input,
+order column_order,
+null_order null_precedence,
+rmm::cuda_stream_view stream,
+rmm::mr::device_memory_resource* mr);

} // namespace detail
} // namespace lists
22 changes: 10 additions & 12 deletions cpp/include/cudf/lists/detail/stream_compaction.hpp
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2022, NVIDIA CORPORATION.
+* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -28,22 +28,20 @@ namespace cudf::lists::detail {
*
* @param stream CUDA stream used for device memory operations and kernel launches
*/
-std::unique_ptr<column> apply_boolean_mask(
-lists_column_view const& input,
-lists_column_view const& boolean_mask,
-rmm::cuda_stream_view stream,
-rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+std::unique_ptr<column> apply_boolean_mask(lists_column_view const& input,
+lists_column_view const& boolean_mask,
+rmm::cuda_stream_view stream,
+rmm::mr::device_memory_resource* mr);

/**
* @copydoc cudf::list::distinct
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
-std::unique_ptr<column> distinct(
-lists_column_view const& input,
-null_equality nulls_equal,
-nan_equality nans_equal,
-rmm::cuda_stream_view stream,
-rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+std::unique_ptr<column> distinct(lists_column_view const& input,
+null_equality nulls_equal,
+nan_equality nans_equal,
+rmm::cuda_stream_view stream,
+rmm::mr::device_memory_resource* mr);

} // namespace cudf::lists::detail
5 changes: 3 additions & 2 deletions cpp/src/dictionary/detail/concatenate.cu
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2020-2022, NVIDIA CORPORATION.
+* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -114,7 +114,8 @@ struct compute_children_offsets_fn {
[](auto lhs, auto rhs) {
return offsets_pair{lhs.first + rhs.first, lhs.second + rhs.second};
});
-return cudf::detail::make_device_uvector_sync(offsets, stream);
+return cudf::detail::make_device_uvector_sync(
+offsets, stream, rmm::mr::get_current_device_resource());
}

private: