Remove default detail mrs: part6 #12969

Merged

Changes from 6 commits
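
The change repeated across every file below is mechanical: the `cudf::detail` vector factories and the other detail-level declarations touched here lose their defaulted memory-resource parameter, and every call site now passes the resource (in this PR always `rmm::mr::get_current_device_resource()`) explicitly alongside the stream. A minimal caller-side sketch of that pattern; `make_gather_map` is a hypothetical helper invented for illustration, not code from this PR:

```cpp
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/types.hpp>

#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/per_device_resource.hpp>

#include <vector>

// Hypothetical caller: copy a host-side gather map to the device.
rmm::device_uvector<cudf::size_type> make_gather_map(std::vector<cudf::size_type> const& host_map,
                                                     rmm::cuda_stream_view stream)
{
  // Before this series the resource argument could be omitted and silently defaulted to
  // rmm::mr::get_current_device_resource(); it now has to be spelled out at the call site.
  return cudf::detail::make_device_uvector_sync(
    host_map, stream, rmm::mr::get_current_device_resource());
}
```
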
8 changes: 4 additions & 4 deletions cpp/benchmarks/iterator/iterator.cu
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2019-2022, NVIDIA CORPORATION.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -140,8 +140,8 @@ void BM_iterator(benchmark::State& state)
  cudf::column_view hasnull_F = wrap_hasnull_F;

  // Initialize dev_result to false
- auto dev_result =
- cudf::detail::make_zeroed_device_uvector_sync<TypeParam>(1, cudf::get_default_stream());
+ auto dev_result = cudf::detail::make_zeroed_device_uvector_sync<TypeParam>(
+ 1, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
  for (auto _ : state) {
  cuda_event_timer raii(state, true); // flush_l2_cache = true, stream = 0
  if (cub_or_thrust) {
@@ -210,7 +210,7 @@ void BM_pair_iterator(benchmark::State& state)

  // Initialize dev_result to false
  auto dev_result = cudf::detail::make_zeroed_device_uvector_sync<thrust::pair<T, bool>>(
- 1, cudf::get_default_stream());
+ 1, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
  for (auto _ : state) {
  cuda_event_timer raii(state, true); // flush_l2_cache = true, stream = 0
  if (cub_or_thrust) {
6 changes: 4 additions & 2 deletions cpp/include/cudf/detail/gather.cuh
@@ -583,10 +583,12 @@ void gather_bitmask(table_view const& source,
  std::transform(target.begin(), target.end(), target_masks.begin(), [](auto const& col) {
  return col->mutable_view().null_mask();
  });
- auto d_target_masks = make_device_uvector_async(target_masks, stream);
+ auto d_target_masks =
+ make_device_uvector_async(target_masks, stream, rmm::mr::get_current_device_resource());

  auto const device_source = table_device_view::create(source, stream);
- auto d_valid_counts = make_zeroed_device_uvector_async<size_type>(target.size(), stream);
+ auto d_valid_counts = make_zeroed_device_uvector_async<size_type>(
+ target.size(), stream, rmm::mr::get_current_device_resource());

  // Dispatch operation enum to get implementation
  auto const impl = [op]() {
3 changes: 2 additions & 1 deletion cpp/include/cudf/detail/null_mask.cuh
@@ -426,7 +426,8 @@ std::vector<size_type> segmented_count_bits(bitmask_type const* bitmask,

  // Construct a contiguous host buffer of indices and copy to device.
  auto const h_indices = std::vector<size_type>(indices_begin, indices_end);
- auto const d_indices = make_device_uvector_async(h_indices, stream);
+ auto const d_indices =
+ make_device_uvector_async(h_indices, stream, rmm::mr::get_current_device_resource());

  // Compute the bit counts over each segment.
  auto first_bit_indices_begin = thrust::make_transform_iterator(
58 changes: 22 additions & 36 deletions cpp/include/cudf/detail/utilities/vector_factories.hpp
@@ -48,10 +48,9 @@ namespace detail {
  * @return A device_uvector containing zeros
  */
  template <typename T>
- rmm::device_uvector<T> make_zeroed_device_uvector_async(
- std::size_t size,
- rmm::cuda_stream_view stream,
- rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
+ rmm::device_uvector<T> make_zeroed_device_uvector_async(std::size_t size,
+ rmm::cuda_stream_view stream,
+ rmm::mr::device_memory_resource* mr)
  {
  rmm::device_uvector<T> ret(size, stream, mr);
  CUDF_CUDA_TRY(cudaMemsetAsync(ret.data(), 0, size * sizeof(T), stream.value()));
@@ -70,10 +69,9 @@ rmm::device_uvector<T> make_zeroed_device_uvector_async(
  * @return A device_uvector containing zeros
  */
  template <typename T>
- rmm::device_uvector<T> make_zeroed_device_uvector_sync(
- std::size_t size,
- rmm::cuda_stream_view stream,
- rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
+ rmm::device_uvector<T> make_zeroed_device_uvector_sync(std::size_t size,
+ rmm::cuda_stream_view stream,
+ rmm::mr::device_memory_resource* mr)
  {
  rmm::device_uvector<T> ret(size, stream, mr);
  CUDF_CUDA_TRY(cudaMemsetAsync(ret.data(), 0, size * sizeof(T), stream.value()));
@@ -94,10 +92,9 @@ rmm::device_uvector<T> make_zeroed_device_uvector_sync(
  * @return A device_uvector containing the copied data
  */
  template <typename T>
- rmm::device_uvector<T> make_device_uvector_async(
- host_span<T const> source_data,
- rmm::cuda_stream_view stream,
- rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
+ rmm::device_uvector<T> make_device_uvector_async(host_span<T const> source_data,
+ rmm::cuda_stream_view stream,
+ rmm::mr::device_memory_resource* mr)
  {
  rmm::device_uvector<T> ret(source_data.size(), stream, mr);
  CUDF_CUDA_TRY(cudaMemcpyAsync(ret.data(),
@@ -126,9 +123,7 @@ template <
  std::enable_if_t<
  std::is_convertible_v<Container, host_span<typename Container::value_type const>>>* = nullptr>
  rmm::device_uvector<typename Container::value_type> make_device_uvector_async(
- Container const& c,
- rmm::cuda_stream_view stream,
- rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
+ Container const& c, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr)
  {
  return make_device_uvector_async(host_span<typename Container::value_type const>{c}, stream, mr);
  }
@@ -146,10 +141,9 @@ rmm::device_uvector<typename Container::value_type> make_device_uvector_async(
  * @return A device_uvector containing the copied data
  */
  template <typename T>
- rmm::device_uvector<T> make_device_uvector_async(
- device_span<T const> source_data,
- rmm::cuda_stream_view stream,
- rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
+ rmm::device_uvector<T> make_device_uvector_async(device_span<T const> source_data,
+ rmm::cuda_stream_view stream,
+ rmm::mr::device_memory_resource* mr)
  {
  rmm::device_uvector<T> ret(source_data.size(), stream, mr);
  CUDF_CUDA_TRY(cudaMemcpyAsync(ret.data(),
@@ -178,9 +172,7 @@ template <
  std::enable_if_t<
  std::is_convertible_v<Container, device_span<typename Container::value_type const>>>* = nullptr>
  rmm::device_uvector<typename Container::value_type> make_device_uvector_async(
- Container const& c,
- rmm::cuda_stream_view stream,
- rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
+ Container const& c, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr)
  {
  return make_device_uvector_async(
  device_span<typename Container::value_type const>{c}, stream, mr);
@@ -199,10 +191,9 @@ rmm::device_uvector<typename Container::value_type> make_device_uvector_async(
  * @return A device_uvector containing the copied data
  */
  template <typename T>
- rmm::device_uvector<T> make_device_uvector_sync(
- host_span<T const> source_data,
- rmm::cuda_stream_view stream,
- rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
+ rmm::device_uvector<T> make_device_uvector_sync(host_span<T const> source_data,
+ rmm::cuda_stream_view stream,
+ rmm::mr::device_memory_resource* mr)
  {
  auto ret = make_device_uvector_async(source_data, stream, mr);
  stream.synchronize();
@@ -227,9 +218,7 @@ template <
  std::enable_if_t<
  std::is_convertible_v<Container, host_span<typename Container::value_type const>>>* = nullptr>
  rmm::device_uvector<typename Container::value_type> make_device_uvector_sync(
- Container const& c,
- rmm::cuda_stream_view stream,
- rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
+ Container const& c, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr)
  {
  return make_device_uvector_sync(host_span<typename Container::value_type const>{c}, stream, mr);
  }
@@ -247,10 +236,9 @@ rmm::device_uvector<typename Container::value_type> make_device_uvector_sync(
  * @return A device_uvector containing the copied data
  */
  template <typename T>
- rmm::device_uvector<T> make_device_uvector_sync(
- device_span<T const> source_data,
- rmm::cuda_stream_view stream,
- rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
+ rmm::device_uvector<T> make_device_uvector_sync(device_span<T const> source_data,
+ rmm::cuda_stream_view stream,
+ rmm::mr::device_memory_resource* mr)
  {
  auto ret = make_device_uvector_async(source_data, stream, mr);
  stream.synchronize();
@@ -275,9 +263,7 @@ template <
  std::enable_if_t<
  std::is_convertible_v<Container, device_span<typename Container::value_type const>>>* = nullptr>
  rmm::device_uvector<typename Container::value_type> make_device_uvector_sync(
- Container const& c,
- rmm::cuda_stream_view stream,
- rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
+ Container const& c, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr)
  {
  return make_device_uvector_sync(device_span<typename Container::value_type const>{c}, stream, mr);
  }
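
With the defaults removed from these factories, every temporary allocation names its memory resource at the call site. A sketch of the split that makes explicit, based on the call sites in this PR (temporaries take `rmm::mr::get_current_device_resource()`, while data returned to the caller is allocated from the caller-supplied `mr`); `copy_and_zero` is a hypothetical function written for illustration, not part of cudf:

```cpp
#include <cudf/detail/utilities/vector_factories.hpp>

#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>

#include <vector>

rmm::device_uvector<int> copy_and_zero(std::vector<int> const& host_data,
                                       rmm::cuda_stream_view stream,
                                       rmm::mr::device_memory_resource* mr)
{
  // Scratch buffer that never leaves this function: allocated from the current
  // device resource, not from the output resource.
  auto scratch = cudf::detail::make_zeroed_device_uvector_async<int>(
    host_data.size(), stream, rmm::mr::get_current_device_resource());
  static_cast<void>(scratch);  // a real implementation would launch kernels using `scratch`

  // Result handed back to the caller: allocated from the caller-provided `mr`.
  return cudf::detail::make_device_uvector_sync(host_data, stream, mr);
}
```

Removing the default argument is presumably what keeps that distinction visible: a detail function can no longer pick up the current device resource by accident when it meant to honor the `mr` it was given.
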
11 changes: 5 additions & 6 deletions cpp/include/cudf/lists/lists_column_factories.hpp
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2021-2022, NVIDIA CORPORATION.
+ * Copyright (c) 2021-2023, NVIDIA CORPORATION.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -35,11 +35,10 @@ namespace detail {
  * @param[in] stream CUDA stream used for device memory operations and kernel launches.
  * @param[in] mr Device memory resource used to allocate the returned column's device memory.
  */
- std::unique_ptr<cudf::column> make_lists_column_from_scalar(
- list_scalar const& value,
- size_type size,
- rmm::cuda_stream_view stream,
- rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+ std::unique_ptr<cudf::column> make_lists_column_from_scalar(list_scalar const& value,
+ size_type size,
+ rmm::cuda_stream_view stream,
+ rmm::mr::device_memory_resource* mr);

  } // namespace detail
  } // namespace lists
9 changes: 4 additions & 5 deletions cpp/include/cudf/structs/detail/concatenate.hpp
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2020-2021, NVIDIA CORPORATION.
+ * Copyright (c) 2020-2023, NVIDIA CORPORATION.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -48,10 +48,9 @@ namespace detail {
  * @param mr Device memory resource used to allocate the returned column's device memory.
  * @return New column with concatenated results.
  */
- std::unique_ptr<column> concatenate(
- host_span<column_view const> columns,
- rmm::cuda_stream_view stream,
- rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+ std::unique_ptr<column> concatenate(host_span<column_view const> columns,
+ rmm::cuda_stream_view stream,
+ rmm::mr::device_memory_resource* mr);

  } // namespace detail
  } // namespace structs
21 changes: 13 additions & 8 deletions cpp/include/cudf_test/column_wrapper.hpp
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2019-2022, NVIDIA CORPORATION.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -732,9 +732,11 @@ class strings_column_wrapper : public detail::column_wrapper {
  {
  auto all_valid = thrust::make_constant_iterator(true);
  auto [chars, offsets] = detail::make_chars_and_offsets(begin, end, all_valid);
- auto d_chars = cudf::detail::make_device_uvector_sync(chars, cudf::get_default_stream());
- auto d_offsets = cudf::detail::make_device_uvector_sync(offsets, cudf::get_default_stream());
- wrapped = cudf::make_strings_column(d_chars, d_offsets);
+ auto d_chars = cudf::detail::make_device_uvector_sync(
+ chars, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
+ auto d_offsets = cudf::detail::make_device_uvector_sync(
+ offsets, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
+ wrapped = cudf::make_strings_column(d_chars, d_offsets);
  }

  /**
@@ -772,10 +774,13 @@ class strings_column_wrapper : public detail::column_wrapper {
  size_type num_strings = std::distance(begin, end);
  auto [chars, offsets] = detail::make_chars_and_offsets(begin, end, v);
  auto null_mask = detail::make_null_mask_vector(v, v + num_strings);
- auto d_chars = cudf::detail::make_device_uvector_sync(chars, cudf::get_default_stream());
- auto d_offsets = cudf::detail::make_device_uvector_sync(offsets, cudf::get_default_stream());
- auto d_bitmask = cudf::detail::make_device_uvector_sync(null_mask, cudf::get_default_stream());
- wrapped = cudf::make_strings_column(d_chars, d_offsets, d_bitmask);
+ auto d_chars = cudf::detail::make_device_uvector_sync(
+ chars, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
+ auto d_offsets = cudf::detail::make_device_uvector_sync(
+ offsets, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
+ auto d_bitmask = cudf::detail::make_device_uvector_sync(
+ null_mask, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
+ wrapped = cudf::make_strings_column(d_chars, d_offsets, d_bitmask);
  }

  /**
5 changes: 3 additions & 2 deletions cpp/include/cudf_test/tdigest_utilities.cuh
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2022, NVIDIA CORPORATION.
+ * Copyright (c) 2022-2023, NVIDIA CORPORATION.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -168,7 +168,8 @@ void tdigest_minmax_compare(cudf::tdigest::tdigest_column_view const& tdv,
  // verify min/max
  thrust::host_vector<device_span<T const>> h_spans;
  h_spans.push_back({input_values.begin<T>(), static_cast<size_t>(input_values.size())});
- auto spans = cudf::detail::make_device_uvector_async(h_spans, cudf::get_default_stream());
+ auto spans = cudf::detail::make_device_uvector_async(
+ h_spans, cudf::get_default_stream(), rmm::mr::get_current_device_resource());

  auto expected_min = cudf::make_fixed_width_column(
  data_type{type_id::FLOAT64}, spans.size(), mask_state::UNALLOCATED);
6 changes: 4 additions & 2 deletions cpp/src/copying/concatenate.cu
@@ -76,7 +76,8 @@ auto create_device_views(host_span<column_view const> views, rmm::cuda_stream_vi
  std::back_inserter(device_views),
  [](auto const& col) { return *col; });

- auto d_views = make_device_uvector_async(device_views, stream);
+ auto d_views =
+ make_device_uvector_async(device_views, stream, rmm::mr::get_current_device_resource());

  // Compute the partition offsets
  auto offsets = thrust::host_vector<size_t>(views.size() + 1);
@@ -87,7 +88,8 @@ auto create_device_views(host_span<column_view const> views, rmm::cuda_stream_vi
  std::next(offsets.begin()),
  [](auto const& col) { return col.size(); },
  thrust::plus{});
- auto d_offsets = make_device_uvector_async(offsets, stream);
+ auto d_offsets =
+ make_device_uvector_async(offsets, stream, rmm::mr::get_current_device_resource());
  auto const output_size = offsets.back();

  return std::make_tuple(
5 changes: 3 additions & 2 deletions cpp/src/dictionary/detail/concatenate.cu
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2020-2022, NVIDIA CORPORATION.
+ * Copyright (c) 2020-2023, NVIDIA CORPORATION.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -114,7 +114,8 @@ struct compute_children_offsets_fn {
  [](auto lhs, auto rhs) {
  return offsets_pair{lhs.first + rhs.first, lhs.second + rhs.second};
  });
- return cudf::detail::make_device_uvector_sync(offsets, stream);
+ return cudf::detail::make_device_uvector_sync(
+ offsets, stream, rmm::mr::get_current_device_resource());
  }

  private:
3 changes: 2 additions & 1 deletion cpp/src/groupby/hash/groupby.cu
@@ -481,7 +481,8 @@ void compute_single_pass_aggs(table_view const& keys,
  // prepare to launch kernel to do the actual aggregation
  auto d_sparse_table = mutable_table_device_view::create(sparse_table, stream);
  auto d_values = table_device_view::create(flattened_values, stream);
- auto const d_aggs = cudf::detail::make_device_uvector_async(agg_kinds, stream);
+ auto const d_aggs = cudf::detail::make_device_uvector_async(
+ agg_kinds, stream, rmm::mr::get_current_device_resource());
  auto const skip_key_rows_with_nulls =
  keys_have_nulls and include_null_keys == null_policy::EXCLUDE;

5 changes: 3 additions & 2 deletions cpp/src/groupby/sort/group_quantiles.cu
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2019-2022, NVIDIA CORPORATION.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -156,7 +156,8 @@ std::unique_ptr<column> group_quantiles(column_view const& values,
  rmm::cuda_stream_view stream,
  rmm::mr::device_memory_resource* mr)
  {
- auto dv_quantiles = cudf::detail::make_device_uvector_async(quantiles, stream);
+ auto dv_quantiles = cudf::detail::make_device_uvector_async(
+ quantiles, stream, rmm::mr::get_current_device_resource());

  auto values_type = cudf::is_dictionary(values.type())
  ? dictionary_column_view(values).keys().type()
10 changes: 5 additions & 5 deletions cpp/src/hash/unordered_multiset.cuh
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -84,10 +84,10 @@ class unordered_multiset {
  auto d_column = column_device_view::create(col, stream);
  auto d_col = *d_column;

- auto hash_bins_start =
- cudf::detail::make_zeroed_device_uvector_async<size_type>(2 * d_col.size() + 1, stream);
- auto hash_bins_end =
- cudf::detail::make_zeroed_device_uvector_async<size_type>(2 * d_col.size() + 1, stream);
+ auto hash_bins_start = cudf::detail::make_zeroed_device_uvector_async<size_type>(
+ 2 * d_col.size() + 1, stream, rmm::mr::get_current_device_resource());
+ auto hash_bins_end = cudf::detail::make_zeroed_device_uvector_async<size_type>(
+ 2 * d_col.size() + 1, stream, rmm::mr::get_current_device_resource());
  auto hash_data = rmm::device_uvector<Element>(d_col.size(), stream);

  Hasher hasher;
9 changes: 6 additions & 3 deletions cpp/src/io/avro/reader_impl.cu
@@ -444,7 +444,8 @@ std::vector<column_buffer> decode_data(metadata& meta,
  }
  }

- auto block_list = cudf::detail::make_device_uvector_async(meta.block_list, stream);
+ auto block_list = cudf::detail::make_device_uvector_async(
+ meta.block_list, stream, rmm::mr::get_current_device_resource());

  schema_desc.host_to_device(stream);

@@ -574,8 +575,10 @@ table_with_metadata read_avro(std::unique_ptr<cudf::io::datasource>&& source,
  }
  }

- d_global_dict = cudf::detail::make_device_uvector_async(h_global_dict, stream);
- d_global_dict_data = cudf::detail::make_device_uvector_async(h_global_dict_data, stream);
+ d_global_dict = cudf::detail::make_device_uvector_async(
+ h_global_dict, stream, rmm::mr::get_current_device_resource());
+ d_global_dict_data = cudf::detail::make_device_uvector_async(
+ h_global_dict_data, stream, rmm::mr::get_current_device_resource());

  stream.synchronize();
  }