Remove default parameters from detail headers in include (#12888)
Contributes to #9854. None of these changes should affect users, nor do they impose a particularly onerous burden on libcudf developers (just some extra passing through of `mr` or `cudf::get_default_stream()`).
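
For a concrete picture of what that pass-through looks like, here is a minimal sketch (not part of this commit) of a libcudf-internal call site once the defaults are removed. The wrapper function `sanitize` is hypothetical; the detail API and the explicitly forwarded stream/`mr` arguments match the signatures changed in this diff.

```cpp
// Hypothetical libcudf-internal helper; it only illustrates the call-site pattern.
#include <cudf/column/column.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/utilities/default_stream.hpp>

#include <rmm/mr/device/per_device_resource.hpp>

#include <memory>

std::unique_ptr<cudf::column> sanitize(cudf::column_view const& input)
{
  // Previously the trailing mr argument could be omitted because the detail header
  // defaulted it to rmm::mr::get_current_device_resource() (and some detail APIs also
  // defaulted the stream). With the defaults gone, the caller forwards both explicitly.
  return cudf::detail::purge_nonempty_nulls(
    input, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
}
```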

Authors:
  - Vyas Ramasubramani (https://github.com/vyasr)

Approvers:
  - Nghia Truong (https://github.com/ttnghia)
  - David Wendt (https://github.com/davidwendt)

URL: #12888
vyasr authored Mar 13, 2023
1 parent 0723f3f commit 9c9dd54
Showing 20 changed files with 270 additions and 263 deletions.
4 changes: 3 additions & 1 deletion cpp/benchmarks/common/generate_input.cu
@@ -31,6 +31,7 @@

#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/per_device_resource.hpp>

#include <thrust/binary_search.h>
#include <thrust/copy.h>
@@ -542,7 +543,8 @@ std::unique_ptr<cudf::column> create_random_column<cudf::string_view>(data_profi
sample_indices,
cudf::out_of_bounds_policy::DONT_CHECK,
cudf::detail::negative_index_policy::NOT_ALLOWED,
cudf::get_default_stream());
cudf::get_default_stream(),
rmm::mr::get_current_device_resource());
return std::move(str_table->release()[0]);
}

121 changes: 55 additions & 66 deletions cpp/include/cudf/detail/copy.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -144,12 +144,11 @@ std::vector<table_view> split(table_view const& input,
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> shift(
column_view const& input,
size_type offset,
scalar const& fill_value,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<column> shift(column_view const& input,
size_type offset,
scalar const& fill_value,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @brief Performs segmented shifts for specified values.
@@ -184,24 +183,22 @@ std::unique_ptr<column> shift(
*
* @note If `offset == 0`, a copy of @p segmented_values is returned.
*/
std::unique_ptr<column> segmented_shift(
column_view const& segmented_values,
device_span<size_type const> segment_offsets,
size_type offset,
scalar const& fill_value,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<column> segmented_shift(column_view const& segmented_values,
device_span<size_type const> segment_offsets,
size_type offset,
scalar const& fill_value,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @copydoc cudf::contiguous_split
*
* @param stream CUDA stream used for device memory operations and kernel launches.
**/
std::vector<packed_table> contiguous_split(
cudf::table_view const& input,
std::vector<size_type> const& splits,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::vector<packed_table> contiguous_split(cudf::table_view const& input,
std::vector<size_type> const& splits,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @copydoc cudf::pack
@@ -210,96 +207,89 @@ std::vector<packed_table> contiguous_split(
**/
packed_columns pack(cudf::table_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
rmm::mr::device_memory_resource* mr);

/**
* @copydoc cudf::allocate_like(column_view const&, size_type, mask_allocation_policy,
* rmm::mr::device_memory_resource*)
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> allocate_like(
column_view const& input,
size_type size,
mask_allocation_policy mask_alloc = mask_allocation_policy::RETAIN,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<column> allocate_like(column_view const& input,
size_type size,
mask_allocation_policy mask_alloc,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @copydoc cudf::copy_if_else( column_view const&, column_view const&,
* column_view const&, rmm::mr::device_memory_resource*)
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> copy_if_else(
column_view const& lhs,
column_view const& rhs,
column_view const& boolean_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<column> copy_if_else(column_view const& lhs,
column_view const& rhs,
column_view const& boolean_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @copydoc cudf::copy_if_else( scalar const&, column_view const&,
* column_view const&, rmm::mr::device_memory_resource*)
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> copy_if_else(
scalar const& lhs,
column_view const& rhs,
column_view const& boolean_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<column> copy_if_else(scalar const& lhs,
column_view const& rhs,
column_view const& boolean_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @copydoc cudf::copy_if_else( column_view const&, scalar const&,
* column_view const&, rmm::mr::device_memory_resource*)
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> copy_if_else(
column_view const& lhs,
scalar const& rhs,
column_view const& boolean_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<column> copy_if_else(column_view const& lhs,
scalar const& rhs,
column_view const& boolean_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @copydoc cudf::copy_if_else( scalar const&, scalar const&,
* column_view const&, rmm::mr::device_memory_resource*)
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> copy_if_else(
scalar const& lhs,
scalar const& rhs,
column_view const& boolean_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<column> copy_if_else(scalar const& lhs,
scalar const& rhs,
column_view const& boolean_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @copydoc cudf::sample
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<table> sample(
table_view const& input,
size_type const n,
sample_with_replacement replacement = sample_with_replacement::FALSE,
int64_t const seed = 0,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<table> sample(table_view const& input,
size_type const n,
sample_with_replacement replacement,
int64_t const seed,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @copydoc cudf::get_element
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<scalar> get_element(
column_view const& input,
size_type index,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<scalar> get_element(column_view const& input,
size_type index,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @copydoc cudf::has_nonempty_nulls
@@ -320,10 +310,9 @@ bool may_have_nonempty_nulls(column_view const& input, rmm::cuda_stream_view str
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> purge_nonempty_nulls(
column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<column> purge_nonempty_nulls(column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

} // namespace detail
} // namespace cudf
15 changes: 7 additions & 8 deletions cpp/include/cudf/detail/gather.cuh
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -647,13 +647,12 @@ void gather_bitmask(table_view const& source,
* @return cudf::table Result of the gather
*/
template <typename MapIterator>
std::unique_ptr<table> gather(
table_view const& source_table,
MapIterator gather_map_begin,
MapIterator gather_map_end,
out_of_bounds_policy bounds_policy = out_of_bounds_policy::DONT_CHECK,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
std::unique_ptr<table> gather(table_view const& source_table,
MapIterator gather_map_begin,
MapIterator gather_map_end,
out_of_bounds_policy bounds_policy,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
std::vector<std::unique_ptr<column>> destination_columns;

28 changes: 13 additions & 15 deletions cpp/include/cudf/detail/gather.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -61,13 +61,12 @@ enum class negative_index_policy : bool { ALLOWED, NOT_ALLOWED };
* @param[in] mr Device memory resource used to allocate the returned table's device memory
* @return Result of the gather
*/
std::unique_ptr<table> gather(
table_view const& source_table,
column_view const& gather_map,
out_of_bounds_policy bounds_policy,
negative_index_policy neg_indices,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<table> gather(table_view const& source_table,
column_view const& gather_map,
out_of_bounds_policy bounds_policy,
negative_index_policy neg_indices,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @copydoc cudf::detail::gather(table_view const&,column_view const&,table_view
@@ -76,13 +75,12 @@ std::unique_ptr<table> gather(
*
* @throws cudf::logic_error if `gather_map` span size is larger than max of `size_type`.
*/
std::unique_ptr<table> gather(
table_view const& source_table,
device_span<size_type const> const gather_map,
out_of_bounds_policy bounds_policy,
negative_index_policy neg_indices,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<table> gather(table_view const& source_table,
device_span<size_type const> const gather_map,
out_of_bounds_policy bounds_policy,
negative_index_policy neg_indices,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

} // namespace detail
} // namespace cudf
38 changes: 17 additions & 21 deletions cpp/include/cudf/detail/hashing.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -31,29 +31,25 @@ namespace detail {
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> hash(
table_view const& input,
hash_id hash_function = hash_id::HASH_MURMUR3,
uint32_t seed = cudf::DEFAULT_HASH_SEED,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<column> hash(table_view const& input,
hash_id hash_function,
uint32_t seed,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

std::unique_ptr<column> murmur_hash3_32(
table_view const& input,
uint32_t seed = cudf::DEFAULT_HASH_SEED,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<column> murmur_hash3_32(table_view const& input,
uint32_t seed,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource* mr);

std::unique_ptr<column> spark_murmur_hash3_32(
table_view const& input,
uint32_t seed = cudf::DEFAULT_HASH_SEED,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<column> spark_murmur_hash3_32(table_view const& input,
uint32_t seed,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource* mr);

std::unique_ptr<column> md5_hash(
table_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<column> md5_hash(table_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/* Copyright 2005-2014 Daniel James.
*
8 changes: 4 additions & 4 deletions cpp/include/cudf/detail/interop.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -111,9 +111,9 @@ data_type arrow_to_cudf_type(arrow::DataType const& arrow_type);
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::shared_ptr<arrow::Table> to_arrow(table_view input,
std::vector<column_metadata> const& metadata = {},
rmm::cuda_stream_view stream = cudf::get_default_stream(),
arrow::MemoryPool* ar_mr = arrow::default_memory_pool());
std::vector<column_metadata> const& metadata,
rmm::cuda_stream_view stream,
arrow::MemoryPool* ar_mr);

/**
* @copydoc cudf::arrow_to_cudf