Remove default parameters from detail headers in include #12888

Merged
Changes from all commits (24 commits)
5b710d7  Remove some params. (vyasr, Mar 2, 2023)
a26045c  copy APIs. (vyasr, Mar 2, 2023)
fd5c0e9  Update arrow calls. (vyasr, Mar 2, 2023)
2d4c23f  stream_compaction. (vyasr, Mar 2, 2023)
385b629  Some binary_ops. (vyasr, Mar 2, 2023)
1cbc3ea  Merge remote-tracking branch 'origin/branch-23.04' into feat/remove_u… (vyasr, Mar 6, 2023)
2f24667  Some stream_compaction. (vyasr, Mar 6, 2023)
af42347  Merge remote-tracking branch 'origin/branch-23.04' into feat/remove_u… (vyasr, Mar 7, 2023)
a50bf6b  Use current memory resource for temporaries instead of user-provided … (vyasr, Mar 7, 2023)
ba57a94  Bump (vyasr, Mar 8, 2023)
7471f60  Merge branch 'branch-23.04' into feat/remove_unnecessary_default_para… (vyasr, Mar 8, 2023)
fc227b7  Remove unused mr parameters. (vyasr, Mar 8, 2023)
d0371d1  Remove one more. (vyasr, Mar 8, 2023)
c6ad4cf  Reorder includes. (vyasr, Mar 9, 2023)
05d7fb3  Merge branch 'branch-23.04' into feat/remove_unnecessary_default_para… (vyasr, Mar 9, 2023)
489985b  Update java call. (vyasr, Mar 9, 2023)
146afb2  clang-format. (vyasr, Mar 9, 2023)
394dee0  Merge branch 'branch-23.04' into feat/remove_unnecessary_default_para… (vyasr, Mar 9, 2023)
91cb177  Update cpp/src/stream_compaction/distinct.cu (vyasr, Mar 10, 2023)
791d873  Go back to calling detail::gather and pass the now required parameters. (vyasr, Mar 13, 2023)
da9ba5f  Merge branch 'feat/remove_unnecessary_default_parameters' of github.c… (vyasr, Mar 13, 2023)
3a3e8f3  Remove default mr from detail::gather and update all call sites. (vyasr, Mar 13, 2023)
31f587b  Merge remote-tracking branch 'origin/branch-23.04' into feat/remove_u… (vyasr, Mar 13, 2023)
3deb09c  Apply suggestions from code review (vyasr, Mar 13, 2023)
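For context, a minimal sketch of what this change means for internal call sites. This is illustrative only and not taken from the diff; the wrapper function name is hypothetical. Once the defaulted mr argument is removed from the detail headers, callers must spell out both the stream and the memory resource.

```cpp
// Illustrative sketch only: a detail call site after the defaulted mr
// argument is removed. The wrapper function is hypothetical.
#include <cudf/column/column.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/utilities/default_stream.hpp>

#include <rmm/mr/device/per_device_resource.hpp>

std::unique_ptr<cudf::column> purge_example(cudf::column_view const& input)
{
  // Before: cudf::detail::purge_nonempty_nulls(input, stream);
  //         (mr silently defaulted to rmm::mr::get_current_device_resource())
  // After: both trailing arguments are required.
  return cudf::detail::purge_nonempty_nulls(
    input, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
}
```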
4 changes: 3 additions & 1 deletion cpp/benchmarks/common/generate_input.cu
@@ -31,6 +31,7 @@

#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/per_device_resource.hpp>

#include <thrust/binary_search.h>
#include <thrust/copy.h>
@@ -542,7 +543,8 @@ std::unique_ptr<cudf::column> create_random_column<cudf::string_view>(data_profi
sample_indices,
cudf::out_of_bounds_policy::DONT_CHECK,
cudf::detail::negative_index_policy::NOT_ALLOWED,
cudf::get_default_stream());
cudf::get_default_stream(),
rmm::mr::get_current_device_resource());
return std::move(str_table->release()[0]);
}

121 changes: 55 additions & 66 deletions cpp/include/cudf/detail/copy.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -144,12 +144,11 @@ std::vector<table_view> split(table_view const& input,
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> shift(
column_view const& input,
size_type offset,
scalar const& fill_value,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<column> shift(column_view const& input,
size_type offset,
scalar const& fill_value,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @brief Performs segmented shifts for specified values.
@@ -184,24 +183,22 @@ std::unique_ptr<column> shift(
*
* @note If `offset == 0`, a copy of @p segmented_values is returned.
*/
std::unique_ptr<column> segmented_shift(
column_view const& segmented_values,
device_span<size_type const> segment_offsets,
size_type offset,
scalar const& fill_value,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<column> segmented_shift(column_view const& segmented_values,
device_span<size_type const> segment_offsets,
size_type offset,
scalar const& fill_value,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @copydoc cudf::contiguous_split
*
* @param stream CUDA stream used for device memory operations and kernel launches.
**/
std::vector<packed_table> contiguous_split(
cudf::table_view const& input,
std::vector<size_type> const& splits,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::vector<packed_table> contiguous_split(cudf::table_view const& input,
std::vector<size_type> const& splits,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @copydoc cudf::pack
@@ -210,96 +207,89 @@ std::vector<packed_table> contiguous_split(
**/
packed_columns pack(cudf::table_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
rmm::mr::device_memory_resource* mr);

/**
* @copydoc cudf::allocate_like(column_view const&, size_type, mask_allocation_policy,
* rmm::mr::device_memory_resource*)
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> allocate_like(
column_view const& input,
size_type size,
mask_allocation_policy mask_alloc = mask_allocation_policy::RETAIN,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<column> allocate_like(column_view const& input,
size_type size,
mask_allocation_policy mask_alloc,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @copydoc cudf::copy_if_else( column_view const&, column_view const&,
* column_view const&, rmm::mr::device_memory_resource*)
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> copy_if_else(
column_view const& lhs,
column_view const& rhs,
column_view const& boolean_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<column> copy_if_else(column_view const& lhs,
column_view const& rhs,
column_view const& boolean_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @copydoc cudf::copy_if_else( scalar const&, column_view const&,
* column_view const&, rmm::mr::device_memory_resource*)
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> copy_if_else(
scalar const& lhs,
column_view const& rhs,
column_view const& boolean_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<column> copy_if_else(scalar const& lhs,
column_view const& rhs,
column_view const& boolean_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @copydoc cudf::copy_if_else( column_view const&, scalar const&,
* column_view const&, rmm::mr::device_memory_resource*)
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> copy_if_else(
column_view const& lhs,
scalar const& rhs,
column_view const& boolean_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<column> copy_if_else(column_view const& lhs,
scalar const& rhs,
column_view const& boolean_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @copydoc cudf::copy_if_else( scalar const&, scalar const&,
* column_view const&, rmm::mr::device_memory_resource*)
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> copy_if_else(
scalar const& lhs,
scalar const& rhs,
column_view const& boolean_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<column> copy_if_else(scalar const& lhs,
scalar const& rhs,
column_view const& boolean_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @copydoc cudf::sample
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<table> sample(
table_view const& input,
size_type const n,
sample_with_replacement replacement = sample_with_replacement::FALSE,
int64_t const seed = 0,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<table> sample(table_view const& input,
size_type const n,
sample_with_replacement replacement,
int64_t const seed,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @copydoc cudf::get_element
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<scalar> get_element(
column_view const& input,
size_type index,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<scalar> get_element(column_view const& input,
size_type index,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @copydoc cudf::has_nonempty_nulls
Expand All @@ -320,10 +310,9 @@ bool may_have_nonempty_nulls(column_view const& input, rmm::cuda_stream_view str
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> purge_nonempty_nulls(
column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<column> purge_nonempty_nulls(column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

} // namespace detail
} // namespace cudf
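A hedged usage sketch for the copy.hpp declarations above, assuming these include paths; the helper function is hypothetical. detail::shift now requires an explicit stream and mr at every call site.

```cpp
// Sketch, not part of this PR: calling cudf::detail::shift with the now
// mandatory stream and mr arguments. The helper function is hypothetical.
#include <cudf/column/column_view.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/utilities/default_stream.hpp>

#include <rmm/mr/device/per_device_resource.hpp>

std::unique_ptr<cudf::column> shift_down_two(cudf::column_view const& input,
                                             cudf::scalar const& fill_value)
{
  return cudf::detail::shift(input,
                             2,  // offset
                             fill_value,
                             cudf::get_default_stream(),
                             rmm::mr::get_current_device_resource());
}
```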
15 changes: 7 additions & 8 deletions cpp/include/cudf/detail/gather.cuh
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -647,13 +647,12 @@ void gather_bitmask(table_view const& source,
* @return cudf::table Result of the gather
*/
template <typename MapIterator>
std::unique_ptr<table> gather(
table_view const& source_table,
MapIterator gather_map_begin,
MapIterator gather_map_end,
out_of_bounds_policy bounds_policy = out_of_bounds_policy::DONT_CHECK,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
std::unique_ptr<table> gather(table_view const& source_table,
MapIterator gather_map_begin,
MapIterator gather_map_end,
out_of_bounds_policy bounds_policy,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
std::vector<std::unique_ptr<column>> destination_columns;

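For the templated overload above, a minimal sketch of a caller. This is an assumed pattern, compiled as CUDA C++ since gather.cuh is a .cuh header, and the helper function is hypothetical.

```cpp
// Hypothetical caller of the iterator-based detail::gather; every trailing
// argument, including the bounds policy, must now be supplied explicitly.
#include <cudf/detail/gather.cuh>
#include <cudf/utilities/default_stream.hpp>

#include <rmm/mr/device/per_device_resource.hpp>

#include <thrust/iterator/counting_iterator.h>

std::unique_ptr<cudf::table> take_first_rows(cudf::table_view const& input,
                                             cudf::size_type n)
{
  auto map_begin = thrust::make_counting_iterator<cudf::size_type>(0);
  return cudf::detail::gather(input,
                              map_begin,
                              map_begin + n,
                              cudf::out_of_bounds_policy::DONT_CHECK,
                              cudf::get_default_stream(),
                              rmm::mr::get_current_device_resource());
}
```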
28 changes: 13 additions & 15 deletions cpp/include/cudf/detail/gather.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -61,13 +61,12 @@ enum class negative_index_policy : bool { ALLOWED, NOT_ALLOWED };
* @param[in] mr Device memory resource used to allocate the returned table's device memory
* @return Result of the gather
*/
std::unique_ptr<table> gather(
table_view const& source_table,
column_view const& gather_map,
out_of_bounds_policy bounds_policy,
negative_index_policy neg_indices,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<table> gather(table_view const& source_table,
column_view const& gather_map,
out_of_bounds_policy bounds_policy,
negative_index_policy neg_indices,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @copydoc cudf::detail::gather(table_view const&,column_view const&,table_view
@@ -76,13 +75,12 @@ std::unique_ptr<table> gather(
*
* @throws cudf::logic_error if `gather_map` span size is larger than max of `size_type`.
*/
std::unique_ptr<table> gather(
table_view const& source_table,
device_span<size_type const> const gather_map,
out_of_bounds_policy bounds_policy,
negative_index_policy neg_indices,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<table> gather(table_view const& source_table,
device_span<size_type const> const gather_map,
out_of_bounds_policy bounds_policy,
negative_index_policy neg_indices,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

} // namespace detail
} // namespace cudf
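A sketch of the corresponding change at call sites of the column_view overload, mirroring commit 3a3e8f3 ("Remove default mr from detail::gather and update all call sites"). The wrapper below is hypothetical.

```cpp
// Hypothetical wrapper showing the column_view-based detail::gather overload
// called with every argument spelled out.
#include <cudf/detail/gather.hpp>
#include <cudf/utilities/default_stream.hpp>

#include <rmm/mr/device/per_device_resource.hpp>

std::unique_ptr<cudf::table> gather_rows(cudf::table_view const& source,
                                         cudf::column_view const& gather_map)
{
  return cudf::detail::gather(source,
                              gather_map,
                              cudf::out_of_bounds_policy::NULLIFY,
                              cudf::detail::negative_index_policy::NOT_ALLOWED,
                              cudf::get_default_stream(),
                              rmm::mr::get_current_device_resource());
}
```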
38 changes: 17 additions & 21 deletions cpp/include/cudf/detail/hashing.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -31,29 +31,25 @@ namespace detail {
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> hash(
table_view const& input,
hash_id hash_function = hash_id::HASH_MURMUR3,
uint32_t seed = cudf::DEFAULT_HASH_SEED,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<column> hash(table_view const& input,
hash_id hash_function,
uint32_t seed,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

std::unique_ptr<column> murmur_hash3_32(
table_view const& input,
uint32_t seed = cudf::DEFAULT_HASH_SEED,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<column> murmur_hash3_32(table_view const& input,
uint32_t seed,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource* mr);

std::unique_ptr<column> spark_murmur_hash3_32(
table_view const& input,
uint32_t seed = cudf::DEFAULT_HASH_SEED,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<column> spark_murmur_hash3_32(table_view const& input,
uint32_t seed,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource* mr);

std::unique_ptr<column> md5_hash(
table_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<column> md5_hash(table_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/* Copyright 2005-2014 Daniel James.
*
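With the defaults above gone, internal hashing call sites must pass the algorithm, seed, stream, and mr explicitly. A minimal sketch follows, with assumed include paths and a hypothetical helper.

```cpp
// Sketch only: detail::hash with the previously defaulted arguments passed
// explicitly (HASH_MURMUR3 and DEFAULT_HASH_SEED were the old defaults).
#include <cudf/detail/hashing.hpp>
#include <cudf/utilities/default_stream.hpp>

#include <rmm/mr/device/per_device_resource.hpp>

std::unique_ptr<cudf::column> hash_rows(cudf::table_view const& input)
{
  return cudf::detail::hash(input,
                            cudf::hash_id::HASH_MURMUR3,
                            cudf::DEFAULT_HASH_SEED,
                            cudf::get_default_stream(),
                            rmm::mr::get_current_device_resource());
}
```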
8 changes: 4 additions & 4 deletions cpp/include/cudf/detail/interop.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -111,9 +111,9 @@ data_type arrow_to_cudf_type(arrow::DataType const& arrow_type);
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::shared_ptr<arrow::Table> to_arrow(table_view input,
std::vector<column_metadata> const& metadata = {},
rmm::cuda_stream_view stream = cudf::get_default_stream(),
arrow::MemoryPool* ar_mr = arrow::default_memory_pool());
std::vector<column_metadata> const& metadata,
rmm::cuda_stream_view stream,
arrow::MemoryPool* ar_mr);

/**
* @copydoc cudf::arrow_to_cudf
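Finally, detail::to_arrow now takes its metadata, stream, and Arrow memory pool explicitly as well. A hedged sketch follows; the metadata handling and the helper function are illustrative assumptions, not taken from the diff.

```cpp
// Hypothetical caller of detail::to_arrow after the defaults are removed.
#include <cudf/detail/interop.hpp>
#include <cudf/interop.hpp>
#include <cudf/utilities/default_stream.hpp>

#include <arrow/memory_pool.h>

#include <vector>

std::shared_ptr<arrow::Table> export_table(cudf::table_view const& input)
{
  // One metadata entry per top-level column; names left empty for brevity.
  std::vector<cudf::column_metadata> metadata(input.num_columns());
  return cudf::detail::to_arrow(input,
                                metadata,
                                cudf::get_default_stream(),
                                arrow::default_memory_pool());
}
```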