Move detail reduction functions to cudf::reduction::detail namespace #12971

Merged: 13 commits, Mar 23, 2023
Changes from 6 commits
4 changes: 2 additions & 2 deletions conda/recipes/libcudf/meta.yaml
@@ -113,7 +113,6 @@ outputs:
- test -f $PREFIX/include/cudf/detail/nvtx/nvtx3.hpp
- test -f $PREFIX/include/cudf/detail/nvtx/ranges.hpp
- test -f $PREFIX/include/cudf/detail/quantiles.hpp
- test -f $PREFIX/include/cudf/detail/reduction_functions.hpp
- test -f $PREFIX/include/cudf/detail/repeat.hpp
- test -f $PREFIX/include/cudf/detail/replace.hpp
- test -f $PREFIX/include/cudf/detail/reshape.hpp
@@ -122,7 +121,6 @@
- test -f $PREFIX/include/cudf/detail/scan.hpp
- test -f $PREFIX/include/cudf/detail/scatter.hpp
- test -f $PREFIX/include/cudf/detail/search.hpp
- test -f $PREFIX/include/cudf/detail/segmented_reduction_functions.hpp
- test -f $PREFIX/include/cudf/detail/sequence.hpp
- test -f $PREFIX/include/cudf/detail/sorting.hpp
- test -f $PREFIX/include/cudf/detail/stream_compaction.hpp
@@ -215,6 +213,8 @@ outputs:
- test -f $PREFIX/include/cudf/partitioning.hpp
- test -f $PREFIX/include/cudf/quantiles.hpp
- test -f $PREFIX/include/cudf/reduction.hpp
- test -f $PREFIX/include/cudf/reduction/detail/reduction_functions.hpp
- test -f $PREFIX/include/cudf/reduction/detail/segmented_reduction_functions.hpp
- test -f $PREFIX/include/cudf/replace.hpp
- test -f $PREFIX/include/cudf/reshape.hpp
- test -f $PREFIX/include/cudf/rolling.hpp
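For orientation, here is a minimal sketch of the include lines an internal libcudf source file would use after this move. The header paths are taken from the recipe tests above; any surrounding usage is an assumption, not part of this PR.

// New install locations of the internal reduction detail headers
// (paths confirmed by the conda recipe tests above); these remain
// detail headers intended for libcudf-internal use.
#include <cudf/reduction/detail/reduction_functions.hpp>
#include <cudf/reduction/detail/segmented_reduction_functions.hpp>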
@@ -16,7 +16,7 @@

#pragma once

#include <cudf/detail/reduction_operators.cuh>
#include "reduction_operators.cuh"

#include <cudf/column/column_factories.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
@@ -31,6 +31,8 @@
#include <thrust/for_each.h>
#include <thrust/iterator/iterator_traits.h>

#include <optional>

namespace cudf {
namespace reduction {
namespace detail {
@@ -27,6 +27,7 @@

namespace cudf {
namespace reduction {
namespace detail {
/**
* @brief Computes sum of elements in input column
*
@@ -42,12 +43,11 @@ namespace reduction {
* @param mr Device memory resource used to allocate the returned scalar's device memory
* @return Sum as scalar of type `output_dtype`
*/
std::unique_ptr<scalar> sum(
column_view const& col,
data_type const output_dtype,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<scalar> sum(column_view const& col,
data_type const output_dtype,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @brief Computes minimum of elements in input column
@@ -63,12 +63,11 @@ std::unique_ptr<scalar> sum(
* @param mr Device memory resource used to allocate the returned scalar's device memory
* @return Minimum element as scalar of type `output_dtype`
*/
std::unique_ptr<scalar> min(
column_view const& col,
data_type const output_dtype,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<scalar> min(column_view const& col,
data_type const output_dtype,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @brief Computes maximum of elements in input column
@@ -84,12 +83,11 @@ std::unique_ptr<scalar> min(
* @param mr Device memory resource used to allocate the returned scalar's device memory
* @return Maximum element as scalar of type `output_dtype`
*/
std::unique_ptr<scalar> max(
column_view const& col,
data_type const output_dtype,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<scalar> max(column_view const& col,
data_type const output_dtype,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @brief Checks whether any element in the input column is true when cast to bool
@@ -106,12 +104,11 @@ std::unique_ptr<scalar> max(
* @param mr Device memory resource used to allocate the returned scalar's device memory
* @return bool scalar indicating whether any element is true when cast to bool
*/
std::unique_ptr<scalar> any(
column_view const& col,
data_type const output_dtype,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<scalar> any(column_view const& col,
data_type const output_dtype,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @brief Checks whether all elements in the input column are true when cast to bool
@@ -128,12 +125,11 @@ std::unique_ptr<scalar> any(
* @param mr Device memory resource used to allocate the returned scalar's device memory
* @return bool scalar indicating whether all elements are true when cast to bool
*/
std::unique_ptr<scalar> all(
column_view const& col,
data_type const output_dtype,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<scalar> all(column_view const& col,
data_type const output_dtype,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @brief Computes product of elements in input column
@@ -150,12 +146,11 @@ std::unique_ptr<scalar> all(
* @param mr Device memory resource used to allocate the returned scalar's device memory
* @return Product as scalar of type `output_dtype`
*/
std::unique_ptr<scalar> product(
column_view const& col,
data_type const output_dtype,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<scalar> product(column_view const& col,
data_type const output_dtype,
std::optional<std::reference_wrapper<scalar const>> init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @brief Computes sum of squares of elements in input column
@@ -171,11 +166,10 @@ std::unique_ptr<scalar> product(
* @param mr Device memory resource used to allocate the returned scalar's device memory
* @return Sum of squares as scalar of type `output_dtype`
*/
std::unique_ptr<scalar> sum_of_squares(
column_view const& col,
data_type const output_dtype,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<scalar> sum_of_squares(column_view const& col,
data_type const output_dtype,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @brief Computes mean of elements in input column
@@ -191,11 +185,10 @@ std::unique_ptr<scalar> sum_of_squares(
* @param mr Device memory resource used to allocate the returned scalar's device memory
* @return Mean as scalar of type `output_dtype`
*/
std::unique_ptr<scalar> mean(
column_view const& col,
data_type const output_dtype,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<scalar> mean(column_view const& col,
data_type const output_dtype,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @brief Computes variance of elements in input column
@@ -213,12 +206,11 @@ std::unique_ptr<scalar> mean(
* @param mr Device memory resource used to allocate the returned scalar's device memory
* @return Variance as scalar of type `output_dtype`
*/
std::unique_ptr<scalar> variance(
column_view const& col,
data_type const output_dtype,
size_type ddof,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<scalar> variance(column_view const& col,
data_type const output_dtype,
size_type ddof,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @brief Computes standard deviation of elements in input column
@@ -236,12 +228,11 @@ std::unique_ptr<scalar> variance(
* @param mr Device memory resource used to allocate the returned scalar's device memory
* @return Standard deviation as scalar of type `output_dtype`
*/
std::unique_ptr<scalar> standard_deviation(
column_view const& col,
data_type const output_dtype,
size_type ddof,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<scalar> standard_deviation(column_view const& col,
data_type const output_dtype,
size_type ddof,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @brief Returns nth element in input column
@@ -267,12 +258,11 @@ std::unique_ptr<scalar> standard_deviation(
* @param mr Device memory resource used to allocate the returned scalar's device memory
* @return nth element as scalar
*/
std::unique_ptr<scalar> nth_element(
column_view const& col,
size_type n,
null_policy null_handling,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<scalar> nth_element(column_view const& col,
size_type n,
null_policy null_handling,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @brief Collect input column into a (list) scalar
@@ -283,11 +273,10 @@ std::unique_ptr<scalar> nth_element(
* @param mr Device memory resource used to allocate the returned scalar's device memory
* @return collected list as scalar
*/
std::unique_ptr<scalar> collect_list(
column_view const& col,
null_policy null_handling,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<scalar> collect_list(column_view const& col,
null_policy null_handling,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @brief Merges multiple list scalars into a single list scalar
@@ -297,10 +286,9 @@ std::unique_ptr<scalar> collect_list(
* @param mr Device memory resource used to allocate the returned scalar's device memory
* @return merged list as scalar
*/
std::unique_ptr<scalar> merge_lists(
lists_column_view const& col,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<scalar> merge_lists(lists_column_view const& col,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @brief Collect input column into a (list) scalar without duplicated elements
@@ -313,13 +301,12 @@ std::unique_ptr<scalar> merge_lists(
* @param mr Device memory resource used to allocate the returned scalar's device memory
* @return collected list with unique elements as scalar
*/
std::unique_ptr<scalar> collect_set(
column_view const& col,
null_policy null_handling,
null_equality nulls_equal,
nan_equality nans_equal,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<scalar> collect_set(column_view const& col,
null_policy null_handling,
null_equality nulls_equal,
nan_equality nans_equal,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

/**
* @brief Merges multiple list scalars into a single list scalar, then drops duplicated elements
@@ -331,12 +318,12 @@ std::unique_ptr<scalar> collect_set(
* @param mr Device memory resource used to allocate the returned scalar's device memory
* @return collected list with unique elements as scalar
*/
std::unique_ptr<scalar> merge_sets(
lists_column_view const& col,
null_equality nulls_equal,
nan_equality nans_equal,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
std::unique_ptr<scalar> merge_sets(lists_column_view const& col,
null_equality nulls_equal,
nan_equality nans_equal,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);

} // namespace detail
} // namespace reduction
} // namespace cudf
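To illustrate the effect of these declaration changes, here is a minimal, hypothetical caller, not part of this PR. The wrapper name sum_as_int64 is assumed; the cudf::reduction::detail::sum signature, the explicit stream argument, and the now-required explicit memory resource come from the header above.

// Hypothetical internal caller sketch: the reductions now live in
// cudf::reduction::detail and no longer default the memory resource,
// so both stream and mr are passed explicitly.
#include <cudf/reduction/detail/reduction_functions.hpp>

#include <cudf/column/column_view.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/types.hpp>

#include <rmm/cuda_stream_view.hpp>
#include <rmm/mr/device/per_device_resource.hpp>

#include <memory>
#include <optional>

std::unique_ptr<cudf::scalar> sum_as_int64(cudf::column_view const& col,
                                           rmm::cuda_stream_view stream)
{
  // Request the sum as an INT64 scalar, with no initial value.
  return cudf::reduction::detail::sum(col,
                                      cudf::data_type{cudf::type_id::INT64},
                                      std::nullopt,  // no initial value
                                      stream,
                                      rmm::mr::get_current_device_resource());
}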
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -26,6 +26,7 @@

namespace cudf {
namespace reduction {
namespace detail {
// intermediate data structure to compute `var`, `std`
template <typename ResultType>
struct var_std {
@@ -244,7 +245,7 @@ struct variance : public compound_op<variance> {
using op = cudf::DeviceSum;

template <typename ResultType>
using transformer = cudf::reduction::transformer_var_std<ResultType>;
using transformer = cudf::reduction::detail::transformer_var_std<ResultType>;

template <typename ResultType>
struct intermediate {
Expand All @@ -270,7 +271,7 @@ struct standard_deviation : public compound_op<standard_deviation> {
using op = cudf::DeviceSum;

template <typename ResultType>
using transformer = cudf::reduction::transformer_var_std<ResultType>;
using transformer = cudf::reduction::detail::transformer_var_std<ResultType>;

template <typename ResultType>
struct intermediate {
@@ -288,7 +289,7 @@ struct standard_deviation : public compound_op<standard_deviation> {
};
};
};

} // namespace op
} // namespace detail
} // namespace reduction
} // namespace cudf
@@ -16,7 +16,7 @@

#pragma once

#include <cudf/detail/reduction_operators.cuh>
#include "reduction_operators.cuh"

#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>