From d3bee47118ac3d88c8d33d06055c5423d6270c70 Mon Sep 17 00:00:00 2001 From: Michael Schellenberger Costa Date: Mon, 18 Mar 2024 17:56:51 +0100 Subject: [PATCH] Replace usages of `thrust::optional` with `cuda::std::optional` --- .../cudf/ast/detail/expression_evaluator.cuh | 8 ++-- cpp/include/cudf/ast/detail/operators.hpp | 4 +- .../cudf/column/column_device_view.cuh | 14 +++--- cpp/include/cudf/detail/copy_if_else.cuh | 4 +- cpp/include/cudf/detail/indexalator.cuh | 12 ++--- cpp/include/cudf/detail/iterator.cuh | 12 ++--- cpp/include/cudf/json/json.hpp | 4 +- .../strings/detail/convert/fixed_point.cuh | 8 ++-- .../cudf/strings/detail/copy_if_else.cuh | 6 +-- .../cudf/table/experimental/row_operators.cuh | 6 +-- cpp/src/binaryop/binaryop.cpp | 4 +- cpp/src/io/json/legacy/json_gpu.cu | 6 +-- cpp/src/io/json/legacy/json_gpu.hpp | 4 +- cpp/src/io/json/legacy/reader_impl.cu | 2 +- cpp/src/io/orc/orc.hpp | 7 +-- cpp/src/io/orc/writer_impl.cu | 6 +-- .../io/parquet/compact_protocol_reader.cpp | 8 ++-- cpp/src/io/parquet/parquet.hpp | 48 +++++++++---------- cpp/src/io/parquet/parquet_gpu.hpp | 18 +++---- cpp/src/io/parquet/predicate_pushdown.cpp | 2 +- cpp/src/io/parquet/reader_impl_chunking.cu | 2 +- cpp/src/io/parquet/reader_impl_helpers.cpp | 2 +- cpp/src/io/parquet/writer_impl.cu | 8 ++-- cpp/src/json/json_path.cu | 22 ++++----- cpp/src/lists/contains.cu | 2 +- cpp/src/lists/explode.cu | 14 +++--- cpp/src/strings/convert/convert_datetime.cu | 10 ++-- .../strings/convert/convert_fixed_point.cu | 2 +- cpp/src/strings/regex/regex.cuh | 4 +- cpp/src/strings/regex/regex.inl | 6 +-- cpp/src/strings/replace/multi.cu | 6 +-- cpp/src/strings/replace/multi_re.cu | 2 +- cpp/src/transform/row_bit_count.cu | 18 +++---- cpp/tests/io/parquet_common.cpp | 2 +- cpp/tests/io/parquet_common.hpp | 2 +- cpp/tests/iterator/indexalator_test.cu | 11 +++-- cpp/tests/iterator/offsetalator_test.cu | 4 +- cpp/tests/iterator/optional_iterator_test.cuh | 25 +++++----- .../optional_iterator_test_numeric.cu | 10 ++-- 39 files changed, 169 insertions(+), 166 deletions(-) diff --git a/cpp/include/cudf/ast/detail/expression_evaluator.cuh b/cpp/include/cudf/ast/detail/expression_evaluator.cuh index 105d87ff96f..683f45fec58 100644 --- a/cpp/include/cudf/ast/detail/expression_evaluator.cuh +++ b/cpp/include/cudf/ast/detail/expression_evaluator.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2023, NVIDIA CORPORATION. + * Copyright (c) 2021-2024, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -29,7 +29,7 @@ #include -#include +#include namespace cudf { @@ -278,7 +278,7 @@ struct expression_evaluator { detail::device_data_reference const& input_reference, IntermediateDataType* thread_intermediate_storage, cudf::size_type left_row_index, - thrust::optional right_row_index = {}) const + cuda::std::optional right_row_index = {}) const { // TODO: Everywhere in the code assumes that the table reference is either // left or right. 
Should we error-check somewhere to prevent @@ -329,7 +329,7 @@ struct expression_evaluator { detail::device_data_reference const& device_data_reference, IntermediateDataType* thread_intermediate_storage, cudf::size_type left_row_index, - thrust::optional right_row_index = {}) const + cuda::std::optional right_row_index = {}) const { CUDF_UNREACHABLE("Unsupported type in resolve_input."); } diff --git a/cpp/include/cudf/ast/detail/operators.hpp b/cpp/include/cudf/ast/detail/operators.hpp index b618f33a6e5..03e3c02bb4d 100644 --- a/cpp/include/cudf/ast/detail/operators.hpp +++ b/cpp/include/cudf/ast/detail/operators.hpp @@ -20,8 +20,8 @@ #include #include +#include #include -#include #include #include @@ -41,7 +41,7 @@ struct possibly_null_value; template struct possibly_null_value { - using type = thrust::optional; + using type = cuda::std::optional; }; template diff --git a/cpp/include/cudf/column/column_device_view.cuh b/cpp/include/cudf/column/column_device_view.cuh index 19722d127cb..4a9f060ab46 100644 --- a/cpp/include/cudf/column/column_device_view.cuh +++ b/cpp/include/cudf/column/column_device_view.cuh @@ -32,9 +32,9 @@ #include +#include #include #include -#include #include #include @@ -614,7 +614,7 @@ class alignas(16) column_device_view : public detail::column_device_view_base { /** * @brief Return an optional iterator to the first element of the column. * - * Dereferencing the returned iterator returns a `thrust::optional`. + * Dereferencing the returned iterator returns a `cuda::std::optional`. * * The element of this iterator contextually converts to bool. The conversion returns true * if the object contains a value and false if it does not contain a value. @@ -739,7 +739,7 @@ class alignas(16) column_device_view : public detail::column_device_view_base { /** * @brief Return an optional iterator to the element following the last element of the column. * - * The returned iterator represents a `thrust::optional` element. + * The returned iterator represents a `cuda::std::optional` element. * * This function does not participate in overload resolution if * `column_device_view::has_element_accessor()` is false. @@ -1318,13 +1318,13 @@ struct optional_accessor { * @return A `thrust::optional` that contains the value of `column[i]` is not null. If that * element is null, the resulting optional will not contain a value. */ - __device__ inline thrust::optional operator()(cudf::size_type i) const + __device__ inline cuda::std::optional operator()(cudf::size_type i) const { if (has_nulls) { - return (col.is_valid_nocheck(i)) ? thrust::optional{col.element(i)} - : thrust::optional{thrust::nullopt}; + return (col.is_valid_nocheck(i)) ? cuda::std::optional{col.element(i)} + : cuda::std::optional{cuda::std::nullopt}; } - return thrust::optional{col.element(i)}; + return cuda::std::optional{col.element(i)}; } Nullate has_nulls{}; ///< Indicates if the `col` should be checked for nulls. diff --git a/cpp/include/cudf/detail/copy_if_else.cuh b/cpp/include/cudf/detail/copy_if_else.cuh index 6162fa5ecf1..01d5deb213d 100644 --- a/cpp/include/cudf/detail/copy_if_else.cuh +++ b/cpp/include/cudf/detail/copy_if_else.cuh @@ -24,8 +24,8 @@ #include +#include #include -#include namespace cudf { namespace detail { @@ -68,7 +68,7 @@ __launch_bounds__(block_size) CUDF_KERNEL size_type index = tid; while (warp_cur <= warp_end) { auto const opt_value = - (index < end) ? (filter(index) ? lhs[index] : rhs[index]) : thrust::nullopt; + (index < end) ? (filter(index) ? 
lhs[index] : rhs[index]) : cuda::std::nullopt; if (opt_value) { out.element(index) = static_cast(*opt_value); } // update validity diff --git a/cpp/include/cudf/detail/indexalator.cuh b/cpp/include/cudf/detail/indexalator.cuh index b5d57da6cd5..ec7b1c3e6b6 100644 --- a/cpp/include/cudf/detail/indexalator.cuh +++ b/cpp/include/cudf/detail/indexalator.cuh @@ -22,9 +22,9 @@ #include #include +#include #include #include -#include #include namespace cudf { @@ -376,10 +376,10 @@ struct indexalator_factory { iter = make_input_iterator(col); } - __device__ thrust::optional operator()(size_type i) const + __device__ cuda::std::optional operator()(size_type i) const { - return has_nulls && !bit_is_set(null_mask, i + offset) ? thrust::nullopt - : thrust::make_optional(iter[i]); + return has_nulls && !bit_is_set(null_mask, i + offset) ? cuda::std::nullopt + : cuda::std::make_optional(iter[i]); } }; @@ -400,9 +400,9 @@ struct indexalator_factory { iter = indexalator_factory::make_input_iterator(input); } - __device__ thrust::optional operator()(size_type) const + __device__ cuda::std::optional operator()(size_type) const { - return is_null ? thrust::nullopt : thrust::make_optional(*iter); + return is_null ? cuda::std::nullopt : cuda::std::make_optional(*iter); } }; diff --git a/cpp/include/cudf/detail/iterator.cuh b/cpp/include/cudf/detail/iterator.cuh index 9e6227ec19b..021531fa1e4 100644 --- a/cpp/include/cudf/detail/iterator.cuh +++ b/cpp/include/cudf/detail/iterator.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2023, NVIDIA CORPORATION. + * Copyright (c) 2019-2024, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -37,10 +37,10 @@ #include #include +#include #include #include #include -#include #include #include @@ -186,7 +186,7 @@ auto make_null_replacement_iterator(column_device_view const& column, /** * @brief Constructs an optional iterator over a column's values and its validity. * - * Dereferencing the returned iterator returns a `thrust::optional`. + * Dereferencing the returned iterator returns a `cuda::std::optional`. * * The element of this iterator contextually converts to bool. The conversion returns true * if the object contains a value and false if it does not contain a value. @@ -418,7 +418,7 @@ auto inline make_scalar_iterator(scalar const& scalar_value) template struct scalar_optional_accessor : public scalar_value_accessor { using super_t = scalar_value_accessor; - using value_type = thrust::optional; + using value_type = cuda::std::optional; scalar_optional_accessor(scalar const& scalar_value, Nullate with_nulls) : scalar_value_accessor(scalar_value), has_nulls{with_nulls} @@ -427,7 +427,7 @@ struct scalar_optional_accessor : public scalar_value_accessor { __device__ inline value_type const operator()(size_type) const { - if (has_nulls && !super_t::dscalar.is_valid()) { return value_type{thrust::nullopt}; } + if (has_nulls && !super_t::dscalar.is_valid()) { return value_type{cuda::std::nullopt}; } if constexpr (cudf::is_fixed_point()) { using namespace numeric; @@ -519,7 +519,7 @@ struct scalar_representation_pair_accessor : public scalar_value_accessor`. + * Dereferencing the returned iterator returns a `cuda::std::optional`. * * The element of this iterator contextually converts to bool. The conversion returns true * if the object contains a value and false if it does not contain a value. 
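
Aside on the optional-iterator semantics documented in the hunks above: `cuda::std::optional` keeps the same contextual-bool and `value_or` interface that `thrust::optional` exposed, so Thrust consumers of these iterators need no structural changes. A minimal, self-contained sketch (illustrative only, not cudf code; the functor and variable names are hypothetical, and it assumes a CUDA toolkit whose libcu++ ships `<cuda/std/optional>`):

```cpp
// Host-launched consumption of cuda::std::optional values via Thrust, mirroring
// the "contextually converts to bool" behaviour documented for the optional iterators.
#include <cuda/std/optional>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/transform_reduce.h>
#include <cstdio>

// Null-aware unwrap: a disengaged optional contributes 0 to the sum.
struct value_or_zero {
  __device__ int operator()(cuda::std::optional<int> const& v) const { return v.value_or(0); }
};

int main()
{
  thrust::device_vector<cuda::std::optional<int>> vals(4);
  vals[0] = 10;
  vals[1] = cuda::std::nullopt;  // a "null" element
  vals[2] = 30;
  vals[3] = 2;

  int const sum = thrust::transform_reduce(
    vals.begin(), vals.end(), value_or_zero{}, 0, thrust::plus<int>{});

  std::printf("null-aware sum: %d\n", sum);  // expected: 42
  return 0;
}
```

Since the `int` specialization is trivially copyable, the optionals should be storable directly in a `thrust::device_vector` as above, just as they were before the migration.
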
diff --git a/cpp/include/cudf/json/json.hpp b/cpp/include/cudf/json/json.hpp index 944e0c26dd6..1fec8c49da8 100644 --- a/cpp/include/cudf/json/json.hpp +++ b/cpp/include/cudf/json/json.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2023, NVIDIA CORPORATION. + * Copyright (c) 2021-2024, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,7 +20,7 @@ #include -#include +#include namespace cudf { diff --git a/cpp/include/cudf/strings/detail/convert/fixed_point.cuh b/cpp/include/cudf/strings/detail/convert/fixed_point.cuh index 5f51da967d3..8440805960e 100644 --- a/cpp/include/cudf/strings/detail/convert/fixed_point.cuh +++ b/cpp/include/cudf/strings/detail/convert/fixed_point.cuh @@ -17,8 +17,8 @@ #include +#include #include -#include #include namespace cudf { @@ -88,7 +88,7 @@ __device__ inline thrust::pair parse_integer( * @return Integer value of the exponent */ template -__device__ thrust::optional parse_exponent(char const* iter, char const* iter_end) +__device__ cuda::std::optional parse_exponent(char const* iter, char const* iter_end) { constexpr uint32_t exponent_max = static_cast(std::numeric_limits::max()); @@ -105,12 +105,12 @@ __device__ thrust::optional parse_exponent(char const* iter, char const while (iter < iter_end) { auto const ch = *iter++; if (ch < '0' || ch > '9') { - if (check_only) { return thrust::nullopt; } + if (check_only) { return cuda::std::nullopt; } break; } uint32_t exp_check = static_cast(exp_ten * 10) + static_cast(ch - '0'); - if (check_only && (exp_check > exponent_max)) { return thrust::nullopt; } // check overflow + if (check_only && (exp_check > exponent_max)) { return cuda::std::nullopt; } // check overflow exp_ten = static_cast(exp_check); } diff --git a/cpp/include/cudf/strings/detail/copy_if_else.cuh b/cpp/include/cudf/strings/detail/copy_if_else.cuh index 08ba99e90d8..b13abe480d0 100644 --- a/cpp/include/cudf/strings/detail/copy_if_else.cuh +++ b/cpp/include/cudf/strings/detail/copy_if_else.cuh @@ -24,8 +24,8 @@ #include #include +#include #include -#include #include namespace cudf { @@ -40,9 +40,9 @@ namespace detail { * ``` * * @tparam StringIterLeft A random access iterator whose value_type is - * `thrust::optional` where the `optional` has a value iff the element is valid. + * `cuda::std::optional` where the `optional` has a value iff the element is valid. * @tparam StringIterRight A random access iterator whose value_type is - * `thrust::optional` where the `optional` has a value iff the element is valid. + * `cuda::std::optional` where the `optional` has a value iff the element is valid. * @tparam Filter Functor that takes an index and returns a boolean. * * @param lhs_begin Start of first set of data. Used when `filter_fn` returns true. diff --git a/cpp/include/cudf/table/experimental/row_operators.cuh b/cpp/include/cudf/table/experimental/row_operators.cuh index e9b81a525fc..a93ffcf4c83 100644 --- a/cpp/include/cudf/table/experimental/row_operators.cuh +++ b/cpp/include/cudf/table/experimental/row_operators.cuh @@ -211,7 +211,7 @@ struct sorting_physical_element_comparator { } }; -using optional_dremel_view = thrust::optional; +using optional_dremel_view = cuda::std::optional; // The has_nested_columns template parameter of the device_row_comparator is // necessary to help the compiler optimize our code. 
Without it, the list and @@ -223,12 +223,12 @@ using optional_dremel_view = thrust::optional; // std::optional> in the // preprocessed_table/device_row_comparator (which is always valid when // has_nested_columns and is otherwise invalid) that is then unpacked to a -// thrust::optional at the element_comparator level (which +// cuda::std::optional at the element_comparator level (which // is always valid for a list column and otherwise invalid). We cannot use an // additional template parameter for the element_comparator on a per-column // basis because we cannot conditionally define dremel_device_view member // variables without jumping through extra hoops with inheritance, so the -// thrust::optional member must be an optional rather than +// cuda::std::optional member must be an optional rather than // a raw dremel_device_view. /** * @brief Computes the lexicographic comparison between 2 rows. diff --git a/cpp/src/binaryop/binaryop.cpp b/cpp/src/binaryop/binaryop.cpp index be91c3b4d08..eae852d0261 100644 --- a/cpp/src/binaryop/binaryop.cpp +++ b/cpp/src/binaryop/binaryop.cpp @@ -40,7 +40,7 @@ #include -#include +#include #include @@ -166,7 +166,7 @@ template void fixed_point_binary_operation_validation(binary_operator op, Lhs lhs, Rhs rhs, - thrust::optional output_type = {}) + cuda::std::optional output_type = {}) { CUDF_EXPECTS((is_fixed_point(lhs) or is_fixed_point(rhs)), "One of the inputs must have fixed_point data_type."); diff --git a/cpp/src/io/json/legacy/json_gpu.cu b/cpp/src/io/json/legacy/json_gpu.cu index ff4845fcecb..eceac2a6683 100644 --- a/cpp/src/io/json/legacy/json_gpu.cu +++ b/cpp/src/io/json/legacy/json_gpu.cu @@ -33,6 +33,7 @@ #include #include +#include #include #include #include @@ -40,7 +41,6 @@ #include #include #include -#include #include using cudf::device_span; @@ -484,7 +484,7 @@ CUDF_KERNEL void collect_keys_info_kernel(parse_options_view const options, device_span const data, device_span const row_offsets, unsigned long long int* keys_cnt, - thrust::optional keys_info) + cuda::std::optional keys_info) { auto const rec_id = grid_1d::global_thread_id(); if (rec_id >= row_offsets.size()) return; @@ -595,7 +595,7 @@ void collect_keys_info(parse_options_view const& options, device_span const data, device_span const row_offsets, unsigned long long int* keys_cnt, - thrust::optional keys_info, + cuda::std::optional keys_info, rmm::cuda_stream_view stream) { int block_size; diff --git a/cpp/src/io/json/legacy/json_gpu.hpp b/cpp/src/io/json/legacy/json_gpu.hpp index 853e30c9427..1565bc164c4 100644 --- a/cpp/src/io/json/legacy/json_gpu.hpp +++ b/cpp/src/io/json/legacy/json_gpu.hpp @@ -26,7 +26,7 @@ #include -#include +#include using cudf::device_span; @@ -93,7 +93,7 @@ void collect_keys_info(parse_options_view const& options, device_span data, device_span row_offsets, unsigned long long int* keys_cnt, - thrust::optional keys_info, + cuda::std::optional keys_info, rmm::cuda_stream_view stream); } // namespace cudf::io::json::detail::legacy diff --git a/cpp/src/io/json/legacy/reader_impl.cu b/cpp/src/io/json/legacy/reader_impl.cu index f9d0f6895b9..e47ebf5c2a7 100644 --- a/cpp/src/io/json/legacy/reader_impl.cu +++ b/cpp/src/io/json/legacy/reader_impl.cu @@ -40,12 +40,12 @@ #include #include +#include #include #include #include #include #include -#include #include #include #include diff --git a/cpp/src/io/orc/orc.hpp b/cpp/src/io/orc/orc.hpp index 88bd260a598..cd1c279ee2d 100644 --- a/cpp/src/io/orc/orc.hpp +++ b/cpp/src/io/orc/orc.hpp @@ -24,7 +24,7 @@ #include 
#include -#include +#include #include #include @@ -692,11 +692,12 @@ class metadata { * @brief `column_device_view` and additional, ORC specific, information on the column. */ struct orc_column_device_view : public column_device_view { - __device__ orc_column_device_view(column_device_view col, thrust::optional parent_idx) + __device__ orc_column_device_view(column_device_view col, + cuda::std::optional parent_idx) : column_device_view{col}, parent_index{parent_idx} { } - thrust::optional parent_index; + cuda::std::optional parent_index; bitmask_type const* pushdown_mask = nullptr; }; diff --git a/cpp/src/io/orc/writer_impl.cu b/cpp/src/io/orc/writer_impl.cu index ade0e75de35..8d1dbb2e3a7 100644 --- a/cpp/src/io/orc/writer_impl.cu +++ b/cpp/src/io/orc/writer_impl.cu @@ -42,6 +42,7 @@ #include #include #include +#include #include #include #include @@ -50,7 +51,6 @@ #include #include #include -#include #include #include #include @@ -1817,7 +1817,7 @@ orc_table_view make_orc_table_view(table_view const& table, type_kinds, stream, rmm::mr::get_current_device_resource()); rmm::device_uvector d_orc_columns(orc_columns.size(), stream); - using stack_value_type = thrust::pair>; + using stack_value_type = thrust::pair>; rmm::device_uvector stack_storage(orc_columns.size(), stream); // pre-order append ORC device columns @@ -1833,7 +1833,7 @@ orc_table_view make_orc_table_view(table_view const& table, thrust::make_reverse_iterator(d_table.end()), thrust::make_reverse_iterator(d_table.begin()), [&stack](column_device_view const& c) { - stack.push({&c, thrust::nullopt}); + stack.push({&c, cuda::std::nullopt}); }); uint32_t idx = 0; diff --git a/cpp/src/io/parquet/compact_protocol_reader.cpp b/cpp/src/io/parquet/compact_protocol_reader.cpp index d39d832c18c..c593a5dd6d1 100644 --- a/cpp/src/io/parquet/compact_protocol_reader.cpp +++ b/cpp/src/io/parquet/compact_protocol_reader.cpp @@ -300,10 +300,10 @@ class parquet_field_struct : public parquet_field { template class parquet_field_union_struct : public parquet_field { E& enum_val; - thrust::optional& val; // union structs are always wrapped in std::optional + cuda::std::optional& val; // union structs are always wrapped in std::optional public: - parquet_field_union_struct(int f, E& ev, thrust::optional& v) + parquet_field_union_struct(int f, E& ev, cuda::std::optional& v) : parquet_field(f), enum_val(ev), val(v) { } @@ -427,10 +427,10 @@ class parquet_field_struct_blob : public parquet_field { */ template class parquet_field_optional : public parquet_field { - thrust::optional& val; + cuda::std::optional& val; public: - parquet_field_optional(int f, thrust::optional& v) : parquet_field(f), val(v) {} + parquet_field_optional(int f, cuda::std::optional& v) : parquet_field(f), val(v) {} inline void operator()(CompactProtocolReader* cpr, int field_type) { diff --git a/cpp/src/io/parquet/parquet.hpp b/cpp/src/io/parquet/parquet.hpp index 08f9fae145b..be788fdb8be 100644 --- a/cpp/src/io/parquet/parquet.hpp +++ b/cpp/src/io/parquet/parquet.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2023, NVIDIA CORPORATION. + * Copyright (c) 2018-2024, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -20,7 +20,7 @@ #include -#include +#include #include #include @@ -94,10 +94,10 @@ struct LogicalType { BSON }; Type type; - thrust::optional decimal_type; - thrust::optional time_type; - thrust::optional timestamp_type; - thrust::optional int_type; + cuda::std::optional decimal_type; + cuda::std::optional time_type; + cuda::std::optional timestamp_type; + cuda::std::optional int_type; LogicalType(Type tp = UNDEFINED) : type(tp) {} LogicalType(DecimalType&& dt) : type(DECIMAL), decimal_type(dt) {} @@ -170,15 +170,15 @@ struct SchemaElement { // 5: nested fields int32_t num_children = 0; // 6: DEPRECATED: record the original type before conversion to parquet type - thrust::optional converted_type; + cuda::std::optional converted_type; // 7: DEPRECATED: record the scale for DECIMAL converted type int32_t decimal_scale = 0; // 8: DEPRECATED: record the precision for DECIMAL converted type int32_t decimal_precision = 0; // 9: save field_id from original schema - thrust::optional field_id; + cuda::std::optional field_id; // 10: replaces converted type - thrust::optional logical_type; + cuda::std::optional logical_type; // extra cudf specific fields bool output_as_byte_array = false; @@ -248,17 +248,17 @@ struct SchemaElement { */ struct Statistics { // deprecated max value in signed comparison order - thrust::optional> max; + cuda::std::optional> max; // deprecated min value in signed comparison order - thrust::optional> min; + cuda::std::optional> min; // count of null values in the column - thrust::optional null_count; + cuda::std::optional null_count; // count of distinct values occurring - thrust::optional distinct_count; + cuda::std::optional distinct_count; // max value for column determined by ColumnOrder - thrust::optional> max_value; + cuda::std::optional> max_value; // min value for column determined by ColumnOrder - thrust::optional> min_value; + cuda::std::optional> min_value; }; /** @@ -267,7 +267,7 @@ struct Statistics { struct SizeStatistics { // Number of variable-width bytes stored for the page/chunk. Should not be set for anything // but the BYTE_ARRAY physical type. - thrust::optional unencoded_byte_array_data_bytes; + cuda::std::optional unencoded_byte_array_data_bytes; /** * When present, there is expected to be one element corresponding to each * repetition (i.e. size=max repetition_level+1) where each element @@ -276,14 +276,14 @@ struct SizeStatistics { * * This value should not be written if max_repetition_level is 0. */ - thrust::optional> repetition_level_histogram; + cuda::std::optional> repetition_level_histogram; /** * Same as repetition_level_histogram except for definition levels. * * This value should not be written if max_definition_level is 0 or 1. */ - thrust::optional> definition_level_histogram; + cuda::std::optional> definition_level_histogram; }; /** @@ -304,7 +304,7 @@ struct OffsetIndex { std::vector page_locations; // per-page size info. see description of the same field in SizeStatistics. only present for // columns with a BYTE_ARRAY physical type. 
- thrust::optional> unencoded_byte_array_data_bytes; + cuda::std::optional> unencoded_byte_array_data_bytes; }; /** @@ -316,10 +316,10 @@ struct ColumnIndex { std::vector> max_values; // upper bound for values in each page BoundaryOrder boundary_order = BoundaryOrder::UNORDERED; // Indicates if min and max values are ordered - thrust::optional> null_counts; // Optional count of null values per page + cuda::std::optional> null_counts; // Optional count of null values per page // Repetition/definition level histograms for the column chunk - thrust::optional> repetition_level_histogram; - thrust::optional> definition_level_histogram; + cuda::std::optional> repetition_level_histogram; + cuda::std::optional> definition_level_histogram; }; /** @@ -340,7 +340,7 @@ struct ColumnChunkMetaData { int64_t dictionary_page_offset = 0; // Byte offset from the beginning of file to first (only) dictionary page Statistics statistics; // Encoded chunk-level statistics - thrust::optional size_statistics; // Size statistics for the chunk + cuda::std::optional size_statistics; // Size statistics for the chunk }; /** @@ -401,7 +401,7 @@ struct FileMetaData { std::vector row_groups; std::vector key_value_metadata; std::string created_by = ""; - thrust::optional> column_orders; + cuda::std::optional> column_orders; }; /** diff --git a/cpp/src/io/parquet/parquet_gpu.hpp b/cpp/src/io/parquet/parquet_gpu.hpp index 82ccb2b314a..b6bc3de11cb 100644 --- a/cpp/src/io/parquet/parquet_gpu.hpp +++ b/cpp/src/io/parquet/parquet_gpu.hpp @@ -381,7 +381,7 @@ struct ColumnChunkDesc { uint8_t rep_level_bits_, int8_t codec_, int8_t converted_type_, - thrust::optional logical_type_, + cuda::std::optional logical_type_, int8_t decimal_precision_, int32_t ts_clock_rate_, int32_t src_col_index_, @@ -429,14 +429,14 @@ struct ColumnChunkDesc { int32_t num_data_pages{}; // number of data pages int32_t num_dict_pages{}; // number of dictionary pages PageInfo const* dict_page{}; - string_index_pair* str_dict_index{}; // index for string dictionary - bitmask_type** valid_map_base{}; // base pointers of valid bit map for this column - void** column_data_base{}; // base pointers of column data - void** column_string_base{}; // base pointers of column string data - int8_t codec{}; // compressed codec enum - int8_t converted_type{}; // converted type enum - thrust::optional logical_type{}; // logical type - int8_t decimal_precision{}; // Decimal precision + string_index_pair* str_dict_index{}; // index for string dictionary + bitmask_type** valid_map_base{}; // base pointers of valid bit map for this column + void** column_data_base{}; // base pointers of column data + void** column_string_base{}; // base pointers of column string data + int8_t codec{}; // compressed codec enum + int8_t converted_type{}; // converted type enum + cuda::std::optional logical_type{}; // logical type + int8_t decimal_precision{}; // Decimal precision int32_t ts_clock_rate{}; // output timestamp clock frequency (0=default, 1000=ms, 1000000000=ns) int32_t src_col_index{}; // my input column index diff --git a/cpp/src/io/parquet/predicate_pushdown.cpp b/cpp/src/io/parquet/predicate_pushdown.cpp index f43a8fd24c4..fc002271d11 100644 --- a/cpp/src/io/parquet/predicate_pushdown.cpp +++ b/cpp/src/io/parquet/predicate_pushdown.cpp @@ -151,7 +151,7 @@ struct stats_caster { } void set_index(size_type index, - thrust::optional> const& binary_value, + cuda::std::optional> const& binary_value, Type const type) { if (binary_value.has_value()) { diff --git 
a/cpp/src/io/parquet/reader_impl_chunking.cu b/cpp/src/io/parquet/reader_impl_chunking.cu index 9c14902ef2f..aa81e8c1489 100644 --- a/cpp/src/io/parquet/reader_impl_chunking.cu +++ b/cpp/src/io/parquet/reader_impl_chunking.cu @@ -378,7 +378,7 @@ int64_t find_next_split(int64_t cur_pos, type_id column_type_id, type_id timestamp_type_id, Type physical, - thrust::optional converted, + cuda::std::optional converted, int32_t length) { int32_t type_width = (physical == FIXED_LEN_BYTE_ARRAY) ? length : 0; diff --git a/cpp/src/io/parquet/reader_impl_helpers.cpp b/cpp/src/io/parquet/reader_impl_helpers.cpp index 776caa99ac9..b9ae7f84241 100644 --- a/cpp/src/io/parquet/reader_impl_helpers.cpp +++ b/cpp/src/io/parquet/reader_impl_helpers.cpp @@ -25,7 +25,7 @@ namespace cudf::io::parquet::detail { namespace { -ConvertedType logical_type_to_converted_type(thrust::optional const& logical) +ConvertedType logical_type_to_converted_type(cuda::std::optional const& logical) { if (not logical.has_value()) { return UNKNOWN; } switch (logical->type) { diff --git a/cpp/src/io/parquet/writer_impl.cu b/cpp/src/io/parquet/writer_impl.cu index 5a8d96975ce..bbe396df858 100644 --- a/cpp/src/io/parquet/writer_impl.cu +++ b/cpp/src/io/parquet/writer_impl.cu @@ -174,7 +174,7 @@ struct aggregate_writer_metadata { std::vector> column_indexes; }; std::vector files; - thrust::optional> column_orders = thrust::nullopt; + cuda::std::optional> column_orders = cuda::std::nullopt; }; namespace { @@ -443,7 +443,7 @@ struct leaf_schema_fn { std::enable_if_t, void> operator()() { col_schema.type = (timestamp_is_int96) ? Type::INT96 : Type::INT64; - col_schema.converted_type = thrust::nullopt; + col_schema.converted_type = cuda::std::nullopt; col_schema.stats_dtype = statistics_dtype::dtype_timestamp64; if (timestamp_is_int96) { col_schema.ts_scale = -1000; // negative value indicates division by absolute value @@ -689,7 +689,7 @@ std::vector construct_schema_tree( schema_tree_node col_schema{}; col_schema.type = Type::BYTE_ARRAY; - col_schema.converted_type = thrust::nullopt; + col_schema.converted_type = cuda::std::nullopt; col_schema.stats_dtype = statistics_dtype::dtype_byte_array; col_schema.repetition_type = col_nullable ? OPTIONAL : REQUIRED; col_schema.name = (schema[parent_idx].name == "list") ? "element" : col_meta.get_name(); @@ -2660,7 +2660,7 @@ std::unique_ptr> writer::merge_row_group_metadata( // See https://github.com/rapidsai/cudf/pull/14264#issuecomment-1778311615 for (auto& se : md.schema) { if (se.logical_type.has_value() && se.logical_type.value().type == LogicalType::UNKNOWN) { - se.logical_type = thrust::nullopt; + se.logical_type = cuda::std::nullopt; } } diff --git a/cpp/src/json/json_path.cu b/cpp/src/json/json_path.cu index ff42d9c8620..b1eb3ff9c88 100644 --- a/cpp/src/json/json_path.cu +++ b/cpp/src/json/json_path.cu @@ -38,7 +38,7 @@ #include #include -#include +#include #include #include #include @@ -206,7 +206,7 @@ class parser { struct json_output { size_t output_max_len; char* output; - thrust::optional output_len; + cuda::std::optional output_len; __device__ void add_output(char const* str, size_t len) { @@ -655,7 +655,7 @@ class path_state : private parser { * @param stream Cuda stream to perform any gpu actions on * @returns A pair containing the command buffer, and maximum stack depth required. 
*/ -std::pair>, int> build_command_buffer( +std::pair>, int> build_command_buffer( cudf::string_scalar const& json_path, rmm::cuda_stream_view stream) { std::string h_json_path = json_path.to_string(stream); @@ -689,8 +689,8 @@ std::pair>, int> build_comma } while (op.type != path_operator_type::END); auto const is_empty = h_operators.size() == 1 && h_operators[0].type == path_operator_type::END; - return is_empty ? std::pair(thrust::nullopt, 0) - : std::pair(thrust::make_optional(cudf::detail::make_device_uvector_sync( + return is_empty ? std::pair(cuda::std::nullopt, 0) + : std::pair(cuda::std::make_optional(cudf::detail::make_device_uvector_sync( h_operators, stream, rmm::mr::get_current_device_resource())), max_stack_depth); } @@ -919,9 +919,9 @@ __launch_bounds__(block_size) CUDF_KERNEL path_operator const* const commands, size_type* d_sizes, cudf::detail::input_offsetalator output_offsets, - thrust::optional out_buf, - thrust::optional out_validity, - thrust::optional out_valid_count, + cuda::std::optional out_buf, + cuda::std::optional out_validity, + cuda::std::optional out_valid_count, get_json_object_options options) { auto tid = cudf::detail::grid_1d::global_thread_id(); @@ -1011,9 +1011,9 @@ std::unique_ptr get_json_object(cudf::strings_column_view const& c std::get<0>(preprocess).value().data(), sizes.data(), d_offsets, - thrust::nullopt, - thrust::nullopt, - thrust::nullopt, + cuda::std::nullopt, + cuda::std::nullopt, + cuda::std::nullopt, options); // convert sizes to offsets diff --git a/cpp/src/lists/contains.cu b/cpp/src/lists/contains.cu index 378cf678f1f..7bba9ab5cd5 100644 --- a/cpp/src/lists/contains.cu +++ b/cpp/src/lists/contains.cu @@ -32,12 +32,12 @@ #include #include +#include #include #include #include #include #include -#include #include #include #include diff --git a/cpp/src/lists/explode.cu b/cpp/src/lists/explode.cu index 5f1d30321a2..eec01440d49 100644 --- a/cpp/src/lists/explode.cu +++ b/cpp/src/lists/explode.cu @@ -28,6 +28,7 @@ #include #include +#include #include #include #include @@ -35,7 +36,6 @@ #include #include #include -#include #include #include @@ -56,8 +56,8 @@ std::unique_ptr build_table( size_type const explode_column_idx, column_view const& sliced_child, cudf::device_span gather_map, - thrust::optional> explode_col_gather_map, - thrust::optional> position_array, + cuda::std::optional> explode_col_gather_map, + cuda::std::optional> position_array, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { @@ -142,8 +142,8 @@ std::unique_ptr
explode(table_view const& input_table, explode_column_idx, sliced_child, gather_map, - thrust::nullopt, - thrust::nullopt, + cuda::std::nullopt, + cuda::std::nullopt, stream, mr); } @@ -192,7 +192,7 @@ std::unique_ptr
explode_position(table_view const& input_table, explode_column_idx, sliced_child, gather_map, - thrust::nullopt, + cuda::std::nullopt, std::move(pos), stream, mr); @@ -291,7 +291,7 @@ std::unique_ptr
explode_outer(table_view const& input_table, sliced_child, gather_map, explode_col_gather_map, - include_position ? std::move(pos) : thrust::optional>{}, + include_position ? std::move(pos) : cuda::std::optional>{}, stream, mr); } diff --git a/cpp/src/strings/convert/convert_datetime.cu b/cpp/src/strings/convert/convert_datetime.cu index f54eb082959..505646c0d85 100644 --- a/cpp/src/strings/convert/convert_datetime.cu +++ b/cpp/src/strings/convert/convert_datetime.cu @@ -35,12 +35,12 @@ #include #include +#include #include #include #include #include #include -#include #include #include @@ -519,7 +519,7 @@ struct check_datetime_format { * The checking here is a little more strict than the actual * parser used for conversion. */ - __device__ thrust::optional check_string(string_view const& d_string) + __device__ cuda::std::optional check_string(string_view const& d_string) { timestamp_components dateparts = {1970, 1, 1, 0}; // init to epoch time @@ -529,7 +529,7 @@ struct check_datetime_format { // eliminate static character values first if (item.item_type == format_char_type::literal) { // check static character matches - if (*ptr != item.value) return thrust::nullopt; + if (*ptr != item.value) return cuda::std::nullopt; ptr += item.length; length -= item.length; continue; @@ -645,7 +645,7 @@ struct check_datetime_format { case 'Z': result = true; // skip default: break; } - if (!result) return thrust::nullopt; + if (!result) return cuda::std::nullopt; ptr += bytes_read; length -= bytes_read; } @@ -820,7 +820,7 @@ struct datetime_formatter_fn { // We only dissect the timestamp into components if needed // by a specifier. And then we only do it once and reuse it. // This can improve performance when not using uncommon specifiers. - thrust::optional days; + cuda::std::optional days; auto days_from_timestamp = [tstamp]() { auto const count = tstamp.time_since_epoch().count(); diff --git a/cpp/src/strings/convert/convert_fixed_point.cu b/cpp/src/strings/convert/convert_fixed_point.cu index fb8ebf55ef1..df03d250b3f 100644 --- a/cpp/src/strings/convert/convert_fixed_point.cu +++ b/cpp/src/strings/convert/convert_fixed_point.cu @@ -34,12 +34,12 @@ #include #include +#include #include #include #include #include #include -#include #include namespace cudf { diff --git a/cpp/src/strings/regex/regex.cuh b/cpp/src/strings/regex/regex.cuh index c8d846624f8..8f25da343b0 100644 --- a/cpp/src/strings/regex/regex.cuh +++ b/cpp/src/strings/regex/regex.cuh @@ -23,8 +23,8 @@ #include +#include #include -#include #include #include @@ -36,7 +36,7 @@ namespace detail { struct relist; using match_pair = thrust::pair; -using match_result = thrust::optional; +using match_result = cuda::std::optional; constexpr int32_t MAX_SHARED_MEM = 2048; ///< Memory size for storing prog instruction data constexpr std::size_t MAX_WORKING_MEM = 0x01'FFFF'FFFF; ///< Memory size for state data diff --git a/cpp/src/strings/regex/regex.inl b/cpp/src/strings/regex/regex.inl index ce12dc17aa4..9b028d6b660 100644 --- a/cpp/src/strings/regex/regex.inl +++ b/cpp/src/strings/regex/regex.inl @@ -252,12 +252,12 @@ __device__ __forceinline__ match_result reprog_device::regexec(string_view const switch (jnk.starttype) { case BOL: if (pos == 0) break; - if (jnk.startchar != '^') { return thrust::nullopt; } + if (jnk.startchar != '^') { return cuda::std::nullopt; } --pos; startchar = static_cast('\n'); case CHAR: { auto const fidx = dstr.find(startchar, pos); - if (fidx == string_view::npos) { return thrust::nullopt; } + if (fidx == 
string_view::npos) { return cuda::std::nullopt; } pos = fidx + (jnk.starttype == BOL); break; } @@ -388,7 +388,7 @@ __device__ __forceinline__ match_result reprog_device::regexec(string_view const checkstart = jnk.list1->get_size() == 0; } while (!last_character && (!checkstart || !match)); - return match ? match_result({begin, end}) : thrust::nullopt; + return match ? match_result({begin, end}) : cuda::std::nullopt; } __device__ __forceinline__ match_result reprog_device::find(int32_t const thread_idx, diff --git a/cpp/src/strings/replace/multi.cu b/cpp/src/strings/replace/multi.cu index 8b5a4317b50..2fd30daf63b 100644 --- a/cpp/src/strings/replace/multi.cu +++ b/cpp/src/strings/replace/multi.cu @@ -35,6 +35,7 @@ #include #include +#include #include #include #include @@ -42,7 +43,6 @@ #include #include #include -#include #include #include @@ -100,7 +100,7 @@ struct replace_multi_parallel_fn { * @param idx Index of the byte position in the chars column * @param chars_bytes Number of bytes in the chars column */ - __device__ thrust::optional has_target(size_type idx, size_type chars_bytes) const + __device__ cuda::std::optional has_target(size_type idx, size_type chars_bytes) const { auto const d_offsets = get_offsets_ptr(); auto const d_chars = get_base_ptr() + d_offsets[0] + idx; @@ -118,7 +118,7 @@ struct replace_multi_parallel_fn { if ((d_chars + d_tgt.size_bytes()) <= (d_str.data() + d_str.size_bytes())) { return t; } } } - return thrust::nullopt; + return cuda::std::nullopt; } /** diff --git a/cpp/src/strings/replace/multi_re.cu b/cpp/src/strings/replace/multi_re.cu index 743e5894112..931358adafd 100644 --- a/cpp/src/strings/replace/multi_re.cu +++ b/cpp/src/strings/replace/multi_re.cu @@ -90,7 +90,7 @@ struct replace_multi_regex_fn { } reprog_device prog = progs[ptn_idx]; - auto const result = !prog.is_empty() ? prog.find(idx, d_str, itr) : thrust::nullopt; + auto const result = !prog.is_empty() ? prog.find(idx, d_str, itr) : cuda::std::nullopt; d_ranges[ptn_idx] = result ? found_range{result->first, result->second} : found_range{nchars, nchars}; } diff --git a/cpp/src/transform/row_bit_count.cu b/cpp/src/transform/row_bit_count.cu index 78bd558501b..3e27b556335 100644 --- a/cpp/src/transform/row_bit_count.cu +++ b/cpp/src/transform/row_bit_count.cu @@ -33,8 +33,8 @@ #include #include +#include #include -#include #include namespace cudf { @@ -157,9 +157,9 @@ void flatten_hierarchy(ColIter begin, std::vector& info, hierarchy_info& h_info, rmm::cuda_stream_view stream, - size_type cur_depth = 0, - size_type cur_branch_depth = 0, - thrust::optional parent_index = {}); + size_type cur_depth = 0, + size_type cur_branch_depth = 0, + cuda::std::optional parent_index = {}); /** * @brief Type-dispatched functor called by flatten_hierarchy. 
@@ -175,7 +175,7 @@ struct flatten_functor { rmm::cuda_stream_view, size_type cur_depth, size_type cur_branch_depth, - thrust::optional) + cuda::std::optional) { out.push_back(col); info.push_back({cur_depth, cur_branch_depth, cur_branch_depth}); @@ -192,7 +192,7 @@ struct flatten_functor { rmm::cuda_stream_view, size_type cur_depth, size_type cur_branch_depth, - thrust::optional) + cuda::std::optional) { out.push_back(col); info.push_back({cur_depth, cur_branch_depth, cur_branch_depth}); @@ -208,7 +208,7 @@ struct flatten_functor { rmm::cuda_stream_view stream, size_type cur_depth, size_type cur_branch_depth, - thrust::optional parent_index) + cuda::std::optional parent_index) { // track branch depth as we reach this list and after we pass it auto const branch_depth_start = cur_branch_depth; @@ -241,7 +241,7 @@ struct flatten_functor { rmm::cuda_stream_view stream, size_type cur_depth, size_type cur_branch_depth, - thrust::optional) + cuda::std::optional) { out.push_back(col); info.push_back({cur_depth, cur_branch_depth, cur_branch_depth}); @@ -282,7 +282,7 @@ void flatten_hierarchy(ColIter begin, rmm::cuda_stream_view stream, size_type cur_depth, size_type cur_branch_depth, - thrust::optional parent_index) + cuda::std::optional parent_index) { std::for_each(begin, end, [&](column_view const& col) { cudf::type_dispatcher(col.type(), diff --git a/cpp/tests/io/parquet_common.cpp b/cpp/tests/io/parquet_common.cpp index b64cd230bc6..8a040e1a322 100644 --- a/cpp/tests/io/parquet_common.cpp +++ b/cpp/tests/io/parquet_common.cpp @@ -743,7 +743,7 @@ int32_t compare(T& v1, T& v2) int32_t compare_binary(std::vector const& v1, std::vector const& v2, cudf::io::parquet::detail::Type ptype, - thrust::optional const& ctype) + cuda::std::optional const& ctype) { auto ctype_val = ctype.value_or(cudf::io::parquet::detail::UNKNOWN); switch (ptype) { diff --git a/cpp/tests/io/parquet_common.hpp b/cpp/tests/io/parquet_common.hpp index 59ee85444f2..bc6145d77da 100644 --- a/cpp/tests/io/parquet_common.hpp +++ b/cpp/tests/io/parquet_common.hpp @@ -172,7 +172,7 @@ std::pair create_parquet_typed_with_stats(std::string int32_t compare_binary(std::vector const& v1, std::vector const& v2, cudf::io::parquet::detail::Type ptype, - thrust::optional const& ctype); + cuda::std::optional const& ctype); void expect_compression_stats_empty(std::shared_ptr stats); diff --git a/cpp/tests/iterator/indexalator_test.cu b/cpp/tests/iterator/indexalator_test.cu index 0c10853ec02..dac2356dcb0 100644 --- a/cpp/tests/iterator/indexalator_test.cu +++ b/cpp/tests/iterator/indexalator_test.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2023, NVIDIA CORPORATION. + * Copyright (c) 2021-2024, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,10 +20,10 @@ #include +#include #include #include #include -#include #include #include #include @@ -84,15 +84,16 @@ TYPED_TEST(IndexalatorTest, optional_iterator) auto d_col = cudf::test::fixed_width_column_wrapper( host_values.begin(), host_values.end(), validity.begin()); - auto expected_values = thrust::host_vector>(host_values.size()); + auto expected_values = + thrust::host_vector>(host_values.size()); std::transform(host_values.begin(), host_values.end(), validity.begin(), expected_values.begin(), [](T v, bool b) { - return (b) ? thrust::make_optional(static_cast(v)) - : thrust::nullopt; + return (b) ? 
cuda::std::make_optional(static_cast(v)) + : cuda::std::nullopt; }); auto it_dev = cudf::detail::indexalator_factory::make_input_optional_iterator(d_col); diff --git a/cpp/tests/iterator/offsetalator_test.cu b/cpp/tests/iterator/offsetalator_test.cu index e569e58f42a..af12ebbe08e 100644 --- a/cpp/tests/iterator/offsetalator_test.cu +++ b/cpp/tests/iterator/offsetalator_test.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023, NVIDIA CORPORATION. + * Copyright (c) 2023-2024, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,10 +20,10 @@ #include +#include #include #include #include -#include #include #include #include diff --git a/cpp/tests/iterator/optional_iterator_test.cuh b/cpp/tests/iterator/optional_iterator_test.cuh index 6a264cee9a8..04f5410a44f 100644 --- a/cpp/tests/iterator/optional_iterator_test.cuh +++ b/cpp/tests/iterator/optional_iterator_test.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. + * Copyright (c) 2020-2024, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,8 +16,8 @@ #include +#include #include -#include template void nonull_optional_iterator(IteratorTest& testFixture) @@ -32,9 +32,9 @@ void nonull_optional_iterator(IteratorTest& testFixture) auto d_col = cudf::column_device_view::create(w_col); // calculate the expected value by CPU. - thrust::host_vector> replaced_array(host_values.size()); + thrust::host_vector> replaced_array(host_values.size()); std::transform(host_values.begin(), host_values.end(), replaced_array.begin(), [](auto s) { - return thrust::optional{s}; + return cuda::std::optional{s}; }); // GPU test @@ -61,19 +61,20 @@ void null_optional_iterator(IteratorTest& testFixture) auto d_col = cudf::column_device_view::create(w_col); // calculate the expected value by CPU. - thrust::host_vector> optional_values(host_values.size()); - std::transform(host_values.begin(), - host_values.end(), - host_bools.begin(), - optional_values.begin(), - [](auto s, bool b) { return b ? thrust::optional{s} : thrust::optional{}; }); + thrust::host_vector> optional_values(host_values.size()); + std::transform( + host_values.begin(), + host_values.end(), + host_bools.begin(), + optional_values.begin(), + [](auto s, bool b) { return b ? 
cuda::std::optional{s} : cuda::std::optional{}; }); - thrust::host_vector> value_all_valid(host_values.size()); + thrust::host_vector> value_all_valid(host_values.size()); std::transform(host_values.begin(), host_values.end(), host_bools.begin(), value_all_valid.begin(), - [](auto s, bool b) { return thrust::optional{s}; }); + [](auto s, bool b) { return cuda::std::optional{s}; }); // GPU test for correct null mapping testFixture.iterator_test_thrust( diff --git a/cpp/tests/iterator/optional_iterator_test_numeric.cu b/cpp/tests/iterator/optional_iterator_test_numeric.cu index 98befb0a3ee..257c0979017 100644 --- a/cpp/tests/iterator/optional_iterator_test_numeric.cu +++ b/cpp/tests/iterator/optional_iterator_test_numeric.cu @@ -18,9 +18,9 @@ #include +#include #include #include -#include #include #include @@ -49,21 +49,21 @@ TYPED_TEST(NumericOptionalIteratorTest, null_optional_iterator) { null_optional_ // Transformers and Operators for optional_iterator test template struct transformer_optional_meanvar { - using ResultType = thrust::optional>; + using ResultType = cuda::std::optional>; - CUDF_HOST_DEVICE inline ResultType operator()(thrust::optional const& optional) + CUDF_HOST_DEVICE inline ResultType operator()(cuda::std::optional const& optional) { if (optional.has_value()) { auto v = *optional; return cudf::meanvar{v, static_cast(v * v), 1}; } - return thrust::nullopt; + return cuda::std::nullopt; } }; template struct optional_to_meanvar { - CUDF_HOST_DEVICE inline T operator()(thrust::optional const& v) { return v.value_or(T{0}); } + CUDF_HOST_DEVICE inline T operator()(cuda::std::optional const& v) { return v.value_or(T{0}); } }; // TODO: enable this test also at __CUDACC_DEBUG__
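
Illustrative sketch of the device-side accessor pattern this replacement preserves (the names below are hypothetical stand-ins for the cudf accessors touched in the patch, and the sketch assumes a CUDA toolkit whose libcu++ provides `<cuda/std/optional>`):

```cpp
#include <cuda/std/optional>
#include <cuda_runtime.h>
#include <cstdio>

// Hypothetical accessor in the style of cudf's optional_accessor: returns an
// engaged optional for valid rows and cuda::std::nullopt for null rows.
struct example_accessor {
  int const* data;
  bool const* valid;
  __device__ cuda::std::optional<int> operator()(int i) const
  {
    return valid[i] ? cuda::std::make_optional(data[i]) : cuda::std::nullopt;
  }
};

// Single-thread kernel that sums the valid elements through the optional interface.
__global__ void sum_valid(int const* data, bool const* valid, int n, int* out)
{
  example_accessor acc{data, valid};
  int total = 0;
  for (int i = 0; i < n; ++i) {
    auto const v = acc(i);   // cuda::std::optional<int>
    if (v) { total += *v; }  // contextual bool + dereference, same shape as with thrust::optional
  }
  *out = total;
}

int main()
{
  int* data{};
  bool* valid{};
  int* out{};
  cudaMallocManaged(&data, 4 * sizeof(int));
  cudaMallocManaged(&valid, 4 * sizeof(bool));
  cudaMallocManaged(&out, sizeof(int));
  int const h_data[4]   = {1, 2, 3, 4};
  bool const h_valid[4] = {true, false, true, true};
  for (int i = 0; i < 4; ++i) {
    data[i]  = h_data[i];
    valid[i] = h_valid[i];
  }
  sum_valid<<<1, 1>>>(data, valid, 4, out);
  cudaDeviceSynchronize();
  std::printf("sum of valid elements: %d\n", *out);  // expected: 8
  cudaFree(data);
  cudaFree(valid);
  cudaFree(out);
  return 0;
}
```

Built with `nvcc` against a recent CTK, this should print `sum of valid elements: 8`, matching the behaviour of the `thrust::optional`-based code being replaced.
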