From a025db54a92ad967827ad6f6f2b251065fe09c73 Mon Sep 17 00:00:00 2001 From: Ed Seidl Date: Sat, 26 Aug 2023 01:25:16 -0700 Subject: [PATCH 001/150] Fix for encodings listed in the Parquet column chunk metadata (#13907) With the addition of V2 page headers, the encodings used have also changed. This PR correctly determines the encodings used in each column chunk and writes that information to the column chunk metadata. Authors: - Ed Seidl (https://github.com/etseidl) - Nghia Truong (https://github.com/ttnghia) Approvers: - Vukasin Milovanovic (https://github.com/vuule) - Nghia Truong (https://github.com/ttnghia) URL: https://github.com/rapidsai/cudf/pull/13907 --- cpp/src/io/parquet/page_enc.cu | 21 ++++++++ cpp/src/io/parquet/parquet_common.hpp | 1 + cpp/src/io/parquet/parquet_gpu.hpp | 13 ++++- cpp/src/io/parquet/writer_impl.cu | 29 ++++++----- cpp/tests/io/parquet_test.cpp | 70 +++++++++++++++++++++++++++ 5 files changed, 120 insertions(+), 14 deletions(-) diff --git a/cpp/src/io/parquet/page_enc.cu b/cpp/src/io/parquet/page_enc.cu index d066b454840..0af561be8da 100644 --- a/cpp/src/io/parquet/page_enc.cu +++ b/cpp/src/io/parquet/page_enc.cu @@ -229,6 +229,16 @@ Encoding __device__ determine_encoding(PageType page_type, } } +// operator to use with warp_reduce. stolen from cub::Sum +struct BitwiseOr { + /// Binary OR operator, returns a | b + template + __host__ __device__ __forceinline__ T operator()(T const& a, T const& b) const + { + return a | b; + } +}; + } // anonymous namespace // blockDim {512,1,1} @@ -1445,6 +1455,7 @@ __global__ void __launch_bounds__(decide_compression_block_size) uint32_t uncompressed_data_size = 0; uint32_t compressed_data_size = 0; + uint32_t encodings = 0; auto const num_pages = ck_g[warp_id].num_pages; for (auto page_id = lane_id; page_id < num_pages; page_id += cudf::detail::warp_size) { auto const& curr_page = ck_g[warp_id].pages[page_id]; @@ -1457,10 +1468,14 @@ __global__ void __launch_bounds__(decide_compression_block_size) atomicOr(&compression_error[warp_id], 1); } } + // collect encoding info for the chunk metadata + encodings |= encoding_to_mask(curr_page.encoding); } uncompressed_data_size = warp_reduce(temp_storage[warp_id][0]).Sum(uncompressed_data_size); compressed_data_size = warp_reduce(temp_storage[warp_id][1]).Sum(compressed_data_size); __syncwarp(); + encodings = warp_reduce(temp_storage[warp_id][0]).Reduce(encodings, BitwiseOr{}); + __syncwarp(); if (lane_id == 0) { auto const write_compressed = compressed_data_size != 0 and compression_error[warp_id] == 0 and @@ -1469,6 +1484,12 @@ __global__ void __launch_bounds__(decide_compression_block_size) chunks[chunk_id].bfr_size = uncompressed_data_size; chunks[chunk_id].compressed_size = write_compressed ? 
compressed_data_size : uncompressed_data_size; + + // if there is repetition or definition level data add RLE encoding + auto const rle_bits = + ck_g[warp_id].col_desc->num_def_level_bits() + ck_g[warp_id].col_desc->num_rep_level_bits(); + if (rle_bits > 0) { encodings |= encoding_to_mask(Encoding::RLE); } + chunks[chunk_id].encodings = encodings; } } diff --git a/cpp/src/io/parquet/parquet_common.hpp b/cpp/src/io/parquet/parquet_common.hpp index ab6290c4ed6..5f8f1617cb9 100644 --- a/cpp/src/io/parquet/parquet_common.hpp +++ b/cpp/src/io/parquet/parquet_common.hpp @@ -92,6 +92,7 @@ enum class Encoding : uint8_t { DELTA_BYTE_ARRAY = 7, RLE_DICTIONARY = 8, BYTE_STREAM_SPLIT = 9, + NUM_ENCODINGS = 10, }; /** diff --git a/cpp/src/io/parquet/parquet_gpu.hpp b/cpp/src/io/parquet/parquet_gpu.hpp index 0a8640aef26..e82b6abc13d 100644 --- a/cpp/src/io/parquet/parquet_gpu.hpp +++ b/cpp/src/io/parquet/parquet_gpu.hpp @@ -345,8 +345,8 @@ struct parquet_column_device_view : stats_column_desc { ConvertedType converted_type; //!< logical data type uint8_t level_bits; //!< bits to encode max definition (lower nibble) & repetition (upper nibble) //!< levels - constexpr uint8_t num_def_level_bits() { return level_bits & 0xf; } - constexpr uint8_t num_rep_level_bits() { return level_bits >> 4; } + constexpr uint8_t num_def_level_bits() const { return level_bits & 0xf; } + constexpr uint8_t num_rep_level_bits() const { return level_bits >> 4; } size_type const* const* nesting_offsets; //!< If column is a nested type, contains offset array of each nesting level @@ -384,6 +384,12 @@ constexpr size_t kDictScratchSize = (1 << kDictHashBits) * sizeof(uint32_t); struct EncPage; struct slot_type; +// convert Encoding to a mask value +constexpr uint32_t encoding_to_mask(Encoding encoding) +{ + return 1 << static_cast(encoding); +} + /** * @brief Struct describing an encoder column chunk */ @@ -420,6 +426,7 @@ struct EncColumnChunk { bool use_dictionary; //!< True if the chunk uses dictionary encoding uint8_t* column_index_blob; //!< Binary blob containing encoded column index for this chunk uint32_t column_index_size; //!< Size of column index blob + uint32_t encodings; //!< Mask representing the set of encodings used for this chunk }; /** @@ -748,6 +755,8 @@ void EncodePages(device_span pages, /** * @brief Launches kernel to make the compressed vs uncompressed chunk-level decision * + * Also calculates the set of page encodings used for each chunk. + * * @param[in,out] chunks Column chunks (updated with actual compressed/uncompressed sizes) * @param[in] stream CUDA stream to use */ diff --git a/cpp/src/io/parquet/writer_impl.cu b/cpp/src/io/parquet/writer_impl.cu index c5fc852d20b..d2976a3f5d9 100644 --- a/cpp/src/io/parquet/writer_impl.cu +++ b/cpp/src/io/parquet/writer_impl.cu @@ -193,6 +193,20 @@ parquet::Compression to_parquet_compression(compression_type compression) } } +/** + * @brief Convert a mask of encodings to a vector. + * + * @param encodings Vector of `Encoding`s to populate + * @param enc_mask Mask of encodings used + */ +void update_chunk_encodings(std::vector& encodings, uint32_t enc_mask) +{ + for (uint8_t enc = 0; enc < static_cast(Encoding::NUM_ENCODINGS); enc++) { + auto const enc_enum = static_cast(enc); + if ((enc_mask & gpu::encoding_to_mask(enc_enum)) != 0) { encodings.push_back(enc_enum); } + } +} + /** * @brief Compute size (in bytes) of the data stored in the given column. 
* @@ -1671,6 +1685,7 @@ auto convert_table_to_parquet_data(table_input_metadata& table_meta, ck.start_row = start_row; ck.num_rows = (uint32_t)row_group.num_rows; ck.first_fragment = c * num_fragments + f; + ck.encodings = 0; auto chunk_fragments = row_group_fragments[c].subspan(f, fragments_in_chunk); // In fragment struct, add a pointer to the chunk it belongs to // In each fragment in chunk_fragments, update the chunk pointer here. @@ -1687,7 +1702,6 @@ auto convert_table_to_parquet_data(table_input_metadata& table_meta, }); auto& column_chunk_meta = row_group.columns[c].meta_data; column_chunk_meta.type = parquet_columns[c].physical_type(); - column_chunk_meta.encodings = {Encoding::PLAIN, Encoding::RLE}; column_chunk_meta.path_in_schema = parquet_columns[c].get_path_in_schema(); column_chunk_meta.codec = UNCOMPRESSED; column_chunk_meta.num_values = ck.num_values; @@ -1703,17 +1717,6 @@ auto convert_table_to_parquet_data(table_input_metadata& table_meta, row_group_fragments.host_to_device_async(stream); [[maybe_unused]] auto dict_info_owner = build_chunk_dictionaries( chunks, col_desc, row_group_fragments, compression, dict_policy, max_dictionary_size, stream); - for (size_t p = 0; p < partitions.size(); p++) { - for (int rg = 0; rg < num_rg_in_part[p]; rg++) { - size_t global_rg = global_rowgroup_base[p] + rg; - for (int col = 0; col < num_columns; col++) { - if (chunks.host_view()[rg][col].use_dictionary) { - agg_meta->file(p).row_groups[global_rg].columns[col].meta_data.encodings.push_back( - Encoding::PLAIN_DICTIONARY); - } - } - } - } // The code preceding this used a uniform fragment size for all columns. Now recompute // fragments with a (potentially) varying number of fragments per column. @@ -1949,6 +1952,8 @@ auto convert_table_to_parquet_data(table_input_metadata& table_meta, } max_write_size = std::max(max_write_size, ck.compressed_size); + update_chunk_encodings(column_chunk_meta.encodings, ck.encodings); + if (ck.ck_stat_size != 0) { std::vector const stats_blob = cudf::detail::make_std_vector_sync( device_span(dev_bfr, ck.ck_stat_size), stream); diff --git a/cpp/tests/io/parquet_test.cpp b/cpp/tests/io/parquet_test.cpp index 8c7d598d33f..b210452f619 100644 --- a/cpp/tests/io/parquet_test.cpp +++ b/cpp/tests/io/parquet_test.cpp @@ -6599,4 +6599,74 @@ TEST_F(ParquetWriterTest, TimestampMicrosINT96NoOverflow) CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view()); } +TEST_P(ParquetV2Test, CheckEncodings) +{ + using cudf::io::parquet::Encoding; + constexpr auto num_rows = 100'000; + auto const is_v2 = GetParam(); + + auto const validity = cudf::test::iterators::no_nulls(); + // data should be PLAIN for v1, RLE for V2 + auto col0_data = + cudf::detail::make_counting_transform_iterator(0, [](auto i) -> bool { return i % 2 == 0; }); + // data should be PLAIN for both + auto col1_data = random_values(num_rows); + // data should be PLAIN_DICTIONARY for v1, PLAIN and RLE_DICTIONARY for v2 + auto col2_data = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return 1; }); + + cudf::test::fixed_width_column_wrapper col0{col0_data, col0_data + num_rows, validity}; + column_wrapper col1{col1_data.begin(), col1_data.end(), validity}; + column_wrapper col2{col2_data, col2_data + num_rows, validity}; + + auto expected = table_view{{col0, col1, col2}}; + + auto const filename = is_v2 ? 
"CheckEncodingsV2.parquet" : "CheckEncodingsV1.parquet"; + auto filepath = temp_env->get_temp_filepath(filename); + cudf::io::parquet_writer_options out_opts = + cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected) + .max_page_size_rows(num_rows) + .write_v2_headers(is_v2); + cudf::io::write_parquet(out_opts); + + // make sure the expected encodings are present + auto contains = [](auto const& vec, auto const& enc) { + return std::find(vec.begin(), vec.end(), enc) != vec.end(); + }; + + auto const source = cudf::io::datasource::create(filepath); + cudf::io::parquet::FileMetaData fmd; + + read_footer(source, &fmd); + auto const& chunk0_enc = fmd.row_groups[0].columns[0].meta_data.encodings; + auto const& chunk1_enc = fmd.row_groups[0].columns[1].meta_data.encodings; + auto const& chunk2_enc = fmd.row_groups[0].columns[2].meta_data.encodings; + if (is_v2) { + // col0 should have RLE for rep/def and data + EXPECT_TRUE(chunk0_enc.size() == 1); + EXPECT_TRUE(contains(chunk0_enc, Encoding::RLE)); + // col1 should have RLE for rep/def and PLAIN for data + EXPECT_TRUE(chunk1_enc.size() == 2); + EXPECT_TRUE(contains(chunk1_enc, Encoding::RLE)); + EXPECT_TRUE(contains(chunk1_enc, Encoding::PLAIN)); + // col2 should have RLE for rep/def, PLAIN for dict, and RLE_DICTIONARY for data + EXPECT_TRUE(chunk2_enc.size() == 3); + EXPECT_TRUE(contains(chunk2_enc, Encoding::RLE)); + EXPECT_TRUE(contains(chunk2_enc, Encoding::PLAIN)); + EXPECT_TRUE(contains(chunk2_enc, Encoding::RLE_DICTIONARY)); + } else { + // col0 should have RLE for rep/def and PLAIN for data + EXPECT_TRUE(chunk0_enc.size() == 2); + EXPECT_TRUE(contains(chunk0_enc, Encoding::RLE)); + EXPECT_TRUE(contains(chunk0_enc, Encoding::PLAIN)); + // col1 should have RLE for rep/def and PLAIN for data + EXPECT_TRUE(chunk1_enc.size() == 2); + EXPECT_TRUE(contains(chunk1_enc, Encoding::RLE)); + EXPECT_TRUE(contains(chunk1_enc, Encoding::PLAIN)); + // col2 should have RLE for rep/def and PLAIN_DICTIONARY for data and dict + EXPECT_TRUE(chunk2_enc.size() == 2); + EXPECT_TRUE(contains(chunk2_enc, Encoding::RLE)); + EXPECT_TRUE(contains(chunk2_enc, Encoding::PLAIN_DICTIONARY)); + } +} + CUDF_TEST_PROGRAM_MAIN() From 2c7f02c399e58538a7f772e86839c05d3e80ca19 Mon Sep 17 00:00:00 2001 From: Divye Gala Date: Sun, 27 Aug 2023 13:42:36 -0400 Subject: [PATCH 002/150] Use `thread_index_type` in `partitioning.cu` (#13973) This PR uses `cudf::thread_index_type` to avoid overflows. 
Authors: - Divye Gala (https://github.com/divyegala) Approvers: - Bradley Dice (https://github.com/bdice) - Karthikeyan (https://github.com/karthikeyann) URL: https://github.com/rapidsai/cudf/pull/13973 --- cpp/src/partitioning/partitioning.cu | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/cpp/src/partitioning/partitioning.cu b/cpp/src/partitioning/partitioning.cu index 0d94db110b4..ff9c4ea2f59 100644 --- a/cpp/src/partitioning/partitioning.cu +++ b/cpp/src/partitioning/partitioning.cu @@ -134,7 +134,8 @@ __global__ void compute_row_partition_numbers(row_hasher_t the_hasher, // Accumulate histogram of the size of each partition in shared memory extern __shared__ size_type shared_partition_sizes[]; - size_type row_number = threadIdx.x + blockIdx.x * blockDim.x; + auto tid = cudf::thread_index_type{threadIdx.x} + + cudf::thread_index_type{blockIdx.x} * cudf::thread_index_type{blockDim.x}; // Initialize local histogram size_type partition_number = threadIdx.x; @@ -148,7 +149,8 @@ __global__ void compute_row_partition_numbers(row_hasher_t the_hasher, // Compute the hash value for each row, store it to the array of hash values // and compute the partition to which the hash value belongs and increment // the shared memory counter for that partition - while (row_number < num_rows) { + while (tid < num_rows) { + auto const row_number = static_cast(tid); hash_value_type const row_hash_value = the_hasher(row_number); size_type const partition_number = the_partitioner(row_hash_value); @@ -158,7 +160,7 @@ __global__ void compute_row_partition_numbers(row_hasher_t the_hasher, row_partition_offset[row_number] = atomicAdd(&(shared_partition_sizes[partition_number]), size_type(1)); - row_number += blockDim.x * gridDim.x; + tid += cudf::thread_index_type{blockDim.x} * cudf::thread_index_type{gridDim.x}; } __syncthreads(); @@ -213,12 +215,14 @@ __global__ void compute_row_output_locations(size_type* __restrict__ row_partiti } __syncthreads(); - size_type row_number = threadIdx.x + blockIdx.x * blockDim.x; + auto tid = cudf::thread_index_type{threadIdx.x} + + cudf::thread_index_type{blockIdx.x} * cudf::thread_index_type{blockDim.x}; // Get each row's partition number, and get it's output location by // incrementing block's offset counter for that partition number // and store the row's output location in-place - while (row_number < num_rows) { + while (tid < num_rows) { + auto const row_number = static_cast(tid); // Get partition number of this row size_type const partition_number = row_partition_numbers[row_number]; @@ -230,7 +234,7 @@ __global__ void compute_row_output_locations(size_type* __restrict__ row_partiti // Store the row's output location in-place row_partition_numbers[row_number] = row_output_location; - row_number += blockDim.x * gridDim.x; + tid += cudf::thread_index_type{blockDim.x} * cudf::thread_index_type{gridDim.x}; } } @@ -307,8 +311,11 @@ __global__ void copy_block_partitions(InputIter input_iter, __syncthreads(); // Fetch the input data to shared memory - for (size_type row_number = threadIdx.x + blockIdx.x * blockDim.x; row_number < num_rows; - row_number += blockDim.x * gridDim.x) { + for (auto tid = cudf::thread_index_type{threadIdx.x} + + cudf::thread_index_type{blockIdx.x} * cudf::thread_index_type{blockDim.x}; + tid < num_rows; + tid += cudf::thread_index_type{blockDim.x} * cudf::thread_index_type{gridDim.x}) { + auto const row_number = static_cast(tid); size_type const ipartition = 
row_partition_numbers[row_number]; block_output[partition_offset_shared[ipartition] + row_partition_offset[row_number]] = From aba001c12f8db876ab7b763fcde939dba9efd665 Mon Sep 17 00:00:00 2001 From: Karthikeyan <6488848+karthikeyann@users.noreply.github.com> Date: Mon, 28 Aug 2023 15:23:34 +0530 Subject: [PATCH 003/150] Use cuco::static_set in JSON tree algorithm (#13928) In JSON tree algorithms of JSON reader, cuco static_map is used as a set. This PR replaces it with static_set. No tests are changed. No significant runtime changes. Addresses part of #12261 Authors: - Karthikeyan (https://github.com/karthikeyann) Approvers: - Nghia Truong (https://github.com/ttnghia) - Yunsong Wang (https://github.com/PointKernel) URL: https://github.com/rapidsai/cudf/pull/13928 --- cpp/src/io/json/json_tree.cu | 85 ++++++++++++++++-------------------- 1 file changed, 38 insertions(+), 47 deletions(-) diff --git a/cpp/src/io/json/json_tree.cu b/cpp/src/io/json/json_tree.cu index 3f1f2e81d21..9231040eb70 100644 --- a/cpp/src/io/json/json_tree.cu +++ b/cpp/src/io/json/json_tree.cu @@ -35,7 +35,7 @@ #include -#include +#include #include #include @@ -400,8 +400,6 @@ rmm::device_uvector hash_node_type_with_field_name(device_span>; - using hash_map_type = - cuco::static_map; auto const num_nodes = d_tree.node_categories.size(); auto const num_fields = thrust::count(rmm::exec_policy(stream), @@ -409,12 +407,6 @@ rmm::device_uvector hash_node_type_with_field_name(device_span{}, stream}, - stream.value()}; auto const d_hasher = [d_input = d_input.data(), node_range_begin = d_tree.node_range_begin.data(), node_range_end = d_tree.node_range_end.data()] __device__(auto node_id) { @@ -434,25 +426,33 @@ rmm::device_uvector hash_node_type_with_field_name(device_span(0); auto const is_field_name_node = [node_categories = d_tree.node_categories.data()] __device__(auto node_id) { return node_categories[node_id] == node_t::NC_FN; }; - key_map.insert_if(iter, - iter + num_nodes, - thrust::counting_iterator(0), // stencil - is_field_name_node, - d_hasher, - d_equal, - stream.value()); + + using hasher_type = decltype(d_hasher); + constexpr size_type empty_node_index_sentinel = -1; + auto key_set = + cuco::experimental::static_set{cuco::experimental::extent{compute_hash_table_size( + num_fields, 40)}, // 40% occupancy in hash map + cuco::empty_key{empty_node_index_sentinel}, + d_equal, + cuco::experimental::linear_probing<1, hasher_type>{d_hasher}, + hash_table_allocator_type{default_allocator{}, stream}, + stream.value()}; + key_set.insert_if_async(iter, + iter + num_nodes, + thrust::counting_iterator(0), // stencil + is_field_name_node, + stream.value()); auto const get_hash_value = - [key_map = key_map.get_device_view(), d_hasher, d_equal] __device__(auto node_id) -> size_type { - auto const it = key_map.find(node_id, d_hasher, d_equal); - return (it == key_map.end()) ? size_type{0} : it->second.load(cuda::std::memory_order_relaxed); + [key_set = key_set.ref(cuco::experimental::op::find)] __device__(auto node_id) -> size_type { + auto const it = key_set.find(node_id); + return (it == key_set.end()) ? size_type{0} : *it; }; // convert field nodes to node indices, and other nodes to enum value. @@ -528,7 +528,6 @@ std::pair, rmm::device_uvector> hash_n { CUDF_FUNC_RANGE(); auto const num_nodes = parent_node_ids.size(); - rmm::device_uvector col_id(num_nodes, stream, mr); // array of arrays NodeIndexT const row_array_children_level = is_enabled_lines ? 
1 : 2;
@@ -560,17 +559,6 @@ std::pair, rmm::device_uvector> hash_n
     list_indices.begin());
   }
 
-  using hash_table_allocator_type = rmm::mr::stream_allocator_adaptor>;
-  using hash_map_type =
-    cuco::static_map;
-
-  constexpr size_type empty_node_index_sentinel = -1;
-  hash_map_type key_map{compute_hash_table_size(num_nodes),  // TODO reduce oversubscription
-                        cuco::empty_key{empty_node_index_sentinel},
-                        cuco::empty_value{empty_node_index_sentinel},
-                        cuco::erased_key{-2},
-                        hash_table_allocator_type{default_allocator{}, stream},
-                        stream.value()};
   // path compression is not used since extra writes make all map operations slow.
   auto const d_hasher = [node_level  = node_levels.begin(),
                          node_type   = node_type.begin(),
@@ -632,23 +620,26 @@ std::pair, rmm::device_uvector> hash_n
     return node_id1 == node_id2;
   };
 
+  constexpr size_type empty_node_index_sentinel = -1;
+  using hash_table_allocator_type = rmm::mr::stream_allocator_adaptor>;
+  using hasher_type               = decltype(d_hashed_cache);
+
+  auto key_set = cuco::experimental::static_set{
+    cuco::experimental::extent{compute_hash_table_size(num_nodes)},
+    cuco::empty_key{empty_node_index_sentinel},
+    d_equal,
+    cuco::experimental::linear_probing<1, hasher_type>{d_hashed_cache},
+    hash_table_allocator_type{default_allocator{}, stream},
+    stream.value()};
+
   // insert and convert node ids to unique set ids
-  auto const num_inserted = thrust::count_if(
-    rmm::exec_policy(stream),
-    thrust::make_counting_iterator(0),
-    thrust::make_counting_iterator(num_nodes),
-    [d_hashed_cache,
-     d_equal,
-     view       = key_map.get_device_mutable_view(),
-     uq_node_id = col_id.begin()] __device__(auto node_id) mutable {
-      auto it = view.insert_and_find(cuco::make_pair(node_id, node_id), d_hashed_cache, d_equal);
-      uq_node_id[node_id] = (it.first)->first.load(cuda::std::memory_order_relaxed);
-      return it.second;
-    });
+  auto nodes_itr = thrust::make_counting_iterator(0);
+  auto const num_columns = key_set.insert(nodes_itr, nodes_itr + num_nodes, stream.value());
 
-  auto const num_columns = num_inserted;  // key_map.get_size() is not updated.
   rmm::device_uvector unique_keys(num_columns, stream);
-  key_map.retrieve_all(unique_keys.begin(), thrust::make_discard_iterator(), stream.value());
+  rmm::device_uvector col_id(num_nodes, stream, mr);
+  key_set.find_async(nodes_itr, nodes_itr + num_nodes, col_id.begin(), stream.value());
+  std::ignore = key_set.retrieve_all(unique_keys.begin(), stream.value());
 
   return {std::move(col_id), std::move(unique_keys)};
 }

From d138dd0c9c365e03891d33cf4423a553629a3f6b Mon Sep 17 00:00:00 2001
From: GALI PREM SAGAR 
Date: Mon, 28 Aug 2023 08:27:34 -0500
Subject: [PATCH 004/150] Restore column type metadata with `dropna` to fix
 `factorize` API (#13980)

closes #13979

This PR restores column type metadata after a `dropna` call; the absence of
this restoration was causing an issue with `CategoricalColumn.dropna`, which
is necessary for the `factorize` API.
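A minimal sketch of the failure mode this addresses, mirroring the new test added below (output values elided; the point is that the call succeeds and preserves the categorical dtype on a column with nulls):

```python
import cudf

s = cudf.Series(["abc", "def", "abc", "a", None], dtype="category")

# factorize() drops nulls internally; before this fix, the internal dropna()
# returned a column stripped of its categorical type metadata, which broke
# the subsequent CategoricalColumn handling.
codes, uniques = s.factorize()
```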
Authors: - GALI PREM SAGAR (https://github.com/galipremsagar) Approvers: - Bradley Dice (https://github.com/bdice) URL: https://github.com/rapidsai/cudf/pull/13980 --- python/cudf/cudf/core/column/column.py | 2 +- python/cudf/cudf/tests/test_factorize.py | 20 +++++++++++++++++++- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/python/cudf/cudf/core/column/column.py b/python/cudf/cudf/core/column/column.py index eafcc18450d..b5332f35073 100644 --- a/python/cudf/cudf/core/column/column.py +++ b/python/cudf/cudf/core/column/column.py @@ -291,7 +291,7 @@ def any(self, skipna: bool = True) -> bool: def dropna(self, drop_nan: bool = False) -> ColumnBase: # The drop_nan argument is only used for numerical columns. - return drop_nulls([self])[0] + return drop_nulls([self])[0]._with_type_metadata(self.dtype) def to_arrow(self) -> pa.Array: """Convert to PyArrow Array diff --git a/python/cudf/cudf/tests/test_factorize.py b/python/cudf/cudf/tests/test_factorize.py index 90cf11d7dde..730bfdd8590 100644 --- a/python/cudf/cudf/tests/test_factorize.py +++ b/python/cudf/cudf/tests/test_factorize.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018-2022, NVIDIA CORPORATION. +# Copyright (c) 2018-2023, NVIDIA CORPORATION. import cupy as cp import numpy as np @@ -139,3 +139,21 @@ def test_factorize_result_classes(): assert isinstance(labels, cp.ndarray) assert isinstance(cats, cp.ndarray) + + +@pytest.mark.parametrize( + "data", + [ + ["abc", "def", "abc", "a", "def", None], + [10, 20, 100, -10, 0, 1, None, 10, 100], + ], +) +def test_category_dtype_factorize(data): + gs = cudf.Series(data, dtype="category") + ps = gs.to_pandas() + + actual_codes, actual_uniques = gs.factorize() + expected_codes, expected_uniques = ps.factorize() + + assert_eq(actual_codes, expected_codes) + assert_eq(actual_uniques, expected_uniques) From 8a78d68b8bdb6312d97e01b593814f27115a4727 Mon Sep 17 00:00:00 2001 From: GALI PREM SAGAR Date: Mon, 28 Aug 2023 10:23:05 -0500 Subject: [PATCH 005/150] Fix `CategoricalIndex` ordering in `Groupby.agg` when pandas-compatibility mode is enabled (#13978) closes #13974 This PR re-calculates the `CategoricalIndex`'s `categories` order to match the order in which the grouping has been done for the `CategoricalColumn`. This fix is being done only when pandas-compatibility mode is enabled. Authors: - GALI PREM SAGAR (https://github.com/galipremsagar) Approvers: - Matthew Roeschke (https://github.com/mroeschke) URL: https://github.com/rapidsai/cudf/pull/13978 --- python/cudf/cudf/core/groupby/groupby.py | 9 +++++++++ python/cudf/cudf/tests/test_groupby.py | 16 ++++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/python/cudf/cudf/core/groupby/groupby.py b/python/cudf/cudf/core/groupby/groupby.py index cf4c861c28f..38b07eca330 100644 --- a/python/cudf/cudf/core/groupby/groupby.py +++ b/python/cudf/cudf/core/groupby/groupby.py @@ -613,6 +613,15 @@ def agg(self, func): how="left", ) result = result.take(indices) + if isinstance(result._index, cudf.CategoricalIndex): + # Needs re-ordering the categories in the order + # they are after grouping. 
+ result._index = cudf.Index( + result._index._column.reorder_categories( + result._index._column._get_decategorized_column() + ), + name=result._index.name, + ) if not self._as_index: result = result.reset_index() diff --git a/python/cudf/cudf/tests/test_groupby.py b/python/cudf/cudf/tests/test_groupby.py index b48ce210104..2ab8b29f224 100644 --- a/python/cudf/cudf/tests/test_groupby.py +++ b/python/cudf/cudf/tests/test_groupby.py @@ -3440,3 +3440,19 @@ def test_groupby_consecutive_operations(): expected = pg.cumsum() assert_groupby_results_equal(actual, expected, check_dtype=False) + + +def test_categorical_grouping_pandas_compatibility(): + gdf = cudf.DataFrame( + { + "key": cudf.Series([2, 1, 3, 1, 1], dtype="category"), + "a": [0, 1, 3, 2, 3], + } + ) + pdf = gdf.to_pandas() + + with cudf.option_context("mode.pandas_compatible", True): + actual = gdf.groupby("key", sort=False).sum() + expected = pdf.groupby("key", sort=False).sum() + + assert_eq(actual, expected) From f9e35c7216ed433564d845298bb2e15f0c960461 Mon Sep 17 00:00:00 2001 From: GALI PREM SAGAR Date: Mon, 28 Aug 2023 10:25:42 -0500 Subject: [PATCH 006/150] Enable `codes` dtype parity in pandas-compatibility mode for `factorize` API (#13982) closes #13981 This PR enables parity with pandas `factorize` API by returning `codes` with `int64` dtype only in pandas-compatibility mode. When the pandas-compatibility mode is turned off, `cudf` will calculate the appropriate dtype that needs to be returned to save memory usage. Authors: - GALI PREM SAGAR (https://github.com/galipremsagar) Approvers: - Matthew Roeschke (https://github.com/mroeschke) URL: https://github.com/rapidsai/cudf/pull/13982 --- python/cudf/cudf/core/algorithms.py | 5 ++++- python/cudf/cudf/tests/test_factorize.py | 17 +++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/python/cudf/cudf/core/algorithms.py b/python/cudf/cudf/core/algorithms.py index ff604d3252b..a472142ece0 100644 --- a/python/cudf/cudf/core/algorithms.py +++ b/python/cudf/cudf/core/algorithms.py @@ -9,6 +9,7 @@ from cudf.core.indexed_frame import IndexedFrame from cudf.core.scalar import Scalar from cudf.core.series import Series +from cudf.options import get_option def factorize( @@ -137,7 +138,9 @@ def factorize( cats = cats.sort_values() labels = values._column._label_encoding( - cats=cats, na_sentinel=Scalar(na_sentinel) + cats=cats, + na_sentinel=Scalar(na_sentinel), + dtype="int64" if get_option("mode.pandas_compatible") else None, ).values return labels, cats.values if return_cupy_array else Index(cats) diff --git a/python/cudf/cudf/tests/test_factorize.py b/python/cudf/cudf/tests/test_factorize.py index 730bfdd8590..bf409b30090 100644 --- a/python/cudf/cudf/tests/test_factorize.py +++ b/python/cudf/cudf/tests/test_factorize.py @@ -122,6 +122,23 @@ def test_cudf_factorize_array(): np.testing.assert_array_equal(expect[1], got[1].get()) +@pytest.mark.parametrize("pandas_compatibility", [True, False]) +def test_factorize_code_pandas_compatibility(pandas_compatibility): + + psr = pd.Series([1, 2, 3, 4, 5]) + gsr = cudf.from_pandas(psr) + + expect = pd.factorize(psr) + with cudf.option_context("mode.pandas_compatible", pandas_compatibility): + got = cudf.factorize(gsr) + assert_eq(got[0], expect[0]) + assert_eq(got[1], expect[1]) + if pandas_compatibility: + assert got[0].dtype == expect[0].dtype + else: + assert got[0].dtype == cudf.dtype("int8") + + def test_factorize_result_classes(): data = [1, 2, 3] From 
724e42ae685e2063865378a5ae904f5cd6d8b3e3 Mon Sep 17 00:00:00 2001 From: brandon-b-miller <53796099+brandon-b-miller@users.noreply.github.com> Date: Mon, 28 Aug 2023 10:53:36 -0500 Subject: [PATCH 007/150] Fix integer overflow in shim `device_sum` functions (#13943) Closes https://github.com/rapidsai/cudf/issues/13873 Authors: - https://github.com/brandon-b-miller Approvers: - Lawrence Mitchell (https://github.com/wence-) - Bradley Dice (https://github.com/bdice) URL: https://github.com/rapidsai/cudf/pull/13943 --- python/cudf/cudf/tests/test_groupby.py | 17 +++++++++ python/cudf/udf_cpp/shim.cu | 50 +++++++++++--------------- 2 files changed, 37 insertions(+), 30 deletions(-) diff --git a/python/cudf/cudf/tests/test_groupby.py b/python/cudf/cudf/tests/test_groupby.py index 2ab8b29f224..042f0e1aa38 100644 --- a/python/cudf/cudf/tests/test_groupby.py +++ b/python/cudf/cudf/tests/test_groupby.py @@ -474,6 +474,23 @@ def func(group): run_groupby_apply_jit_test(data, func, ["a"]) +@pytest.mark.parametrize("dtype", ["int32"]) +def test_groupby_apply_jit_sum_integer_overflow(dtype): + max = np.iinfo(dtype).max + + data = DataFrame( + { + "a": [0, 0, 0], + "b": [max, max, max], + } + ) + + def func(group): + return group["b"].sum() + + run_groupby_apply_jit_test(data, func, ["a"]) + + @pytest.mark.parametrize("dtype", ["float64"]) @pytest.mark.parametrize("func", ["min", "max", "sum", "mean", "var", "std"]) @pytest.mark.parametrize("special_val", [np.nan, np.inf, -np.inf]) diff --git a/python/cudf/udf_cpp/shim.cu b/python/cudf/udf_cpp/shim.cu index 686e39e7036..cabca3154be 100644 --- a/python/cudf/udf_cpp/shim.cu +++ b/python/cudf/udf_cpp/shim.cu @@ -388,26 +388,30 @@ __device__ bool are_all_nans(cooperative_groups::thread_block const& block, return count == 0; } -template -__device__ void device_sum(cooperative_groups::thread_block const& block, - T const* data, - int64_t size, - T* sum) +template , int64_t, T>> +__device__ AccumT device_sum(cooperative_groups::thread_block const& block, + T const* data, + int64_t size) { - T local_sum = 0; + __shared__ AccumT block_sum; + if (block.thread_rank() == 0) { block_sum = 0; } + block.sync(); + + AccumT local_sum = 0; for (int64_t idx = block.thread_rank(); idx < size; idx += block.size()) { - local_sum += data[idx]; + local_sum += static_cast(data[idx]); } - cuda::atomic_ref ref{*sum}; + cuda::atomic_ref ref{block_sum}; ref.fetch_add(local_sum, cuda::std::memory_order_relaxed); block.sync(); + return block_sum; } -template -__device__ T BlockSum(T const* data, int64_t size) +template , int64_t, T>> +__device__ AccumT BlockSum(T const* data, int64_t size) { auto block = cooperative_groups::this_thread_block(); @@ -415,11 +419,7 @@ __device__ T BlockSum(T const* data, int64_t size) if (are_all_nans(block, data, size)) { return 0; } } - __shared__ T block_sum; - if (block.thread_rank() == 0) { block_sum = 0; } - block.sync(); - - device_sum(block, data, size, &block_sum); + auto block_sum = device_sum(block, data, size); return block_sum; } @@ -428,11 +428,7 @@ __device__ double BlockMean(T const* data, int64_t size) { auto block = cooperative_groups::this_thread_block(); - __shared__ T block_sum; - if (block.thread_rank() == 0) { block_sum = 0; } - block.sync(); - - device_sum(block, data, size, &block_sum); + auto block_sum = device_sum(block, data, size); return static_cast(block_sum) / static_cast(size); } @@ -443,17 +439,11 @@ __device__ double BlockCoVar(T const* lhs, T const* rhs, int64_t size) __shared__ 
double block_covar; - __shared__ T block_sum_lhs; - __shared__ T block_sum_rhs; - - if (block.thread_rank() == 0) { - block_covar = 0; - block_sum_lhs = 0; - block_sum_rhs = 0; - } + if (block.thread_rank() == 0) { block_covar = 0; } block.sync(); - device_sum(block, lhs, size, &block_sum_lhs); + auto block_sum_lhs = device_sum(block, lhs, size); + auto const mu_l = static_cast(block_sum_lhs) / static_cast(size); auto const mu_r = [=]() { if (lhs == rhs) { @@ -461,7 +451,7 @@ __device__ double BlockCoVar(T const* lhs, T const* rhs, int64_t size) // Thus we can assume mu_r = mu_l. return mu_l; } else { - device_sum(block, rhs, size, &block_sum_rhs); + auto block_sum_rhs = device_sum(block, rhs, size); return static_cast(block_sum_rhs) / static_cast(size); } }(); From 3c8ce98e00e5a2b686cda690620f2a519d2a8e3d Mon Sep 17 00:00:00 2001 From: David Wendt <45795991+davidwendt@users.noreply.github.com> Date: Mon, 28 Aug 2023 12:53:18 -0400 Subject: [PATCH 008/150] Use cudf::thread_index_type in strings custom kernels (#13968) Adds `cudf::thread_index_type` usage when calculating the thread index in custom kernels in `src/strings/attributes.cu` and `src/strings/convert/convert_urls.cu` Authors: - David Wendt (https://github.com/davidwendt) Approvers: - Vukasin Milovanovic (https://github.com/vuule) - Nghia Truong (https://github.com/ttnghia) URL: https://github.com/rapidsai/cudf/pull/13968 --- cpp/src/strings/attributes.cu | 10 ++-- cpp/src/strings/convert/convert_urls.cu | 68 ++++++++++++------------- 2 files changed, 39 insertions(+), 39 deletions(-) diff --git a/cpp/src/strings/attributes.cu b/cpp/src/strings/attributes.cu index 3a1b7044b56..8dc150998ee 100644 --- a/cpp/src/strings/attributes.cu +++ b/cpp/src/strings/attributes.cu @@ -111,14 +111,14 @@ std::unique_ptr counts_fn(strings_column_view const& strings, __global__ void count_characters_parallel_fn(column_device_view const d_strings, size_type* d_lengths) { - size_type const idx = static_cast(threadIdx.x + blockIdx.x * blockDim.x); - using warp_reduce = cub::WarpReduce; + auto const idx = cudf::detail::grid_1d::global_thread_id(); + using warp_reduce = cub::WarpReduce; __shared__ typename warp_reduce::TempStorage temp_storage; if (idx >= (d_strings.size() * cudf::detail::warp_size)) { return; } - auto const str_idx = idx / cudf::detail::warp_size; - auto const lane_idx = idx % cudf::detail::warp_size; + auto const str_idx = static_cast(idx / cudf::detail::warp_size); + auto const lane_idx = static_cast(idx % cudf::detail::warp_size); if (d_strings.is_null(str_idx)) { d_lengths[str_idx] = 0; return; @@ -126,7 +126,7 @@ __global__ void count_characters_parallel_fn(column_device_view const d_strings, auto const d_str = d_strings.element(str_idx); auto const str_ptr = d_str.data(); - auto count = 0; + size_type count = 0; for (auto i = lane_idx; i < d_str.size_bytes(); i += cudf::detail::warp_size) { count += static_cast(is_begin_utf8_char(str_ptr[i])); } diff --git a/cpp/src/strings/convert/convert_urls.cu b/cpp/src/strings/convert/convert_urls.cu index 401a04cdc9d..71b6c09310e 100644 --- a/cpp/src/strings/convert/convert_urls.cu +++ b/cpp/src/strings/convert/convert_urls.cu @@ -195,7 +195,7 @@ __forceinline__ __device__ char escaped_sequence_to_byte(char const* const ptr) * @param[in] in_strings Input string column. * @param[out] out_counts Number of characters in each decode URL. 
*/ -template +template __global__ void url_decode_char_counter(column_device_view const in_strings, size_type* const out_counts) { @@ -203,12 +203,12 @@ __global__ void url_decode_char_counter(column_device_view const in_strings, __shared__ char temporary_buffer[num_warps_per_threadblock][char_block_size + halo_size]; __shared__ typename cub::WarpReduce::TempStorage cub_storage[num_warps_per_threadblock]; - int const global_thread_id = blockIdx.x * blockDim.x + threadIdx.x; - int const global_warp_id = global_thread_id / cudf::detail::warp_size; - int const local_warp_id = threadIdx.x / cudf::detail::warp_size; - int const warp_lane = threadIdx.x % cudf::detail::warp_size; - int const nwarps = gridDim.x * blockDim.x / cudf::detail::warp_size; - char* in_chars_shared = temporary_buffer[local_warp_id]; + auto const global_thread_id = cudf::detail::grid_1d::global_thread_id(); + auto const global_warp_id = static_cast(global_thread_id / cudf::detail::warp_size); + auto const local_warp_id = static_cast(threadIdx.x / cudf::detail::warp_size); + auto const warp_lane = static_cast(threadIdx.x % cudf::detail::warp_size); + auto const nwarps = static_cast(gridDim.x * blockDim.x / cudf::detail::warp_size); + char* in_chars_shared = temporary_buffer[local_warp_id]; // Loop through strings, and assign each string to a warp. for (size_type row_idx = global_warp_id; row_idx < in_strings.size(); row_idx += nwarps) { @@ -220,11 +220,11 @@ __global__ void url_decode_char_counter(column_device_view const in_strings, auto const in_string = in_strings.element(row_idx); auto const in_chars = in_string.data(); auto const string_length = in_string.size_bytes(); - int const nblocks = cudf::util::div_rounding_up_unsafe(string_length, char_block_size); + auto const nblocks = cudf::util::div_rounding_up_unsafe(string_length, char_block_size); size_type escape_char_count = 0; - for (int block_idx = 0; block_idx < nblocks; block_idx++) { - int const string_length_block = + for (size_type block_idx = 0; block_idx < nblocks; block_idx++) { + auto const string_length_block = std::min(char_block_size, string_length - char_block_size * block_idx); // Each warp collectively loads input characters of the current block to the shared memory. @@ -233,18 +233,18 @@ __global__ void url_decode_char_counter(column_device_view const in_strings, // are added after the end of the block. If the cell is beyond the end of the string, 0s are // filled in to make sure the last two characters of the string are not the start of an // escaped sequence. - for (int char_idx = warp_lane; char_idx < string_length_block + halo_size; + for (auto char_idx = warp_lane; char_idx < string_length_block + halo_size; char_idx += cudf::detail::warp_size) { - int const in_idx = block_idx * char_block_size + char_idx; + auto const in_idx = block_idx * char_block_size + char_idx; in_chars_shared[char_idx] = in_idx < string_length ? in_chars[in_idx] : 0; } __syncwarp(); // `char_idx_start` represents the start character index of the current warp. - for (int char_idx_start = 0; char_idx_start < string_length_block; + for (size_type char_idx_start = 0; char_idx_start < string_length_block; char_idx_start += cudf::detail::warp_size) { - int const char_idx = char_idx_start + warp_lane; + auto const char_idx = char_idx_start + warp_lane; int8_t const is_ichar_escape_char = (char_idx < string_length_block && is_escape_char(in_chars_shared + char_idx)) ? 
1 : 0; @@ -277,7 +277,7 @@ __global__ void url_decode_char_counter(column_device_view const in_strings, * @param[out] out_chars Character buffer for the output string column. * @param[in] out_offsets Offset value of each string associated with `out_chars`. */ -template +template __global__ void url_decode_char_replacer(column_device_view const in_strings, char* const out_chars, size_type const* const out_offsets) @@ -285,14 +285,14 @@ __global__ void url_decode_char_replacer(column_device_view const in_strings, constexpr int halo_size = 2; __shared__ char temporary_buffer[num_warps_per_threadblock][char_block_size + halo_size * 2]; __shared__ typename cub::WarpScan::TempStorage cub_storage[num_warps_per_threadblock]; - __shared__ int out_idx[num_warps_per_threadblock]; + __shared__ size_type out_idx[num_warps_per_threadblock]; - int const global_thread_id = blockIdx.x * blockDim.x + threadIdx.x; - int const global_warp_id = global_thread_id / cudf::detail::warp_size; - int const local_warp_id = threadIdx.x / cudf::detail::warp_size; - int const warp_lane = threadIdx.x % cudf::detail::warp_size; - int const nwarps = gridDim.x * blockDim.x / cudf::detail::warp_size; - char* in_chars_shared = temporary_buffer[local_warp_id]; + auto const global_thread_id = cudf::detail::grid_1d::global_thread_id(); + auto const global_warp_id = static_cast(global_thread_id / cudf::detail::warp_size); + auto const local_warp_id = static_cast(threadIdx.x / cudf::detail::warp_size); + auto const warp_lane = static_cast(threadIdx.x % cudf::detail::warp_size); + auto const nwarps = static_cast(gridDim.x * blockDim.x / cudf::detail::warp_size); + char* in_chars_shared = temporary_buffer[local_warp_id]; // Loop through strings, and assign each string to a warp for (size_type row_idx = global_warp_id; row_idx < in_strings.size(); row_idx += nwarps) { @@ -302,31 +302,31 @@ __global__ void url_decode_char_replacer(column_device_view const in_strings, auto const in_chars = in_string.data(); auto const string_length = in_string.size_bytes(); auto out_chars_string = out_chars + out_offsets[row_idx]; - int const nblocks = cudf::util::div_rounding_up_unsafe(string_length, char_block_size); + auto const nblocks = cudf::util::div_rounding_up_unsafe(string_length, char_block_size); // Use the last thread of the warp to initialize `out_idx` to 0. if (warp_lane == cudf::detail::warp_size - 1) { out_idx[local_warp_id] = 0; } - for (int block_idx = 0; block_idx < nblocks; block_idx++) { - int const string_length_block = + for (size_type block_idx = 0; block_idx < nblocks; block_idx++) { + auto const string_length_block = std::min(char_block_size, string_length - char_block_size * block_idx); // Each warp collectively loads input characters of the current block to shared memory. // Two halo cells before and after the block are added. The halo cells are used to test // whether the current location as well as the previous two locations are escape characters, // without branches. - for (int char_idx = warp_lane; char_idx < string_length_block + halo_size * 2; + for (auto char_idx = warp_lane; char_idx < string_length_block + halo_size * 2; char_idx += cudf::detail::warp_size) { - int const in_idx = block_idx * char_block_size + char_idx - halo_size; + auto const in_idx = block_idx * char_block_size + char_idx - halo_size; in_chars_shared[char_idx] = in_idx >= 0 && in_idx < string_length ? in_chars[in_idx] : 0; } __syncwarp(); // `char_idx_start` represents the start character index of the current warp. 
- for (int char_idx_start = 0; char_idx_start < string_length_block; + for (size_type char_idx_start = 0; char_idx_start < string_length_block; char_idx_start += cudf::detail::warp_size) { - int const char_idx = char_idx_start + warp_lane; + auto const char_idx = char_idx_start + warp_lane; // If the current character is part of an escape sequence starting at the previous two // locations, the thread with the starting location should output the escaped character, and // the current thread should not output a character. @@ -375,10 +375,10 @@ std::unique_ptr url_decode(strings_column_view const& strings, size_type strings_count = strings.size(); if (strings_count == 0) return make_empty_column(type_id::STRING); - constexpr int num_warps_per_threadblock = 4; - constexpr int threadblock_size = num_warps_per_threadblock * cudf::detail::warp_size; - constexpr int char_block_size = 256; - int const num_threadblocks = + constexpr size_type num_warps_per_threadblock = 4; + constexpr size_type threadblock_size = num_warps_per_threadblock * cudf::detail::warp_size; + constexpr size_type char_block_size = 256; + auto const num_threadblocks = std::min(65536, cudf::util::div_rounding_up_unsafe(strings_count, num_warps_per_threadblock)); auto offset_count = strings_count + 1; @@ -386,7 +386,7 @@ std::unique_ptr url_decode(strings_column_view const& strings, // build offsets column auto offsets_column = make_numeric_column( - data_type{type_id::INT32}, offset_count, mask_state::UNALLOCATED, stream, mr); + data_type{type_to_id()}, offset_count, mask_state::UNALLOCATED, stream, mr); // count number of bytes in each string after decoding and store it in offsets_column auto offsets_view = offsets_column->view(); From 70fbec809a45fb4d462d7f3ef22464d00d2640e0 Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Mon, 28 Aug 2023 20:32:38 -0500 Subject: [PATCH 009/150] Expose streams in public concatenate APIs (#13987) Contributes to #925 Authors: - Vyas Ramasubramani (https://github.com/vyasr) Approvers: - Mark Harris (https://github.com/harrism) - Divye Gala (https://github.com/divyegala) URL: https://github.com/rapidsai/cudf/pull/13987 --- cpp/include/cudf/concatenate.hpp | 7 ++++ cpp/include/cudf_test/column_wrapper.hpp | 10 ++--- cpp/src/copying/concatenate.cu | 9 +++-- cpp/tests/CMakeLists.txt | 1 + cpp/tests/streams/concatenate_test.cpp | 51 ++++++++++++++++++++++++ 5 files changed, 69 insertions(+), 9 deletions(-) create mode 100644 cpp/tests/streams/concatenate_test.cpp diff --git a/cpp/include/cudf/concatenate.hpp b/cpp/include/cudf/concatenate.hpp index 11c6a02c225..9ee55275a5e 100644 --- a/cpp/include/cudf/concatenate.hpp +++ b/cpp/include/cudf/concatenate.hpp @@ -17,6 +17,7 @@ #include #include +#include #include #include @@ -40,10 +41,12 @@ namespace cudf { * * @param views Column views whose bitmasks will be concatenated * @param mr Device memory resource used for allocating the returned memory + * @param stream CUDA stream used for device memory operations and kernel launches * @return Bitmasks of all the column views in the views vector */ rmm::device_buffer concatenate_masks( host_span views, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -53,12 +56,14 @@ rmm::device_buffer concatenate_masks( * @throws std::overflow_error If the total number of output rows exceeds cudf::size_type * * @param columns_to_concat Column views to be concatenated into a single column + 
* @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return A single column having all the rows from the elements of `columns_to_concat` respectively * in the same order. */ std::unique_ptr concatenate( host_span columns_to_concat, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -80,12 +85,14 @@ std::unique_ptr concatenate( * @throws std::overflow_error If the total number of output rows exceeds cudf::size_type * * @param tables_to_concat Table views to be concatenated into a single table + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned table's device memory * @return A single table having all the rows from the elements of * `tables_to_concat` respectively in the same order. */ std::unique_ptr concatenate( host_span tables_to_concat, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of group diff --git a/cpp/include/cudf_test/column_wrapper.hpp b/cpp/include/cudf_test/column_wrapper.hpp index 1e311322de1..cc8cac35ef4 100644 --- a/cpp/include/cudf_test/column_wrapper.hpp +++ b/cpp/include/cudf_test/column_wrapper.hpp @@ -1596,12 +1596,10 @@ class lists_column_wrapper : public detail::column_wrapper { thrust::copy_if( std::cbegin(cols), std::cend(cols), valids, std::back_inserter(children), thrust::identity{}); - // TODO: Once the public concatenate API exposes streams, use that instead. - auto data = - children.empty() - ? cudf::empty_like(expected_hierarchy) - : cudf::detail::concatenate( - children, cudf::test::get_default_stream(), rmm::mr::get_current_device_resource()); + auto data = children.empty() ? cudf::empty_like(expected_hierarchy) + : cudf::concatenate(children, + cudf::test::get_default_stream(), + rmm::mr::get_current_device_resource()); // increment depth depth = expected_depth + 1; diff --git a/cpp/src/copying/concatenate.cu b/cpp/src/copying/concatenate.cu index a53ec295512..35f06e47436 100644 --- a/cpp/src/copying/concatenate.cu +++ b/cpp/src/copying/concatenate.cu @@ -574,25 +574,28 @@ rmm::device_buffer concatenate_masks(host_span views, } // namespace detail rmm::device_buffer concatenate_masks(host_span views, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::concatenate_masks(views, cudf::get_default_stream(), mr); + return detail::concatenate_masks(views, stream, mr); } // Concatenates the elements from a vector of column_views std::unique_ptr concatenate(host_span columns_to_concat, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::concatenate(columns_to_concat, cudf::get_default_stream(), mr); + return detail::concatenate(columns_to_concat, stream, mr); } std::unique_ptr
concatenate(host_span tables_to_concat, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::concatenate(tables_to_concat, cudf::get_default_stream(), mr); + return detail::concatenate(tables_to_concat, stream, mr); } } // namespace cudf diff --git a/cpp/tests/CMakeLists.txt b/cpp/tests/CMakeLists.txt index d9e34c739ea..c97e2a58ca4 100644 --- a/cpp/tests/CMakeLists.txt +++ b/cpp/tests/CMakeLists.txt @@ -623,6 +623,7 @@ ConfigureTest( ConfigureTest(STREAM_HASHING_TEST streams/hash_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_COPYING_TEST streams/copying_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_GROUPBY_TEST streams/groupby_test.cpp STREAM_MODE testing) +ConfigureTest(STREAM_CONCATENATE_TEST streams/concatenate_test.cpp STREAM_MODE testing) # ################################################################################################## # Install tests #################################################################################### diff --git a/cpp/tests/streams/concatenate_test.cpp b/cpp/tests/streams/concatenate_test.cpp new file mode 100644 index 00000000000..6e6ff58686f --- /dev/null +++ b/cpp/tests/streams/concatenate_test.cpp @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include +#include +#include + +class ConcatenateTest : public cudf::test::BaseFixture {}; + +TEST_F(ConcatenateTest, Column) +{ + cudf::test::fixed_width_column_wrapper const input1({0, 0, 0, 0, 0}); + cudf::test::fixed_width_column_wrapper const input2({1, 1, 1, 1, 1}); + std::vector views{input1, input2}; + auto result = cudf::concatenate(views, cudf::test::get_default_stream()); +} + +TEST_F(ConcatenateTest, Table) +{ + cudf::test::fixed_width_column_wrapper const input1({0, 0, 0, 0, 0}); + cudf::test::fixed_width_column_wrapper const input2({1, 1, 1, 1, 1}); + cudf::table_view tbl1({input1, input2}); + cudf::table_view tbl2({input2, input1}); + std::vector views{tbl1, tbl2}; + auto result = cudf::concatenate(views, cudf::test::get_default_stream()); +} + +TEST_F(ConcatenateTest, Masks) +{ + cudf::test::fixed_width_column_wrapper const input1( + {{0, 0, 0, 0, 0}, {false, false, false, false, false}}); + cudf::test::fixed_width_column_wrapper const input2( + {{0, 0, 0, 0, 0}, {true, true, true, true, true}}); + std::vector views{input1, input2}; + auto result = cudf::concatenate_masks(views, cudf::test::get_default_stream()); +} From cd56cc2b4cc47a1d0c63e56fac945a66905c28df Mon Sep 17 00:00:00 2001 From: AJ Schmidt Date: Tue, 29 Aug 2023 10:45:53 -0400 Subject: [PATCH 010/150] Use `copy-pr-bot` (#13970) This PR replaces the `copy_prs` functionality from the `ops-bot` with the new dedicated `copy-pr-bot` GitHub application. Thorough documentation for the new `copy-pr-bot` application can be viewed below. - https://docs.gha-runners.nvidia.com/apps/copy-pr-bot/ **Important**: `copy-pr-bot` enforces signed commits. 
If an organization member opens a PR that contains unsigned commits, it will be deemed untrusted and therefore require an `/ok to test` comment. See the GitHub docs [here](https://docs.github.com/en/authentication/managing-commit-signature-verification/about-commit-signature-verification) for information on how to set up commit signing. Any time a PR is deemed untrusted, it will receive a comment that looks like this: https://github.com/rapidsai/ci-imgs/pull/63#issuecomment-1688973208. Every subsequent commit on an untrusted PR will require an additional `/ok to test` comment. Any existing PRs that have unsigned commits after this change is merged will require an `/ok to test` comment for each subsequent commit _or_ the PR can be rebased to include signed commits as mentioned in the docs below: https://docs.gha-runners.nvidia.com/cpr/contributors. This information is all included on the documentation page linked above. _I've skipped CI on this PR since it's not a change that is tested._ [skip ci] --- .github/copy-pr-bot.yaml | 4 ++++ .github/ops-bot.yaml | 1 - 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 .github/copy-pr-bot.yaml diff --git a/.github/copy-pr-bot.yaml b/.github/copy-pr-bot.yaml new file mode 100644 index 00000000000..895ba83ee54 --- /dev/null +++ b/.github/copy-pr-bot.yaml @@ -0,0 +1,4 @@ +# Configuration file for `copy-pr-bot` GitHub App +# https://docs.gha-runners.nvidia.com/apps/copy-pr-bot/ + +enabled: true diff --git a/.github/ops-bot.yaml b/.github/ops-bot.yaml index 2d1444c595d..9a0b4155035 100644 --- a/.github/ops-bot.yaml +++ b/.github/ops-bot.yaml @@ -5,5 +5,4 @@ auto_merger: true branch_checker: true label_checker: true release_drafter: true -copy_prs: true recently_updated: true From e2e92c46741ea6ef71a657a2cdbc3c010497943e Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Tue, 29 Aug 2023 13:04:25 -0500 Subject: [PATCH 011/150] Expose streams in public filling APIs (#13990) Contributes to #925 Authors: - Vyas Ramasubramani (https://github.com/vyasr) Approvers: - Nghia Truong (https://github.com/ttnghia) - Bradley Dice (https://github.com/bdice) URL: https://github.com/rapidsai/cudf/pull/13990 --- cpp/include/cudf/filling.hpp | 17 ++++- cpp/src/filling/calendrical_month_sequence.cu | 5 +- cpp/src/filling/fill.cu | 8 +- cpp/src/filling/repeat.cu | 6 +- cpp/src/filling/sequence.cu | 6 +- cpp/tests/CMakeLists.txt | 1 + cpp/tests/streams/filling_test.cpp | 76 +++++++++++++++++++ 7 files changed, 109 insertions(+), 10 deletions(-) create mode 100644 cpp/tests/streams/filling_test.cpp diff --git a/cpp/include/cudf/filling.hpp b/cpp/include/cudf/filling.hpp index a82bb9d1a48..1268f488919 100644 --- a/cpp/include/cudf/filling.hpp +++ b/cpp/include/cudf/filling.hpp @@ -17,6 +17,7 @@ #pragma once #include +#include #include @@ -54,11 +55,13 @@ namespace cudf { * @param begin The starting index of the fill range (inclusive) * @param end The index of the last element in the fill range (exclusive) * @param value The scalar value to fill + * @param stream CUDA stream used for device memory operations and kernel launches */ void fill_in_place(mutable_column_view& destination, size_type begin, size_type end, - scalar const& value); + scalar const& value, + rmm::cuda_stream_view stream = cudf::get_default_stream()); /** * @brief Fills a range of elements in a column out-of-place with a scalar @@ -79,6 +82,7 @@ void fill_in_place(mutable_column_view& destination, * @param begin The starting index of the fill range 
(inclusive) * @param end The index of the last element in the fill range (exclusive) * @param value The scalar value to fill + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return The result output column */ @@ -87,6 +91,7 @@ std::unique_ptr fill( size_type begin, size_type end, scalar const& value, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -113,12 +118,14 @@ std::unique_ptr fill( * * @param input_table Input table * @param count Non-nullable column of an integral type + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned table's device memory * @return The result table containing the repetitions */ std::unique_ptr
repeat( table_view const& input_table, column_view const& count, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -136,12 +143,14 @@ std::unique_ptr
repeat( * * @param input_table Input table * @param count Number of repetitions + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned table's device memory * @return The result table containing the repetitions */ std::unique_ptr
repeat( table_view const& input_table, size_type count, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -164,6 +173,7 @@ std::unique_ptr
repeat( * @param size Size of the output column * @param init First value in the sequence * @param step Increment value + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return The result column containing the generated sequence */ @@ -171,6 +181,7 @@ std::unique_ptr sequence( size_type size, scalar const& init, scalar const& step, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -190,12 +201,14 @@ std::unique_ptr sequence( * * @param size Size of the output column * @param init First value in the sequence + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return The result column containing the generated sequence */ std::unique_ptr sequence( size_type size, scalar const& init, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -217,6 +230,7 @@ std::unique_ptr sequence( * @param size Number of timestamps to generate * @param init The initial timestamp * @param months Months to increment + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * * @return Timestamps column with sequences of months @@ -225,6 +239,7 @@ std::unique_ptr calendrical_month_sequence( size_type size, scalar const& init, size_type months, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of group diff --git a/cpp/src/filling/calendrical_month_sequence.cu b/cpp/src/filling/calendrical_month_sequence.cu index f45634a615e..80badb7d566 100644 --- a/cpp/src/filling/calendrical_month_sequence.cu +++ b/cpp/src/filling/calendrical_month_sequence.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022, NVIDIA CORPORATION. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -40,10 +40,11 @@ std::unique_ptr calendrical_month_sequence(size_type size, std::unique_ptr calendrical_month_sequence(size_type size, scalar const& init, size_type months, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::calendrical_month_sequence(size, init, months, cudf::get_default_stream(), mr); + return detail::calendrical_month_sequence(size, init, months, stream, mr); } } // namespace cudf diff --git a/cpp/src/filling/fill.cu b/cpp/src/filling/fill.cu index 342392c773e..3d84db121fc 100644 --- a/cpp/src/filling/fill.cu +++ b/cpp/src/filling/fill.cu @@ -246,20 +246,22 @@ std::unique_ptr fill(column_view const& input, void fill_in_place(mutable_column_view& destination, size_type begin, size_type end, - scalar const& value) + scalar const& value, + rmm::cuda_stream_view stream) { CUDF_FUNC_RANGE(); - return detail::fill_in_place(destination, begin, end, value, cudf::get_default_stream()); + return detail::fill_in_place(destination, begin, end, value, stream); } std::unique_ptr fill(column_view const& input, size_type begin, size_type end, scalar const& value, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::fill(input, begin, end, value, cudf::get_default_stream(), mr); + return detail::fill(input, begin, end, value, stream, mr); } } // namespace cudf diff --git a/cpp/src/filling/repeat.cu b/cpp/src/filling/repeat.cu index 2be15a06c0d..677d9a09515 100644 --- a/cpp/src/filling/repeat.cu +++ b/cpp/src/filling/repeat.cu @@ -156,18 +156,20 @@ std::unique_ptr
repeat(table_view const& input_table, std::unique_ptr
repeat(table_view const& input_table, column_view const& count, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::repeat(input_table, count, cudf::get_default_stream(), mr); + return detail::repeat(input_table, count, stream, mr); } std::unique_ptr
repeat(table_view const& input_table, size_type count, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::repeat(input_table, count, cudf::get_default_stream(), mr); + return detail::repeat(input_table, count, stream, mr); } } // namespace cudf diff --git a/cpp/src/filling/sequence.cu b/cpp/src/filling/sequence.cu index b4bab369c61..99a17f8b0e0 100644 --- a/cpp/src/filling/sequence.cu +++ b/cpp/src/filling/sequence.cu @@ -150,18 +150,20 @@ std::unique_ptr sequence(size_type size, std::unique_ptr sequence(size_type size, scalar const& init, scalar const& step, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::sequence(size, init, step, cudf::get_default_stream(), mr); + return detail::sequence(size, init, step, stream, mr); } std::unique_ptr sequence(size_type size, scalar const& init, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::sequence(size, init, cudf::get_default_stream(), mr); + return detail::sequence(size, init, stream, mr); } } // namespace cudf diff --git a/cpp/tests/CMakeLists.txt b/cpp/tests/CMakeLists.txt index c97e2a58ca4..8a0aa27b175 100644 --- a/cpp/tests/CMakeLists.txt +++ b/cpp/tests/CMakeLists.txt @@ -624,6 +624,7 @@ ConfigureTest(STREAM_HASHING_TEST streams/hash_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_COPYING_TEST streams/copying_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_GROUPBY_TEST streams/groupby_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_CONCATENATE_TEST streams/concatenate_test.cpp STREAM_MODE testing) +ConfigureTest(STREAM_FILLING_TEST streams/filling_test.cpp STREAM_MODE testing) # ################################################################################################## # Install tests #################################################################################### diff --git a/cpp/tests/streams/filling_test.cpp b/cpp/tests/streams/filling_test.cpp new file mode 100644 index 00000000000..b822743d4ca --- /dev/null +++ b/cpp/tests/streams/filling_test.cpp @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include + +#include +#include +#include + +class FillingTest : public cudf::test::BaseFixture {}; + +TEST_F(FillingTest, FillInPlace) +{ + cudf::test::fixed_width_column_wrapper col({0, 0, 0, 0, 0}); + auto scalar = cudf::numeric_scalar(5, true, cudf::test::get_default_stream()); + cudf::mutable_column_view mut_view = col; + cudf::fill_in_place(mut_view, 0, 4, scalar, cudf::test::get_default_stream()); +} + +TEST_F(FillingTest, Fill) +{ + cudf::test::fixed_width_column_wrapper const col({0, 0, 0, 0, 0}); + auto scalar = cudf::numeric_scalar(5, true, cudf::test::get_default_stream()); + cudf::fill(col, 0, 4, scalar, cudf::test::get_default_stream()); +} + +TEST_F(FillingTest, RepeatVariable) +{ + cudf::test::fixed_width_column_wrapper const col({0, 0, 0, 0, 0}); + cudf::table_view const table({col}); + cudf::test::fixed_width_column_wrapper const counts({1, 2, 3, 4, 5}); + cudf::repeat(table, counts, cudf::test::get_default_stream()); +} + +TEST_F(FillingTest, RepeatConst) +{ + cudf::test::fixed_width_column_wrapper const col({0, 0, 0, 0, 0}); + cudf::table_view const table({col}); + cudf::repeat(table, 5, cudf::test::get_default_stream()); +} + +TEST_F(FillingTest, SequenceStep) +{ + auto init = cudf::numeric_scalar(5, true, cudf::test::get_default_stream()); + auto step = cudf::numeric_scalar(2, true, cudf::test::get_default_stream()); + cudf::sequence(10, init, step, cudf::test::get_default_stream()); +} + +TEST_F(FillingTest, Sequence) +{ + auto init = cudf::numeric_scalar(5, true, cudf::test::get_default_stream()); + cudf::sequence(10, init, cudf::test::get_default_stream()); +} + +TEST_F(FillingTest, CalendricalMonthSequence) +{ + cudf::timestamp_scalar init( + 1629852896L, true, cudf::test::get_default_stream()); // 2021-08-25 00:54:56 GMT + + cudf::calendrical_month_sequence(10, init, 2, cudf::test::get_default_stream()); +} From 14522003f3bbd8041e66b1ff34077acdae4869ba Mon Sep 17 00:00:00 2001 From: nvdbaranec <56695930+nvdbaranec@users.noreply.github.com> Date: Tue, 29 Aug 2023 14:33:57 -0500 Subject: [PATCH 012/150] Use cudf::thread_index_type in get_json_object and tdigest kernels (#13962) Convert the grid-stride loop in `get_json_object_kernel` to use `cudf::thread_index_type`. Convert `compute_percentiles_kernel` to use `cudf::thread_index_type`. ## Checklist - [x] I am familiar with the [Contributing Guidelines](https://github.com/rapidsai/cudf/blob/HEAD/CONTRIBUTING.md). - [x] New or existing tests cover these changes. - [x] The documentation is up to date with these changes. 
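For reference, the converted pattern looks roughly like the grid-stride loop below. `cudf::thread_index_type` is a 64-bit index type, so computing the global thread id and the stride with it avoids the overflow that 32-bit `int`/`size_type` arithmetic can hit on very large grids. This is an illustrative sketch only (not code from this patch); the header paths are assumptions:

```cpp
#include <cudf/detail/utilities/cuda.cuh>  // assumed location of cudf::detail::grid_1d
#include <cudf/types.hpp>                  // cudf::thread_index_type, cudf::size_type

__global__ void scale_kernel(double const* in, double* out, cudf::size_type size)
{
  // 64-bit global thread id and stride
  auto tid          = cudf::detail::grid_1d::global_thread_id();
  auto const stride = cudf::thread_index_type{blockDim.x} * cudf::thread_index_type{gridDim.x};
  while (tid < size) {
    out[tid] = in[tid] * 2.0;  // hypothetical per-element work
    tid += stride;
  }
}
```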
Authors: - https://github.com/nvdbaranec Approvers: - Bradley Dice (https://github.com/bdice) - MithunR (https://github.com/mythrocks) - Mike Wilson (https://github.com/hyperbolic2346) - Karthikeyan (https://github.com/karthikeyann) --- cpp/src/quantiles/tdigest/tdigest.cu | 2 +- cpp/src/strings/json/json_path.cu | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cpp/src/quantiles/tdigest/tdigest.cu b/cpp/src/quantiles/tdigest/tdigest.cu index cfdb386ff64..79a25f79f60 100644 --- a/cpp/src/quantiles/tdigest/tdigest.cu +++ b/cpp/src/quantiles/tdigest/tdigest.cu @@ -74,7 +74,7 @@ __global__ void compute_percentiles_kernel(device_span tdigest_ double const* cumulative_weight_, double* output) { - int const tid = threadIdx.x + blockIdx.x * blockDim.x; + auto const tid = cudf::detail::grid_1d::global_thread_id(); auto const num_tdigests = tdigest_offsets.size() - 1; auto const tdigest_index = tid / percentiles.size(); diff --git a/cpp/src/strings/json/json_path.cu b/cpp/src/strings/json/json_path.cu index be5b089c6e0..2d2691e0518 100644 --- a/cpp/src/strings/json/json_path.cu +++ b/cpp/src/strings/json/json_path.cu @@ -907,8 +907,8 @@ __launch_bounds__(block_size) __global__ thrust::optional out_valid_count, get_json_object_options options) { - size_type tid = threadIdx.x + (blockDim.x * blockIdx.x); - size_type stride = blockDim.x * gridDim.x; + auto tid = cudf::detail::grid_1d::global_thread_id(); + auto const stride = cudf::thread_index_type{blockDim.x} * cudf::thread_index_type{gridDim.x}; size_type warp_valid_count{0}; From 7b9f4a17579befd902d1c30af38daa5fe493e335 Mon Sep 17 00:00:00 2001 From: Gera Shegalov Date: Tue, 29 Aug 2023 20:59:04 -0700 Subject: [PATCH 013/150] Use HostMemoryAllocator in jni::allocate_host_buffer (#13975) Fixes #13940 Contributes to NVIDIA/spark-rapids#8889 - Pass an explicit host memory allocator to `jni::allocate_host_buffer` - Consistently check for errors from NewGlobalRef - Consistently guard against DeleteGlobalRef on a null reference Authors: - Gera Shegalov (https://github.com/gerashegalov) Approvers: - https://github.com/nvdbaranec - Jason Lowe (https://github.com/jlowe) URL: https://github.com/rapidsai/cudf/pull/13975 --- java/src/main/java/ai/rapids/cudf/Table.java | 84 +++++++++++++++---- java/src/main/native/include/jni_utils.hpp | 15 ++++ .../main/native/src/ContiguousTableJni.cpp | 10 +-- java/src/main/native/src/CudfJni.cpp | 21 ++--- java/src/main/native/src/RmmJni.cpp | 7 +- java/src/main/native/src/TableJni.cpp | 59 ++++++------- java/src/main/native/src/cudf_jni_apis.hpp | 3 +- .../main/native/src/jni_writer_data_sink.hpp | 29 +++---- 8 files changed, 133 insertions(+), 95 deletions(-) diff --git a/java/src/main/java/ai/rapids/cudf/Table.java b/java/src/main/java/ai/rapids/cudf/Table.java index 57189b052b6..b2eb33d47dc 100644 --- a/java/src/main/java/ai/rapids/cudf/Table.java +++ b/java/src/main/java/ai/rapids/cudf/Table.java @@ -336,7 +336,9 @@ private static native long writeParquetBufferBegin(String[] columnNames, boolean[] isBinaryValues, boolean[] hasParquetFieldIds, int[] parquetFieldIds, - HostBufferConsumer consumer) throws CudfException; + HostBufferConsumer consumer, + HostMemoryAllocator hostMemoryAllocator + ) throws CudfException; /** * Write out a table to an open handle.
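// Illustrative usage sketch (not part of this diff): the new overloads let a caller supply
// its own HostMemoryAllocator for the host-side staging buffers, while the old signatures
// keep working by forwarding DefaultHostMemoryAllocator.get(). Assumes `options`,
// `consumer`, and `table` are already constructed.
try (TableWriter writer =
    Table.writeParquetChunked(options, consumer, DefaultHostMemoryAllocator.get())) {
  writer.write(table);
}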
@@ -419,7 +421,9 @@ private static native long writeORCBufferBegin(String[] columnNames, int compression, int[] precisions, boolean[] isMapValues, - HostBufferConsumer consumer) throws CudfException; + HostBufferConsumer consumer, + HostMemoryAllocator hostMemoryAllocator + ) throws CudfException; /** * Write out a table to an open handle. @@ -447,10 +451,12 @@ private static native long writeORCBufferBegin(String[] columnNames, * Setup everything to write Arrow IPC formatted data to a buffer. * @param columnNames names that correspond to the table columns * @param consumer consumer of host buffers produced. + * @param hostMemoryAllocator allocator for host memory buffers. * @return a handle that is used in later calls to writeArrowIPCChunk and writeArrowIPCEnd. */ private static native long writeArrowIPCBufferBegin(String[] columnNames, - HostBufferConsumer consumer); + HostBufferConsumer consumer, + HostMemoryAllocator hostMemoryAllocator); /** * Convert a cudf table to an arrow table handle. @@ -906,7 +912,9 @@ private static native long startWriteCSVToBuffer(String[] columnNames, String trueValue, String falseValue, int quoteStyle, - HostBufferConsumer buffer) throws CudfException; + HostBufferConsumer buffer, + HostMemoryAllocator hostMemoryAllocator + ) throws CudfException; private static native void writeCSVChunkToBuffer(long writerHandle, long tableHandle); @@ -915,7 +923,8 @@ private static native long startWriteCSVToBuffer(String[] columnNames, private static class CSVTableWriter extends TableWriter { private HostBufferConsumer consumer; - private CSVTableWriter(CSVWriterOptions options, HostBufferConsumer consumer) { + private CSVTableWriter(CSVWriterOptions options, HostBufferConsumer consumer, + HostMemoryAllocator hostMemoryAllocator) { super(startWriteCSVToBuffer(options.getColumnNames(), options.getIncludeHeader(), options.getRowDelimiter(), @@ -924,7 +933,7 @@ private CSVTableWriter(CSVWriterOptions options, HostBufferConsumer consumer) { options.getTrueValue(), options.getFalseValue(), options.getQuoteStyle().nativeId, - consumer)); + consumer, hostMemoryAllocator)); this.consumer = consumer; } @@ -949,8 +958,14 @@ public void close() throws CudfException { } } - public static TableWriter getCSVBufferWriter(CSVWriterOptions options, HostBufferConsumer bufferConsumer) { - return new CSVTableWriter(options, bufferConsumer); + public static TableWriter getCSVBufferWriter(CSVWriterOptions options, + HostBufferConsumer bufferConsumer, HostMemoryAllocator hostMemoryAllocator) { + return new CSVTableWriter(options, bufferConsumer, hostMemoryAllocator); + } + + public static TableWriter getCSVBufferWriter(CSVWriterOptions options, + HostBufferConsumer bufferConsumer) { + return getCSVBufferWriter(options, bufferConsumer, DefaultHostMemoryAllocator.get()); } /** @@ -1393,7 +1408,8 @@ private ParquetTableWriter(ParquetWriterOptions options, File outputFile) { this.consumer = null; } - private ParquetTableWriter(ParquetWriterOptions options, HostBufferConsumer consumer) { + private ParquetTableWriter(ParquetWriterOptions options, HostBufferConsumer consumer, + HostMemoryAllocator hostMemoryAllocator) { super(writeParquetBufferBegin(options.getFlatColumnNames(), options.getTopLevelChildren(), options.getFlatNumChildren(), @@ -1408,7 +1424,7 @@ private ParquetTableWriter(ParquetWriterOptions options, HostBufferConsumer cons options.getFlatIsBinary(), options.getFlatHasParquetFieldId(), options.getFlatParquetFieldId(), - consumer)); + consumer, hostMemoryAllocator)); this.consumer = 
consumer; } @@ -1448,11 +1464,18 @@ public static TableWriter writeParquetChunked(ParquetWriterOptions options, File * @param options the parquet writer options. * @param consumer a class that will be called when host buffers are ready with parquet * formatted data in them. + * @param hostMemoryAllocator allocator for host memory buffers * @return a table writer to use for writing out multiple tables. */ + public static TableWriter writeParquetChunked(ParquetWriterOptions options, + HostBufferConsumer consumer, + HostMemoryAllocator hostMemoryAllocator) { + return new ParquetTableWriter(options, consumer, hostMemoryAllocator); + } + public static TableWriter writeParquetChunked(ParquetWriterOptions options, HostBufferConsumer consumer) { - return new ParquetTableWriter(options, consumer); + return writeParquetChunked(options, consumer, DefaultHostMemoryAllocator.get()); } /** @@ -1461,10 +1484,12 @@ public static TableWriter writeParquetChunked(ParquetWriterOptions options, * @param options the Parquet writer options. * @param consumer a class that will be called when host buffers are ready with Parquet * formatted data in them. + * @param hostMemoryAllocator allocator for host memory buffers * @param columnViews ColumnViews to write to Parquet */ public static void writeColumnViewsToParquet(ParquetWriterOptions options, HostBufferConsumer consumer, + HostMemoryAllocator hostMemoryAllocator, ColumnView... columnViews) { assert columnViews != null && columnViews.length > 0 : "ColumnViews can't be null or empty"; long rows = columnViews[0].getRowCount(); @@ -1483,7 +1508,9 @@ public static void writeColumnViewsToParquet(ParquetWriterOptions options, long nativeHandle = createCudfTableView(viewPointers); try { - try (ParquetTableWriter writer = new ParquetTableWriter(options, consumer)) { + try ( + ParquetTableWriter writer = new ParquetTableWriter(options, consumer, hostMemoryAllocator) + ) { long total = 0; for (ColumnView cv : columnViews) { total += cv.getDeviceMemorySize(); @@ -1495,6 +1522,12 @@ public static void writeColumnViewsToParquet(ParquetWriterOptions options, } } + public static void writeColumnViewsToParquet(ParquetWriterOptions options, + HostBufferConsumer consumer, + ColumnView... columnViews) { + writeColumnViewsToParquet(options, consumer, DefaultHostMemoryAllocator.get(), columnViews); + } + private static class ORCTableWriter extends TableWriter { HostBufferConsumer consumer; @@ -1512,7 +1545,8 @@ private ORCTableWriter(ORCWriterOptions options, File outputFile) { this.consumer = null; } - private ORCTableWriter(ORCWriterOptions options, HostBufferConsumer consumer) { + private ORCTableWriter(ORCWriterOptions options, HostBufferConsumer consumer, + HostMemoryAllocator hostMemoryAllocator) { super(writeORCBufferBegin(options.getFlatColumnNames(), options.getTopLevelChildren(), options.getFlatNumChildren(), @@ -1522,7 +1556,7 @@ private ORCTableWriter(ORCWriterOptions options, HostBufferConsumer consumer) { options.getCompressionType().nativeId, options.getFlatPrecision(), options.getFlatIsMap(), - consumer)); + consumer, hostMemoryAllocator)); this.consumer = consumer; } @@ -1562,10 +1596,16 @@ public static TableWriter writeORCChunked(ORCWriterOptions options, File outputF * @param options the ORC writer options. * @param consumer a class that will be called when host buffers are ready with ORC * formatted data in them. + * @param hostMemoryAllocator allocator for host memory buffers * @return a table writer to use for writing out multiple tables. 
*/ + public static TableWriter writeORCChunked(ORCWriterOptions options, HostBufferConsumer consumer, + HostMemoryAllocator hostMemoryAllocator) { + return new ORCTableWriter(options, consumer, hostMemoryAllocator); + } + public static TableWriter writeORCChunked(ORCWriterOptions options, HostBufferConsumer consumer) { - return new ORCTableWriter(options, consumer); + return writeORCChunked(options, consumer, DefaultHostMemoryAllocator.get()); } private static class ArrowIPCTableWriter extends TableWriter { @@ -1580,8 +1620,9 @@ private ArrowIPCTableWriter(ArrowIPCWriterOptions options, File outputFile) { this.maxChunkSize = options.getMaxChunkSize(); } - private ArrowIPCTableWriter(ArrowIPCWriterOptions options, HostBufferConsumer consumer) { - super(writeArrowIPCBufferBegin(options.getColumnNames(), consumer)); + private ArrowIPCTableWriter(ArrowIPCWriterOptions options, HostBufferConsumer consumer, + HostMemoryAllocator hostMemoryAllocator) { + super(writeArrowIPCBufferBegin(options.getColumnNames(), consumer, hostMemoryAllocator)); this.callback = options.getCallback(); this.consumer = consumer; this.maxChunkSize = options.getMaxChunkSize(); @@ -1629,11 +1670,18 @@ public static TableWriter writeArrowIPCChunked(ArrowIPCWriterOptions options, Fi * @param options the arrow IPC writer options. * @param consumer a class that will be called when host buffers are ready with arrow IPC * formatted data in them. + * @param hostMemoryAllocator allocator for host memory buffers * @return a table writer to use for writing out multiple tables. */ + public static TableWriter writeArrowIPCChunked(ArrowIPCWriterOptions options, + HostBufferConsumer consumer, + HostMemoryAllocator hostMemoryAllocator) { + return new ArrowIPCTableWriter(options, consumer, hostMemoryAllocator); + } + public static TableWriter writeArrowIPCChunked(ArrowIPCWriterOptions options, HostBufferConsumer consumer) { - return new ArrowIPCTableWriter(options, consumer); + return writeArrowIPCChunked(options, consumer, DefaultHostMemoryAllocator.get()); } private static class ArrowReaderWrapper implements AutoCloseable { diff --git a/java/src/main/native/include/jni_utils.hpp b/java/src/main/native/include/jni_utils.hpp index ff4da893329..f342fca8933 100644 --- a/java/src/main/native/include/jni_utils.hpp +++ b/java/src/main/native/include/jni_utils.hpp @@ -786,6 +786,21 @@ inline void jni_cuda_check(JNIEnv *const env, cudaError_t cuda_status) { } } +inline auto add_global_ref(JNIEnv *env, jobject jobj) { + auto new_global_ref = env->NewGlobalRef(jobj); + if (new_global_ref == nullptr) { + throw cudf::jni::jni_exception("global ref"); + } + return new_global_ref; +} + +inline nullptr_t del_global_ref(JNIEnv *env, jobject jobj) { + if (jobj != nullptr) { + env->DeleteGlobalRef(jobj); + } + return nullptr; +} + } // namespace jni } // namespace cudf diff --git a/java/src/main/native/src/ContiguousTableJni.cpp b/java/src/main/native/src/ContiguousTableJni.cpp index 7eddea2a895..8c99c77ca1f 100644 --- a/java/src/main/native/src/ContiguousTableJni.cpp +++ b/java/src/main/native/src/ContiguousTableJni.cpp @@ -55,10 +55,7 @@ bool cache_contiguous_table_jni(JNIEnv *env) { } void release_contiguous_table_jni(JNIEnv *env) { - if (Contiguous_table_jclass != nullptr) { - env->DeleteGlobalRef(Contiguous_table_jclass); - Contiguous_table_jclass = nullptr; - } + Contiguous_table_jclass = cudf::jni::del_global_ref(env, Contiguous_table_jclass); } bool cache_contig_split_group_by_result_jni(JNIEnv *env) { @@ -87,10 +84,7 @@ bool 
cache_contig_split_group_by_result_jni(JNIEnv *env) { } void release_contig_split_group_by_result_jni(JNIEnv *env) { - if (Contig_split_group_by_result_jclass != nullptr) { - env->DeleteGlobalRef(Contig_split_group_by_result_jclass); - Contig_split_group_by_result_jclass = nullptr; - } + Contig_split_group_by_result_jclass = del_global_ref(env, Contig_split_group_by_result_jclass); } jobject contig_split_group_by_result_from(JNIEnv *env, jobjectArray &groups) { diff --git a/java/src/main/native/src/CudfJni.cpp b/java/src/main/native/src/CudfJni.cpp index acbf309b4b7..0f143086451 100644 --- a/java/src/main/native/src/CudfJni.cpp +++ b/java/src/main/native/src/CudfJni.cpp @@ -46,7 +46,6 @@ constexpr bool is_ptds_enabled{false}; #endif static jclass Host_memory_buffer_jclass; -static jmethodID Host_buffer_allocate; static jfieldID Host_buffer_address; static jfieldID Host_buffer_length; @@ -59,11 +58,6 @@ static bool cache_host_memory_buffer_jni(JNIEnv *env) { return false; } - Host_buffer_allocate = env->GetStaticMethodID(cls, "allocate", HOST_MEMORY_BUFFER_SIG("JZ")); - if (Host_buffer_allocate == nullptr) { - return false; - } - Host_buffer_address = env->GetFieldID(cls, "address", "J"); if (Host_buffer_address == nullptr) { return false; @@ -83,15 +77,16 @@ static bool cache_host_memory_buffer_jni(JNIEnv *env) { } static void release_host_memory_buffer_jni(JNIEnv *env) { - if (Host_memory_buffer_jclass != nullptr) { - env->DeleteGlobalRef(Host_memory_buffer_jclass); - Host_memory_buffer_jclass = nullptr; - } + Host_memory_buffer_jclass = del_global_ref(env, Host_memory_buffer_jclass); } -jobject allocate_host_buffer(JNIEnv *env, jlong amount, jboolean prefer_pinned) { - jobject ret = env->CallStaticObjectMethod(Host_memory_buffer_jclass, Host_buffer_allocate, amount, - prefer_pinned); +jobject allocate_host_buffer(JNIEnv *env, jlong amount, jboolean prefer_pinned, + jobject host_memory_allocator) { + auto const host_memory_allocator_class = env->GetObjectClass(host_memory_allocator); + auto const allocateMethodId = + env->GetMethodID(host_memory_allocator_class, "allocate", HOST_MEMORY_BUFFER_SIG("JZ")); + jobject ret = + env->CallObjectMethod(host_memory_allocator, allocateMethodId, amount, prefer_pinned); if (env->ExceptionCheck()) { throw std::runtime_error("allocateHostBuffer threw an exception"); diff --git a/java/src/main/native/src/RmmJni.cpp b/java/src/main/native/src/RmmJni.cpp index 5bbb5383d93..3c49d153cb6 100644 --- a/java/src/main/native/src/RmmJni.cpp +++ b/java/src/main/native/src/RmmJni.cpp @@ -197,10 +197,7 @@ class java_event_handler_memory_resource : public device_memory_resource { update_thresholds(env, alloc_thresholds, jalloc_thresholds); update_thresholds(env, dealloc_thresholds, jdealloc_thresholds); - handler_obj = env->NewGlobalRef(jhandler); - if (handler_obj == nullptr) { - throw cudf::jni::jni_exception("global ref"); - } + handler_obj = cudf::jni::add_global_ref(env, jhandler); } virtual ~java_event_handler_memory_resource() { @@ -209,7 +206,7 @@ class java_event_handler_memory_resource : public device_memory_resource { // already be destroyed and this thread should not try to attach to get an environment. 
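// Note: del_global_ref (added to jni_utils.hpp above) is null-tolerant and returns nullptr,
// which lets the call sites below clear and release a global reference in one statement.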
JNIEnv *env = nullptr; if (jvm->GetEnv(reinterpret_cast(&env), cudf::jni::MINIMUM_JNI_VERSION) == JNI_OK) { - env->DeleteGlobalRef(handler_obj); + handler_obj = cudf::jni::del_global_ref(env, handler_obj); } handler_obj = nullptr; } diff --git a/java/src/main/native/src/TableJni.cpp b/java/src/main/native/src/TableJni.cpp index d6ef2a1e26c..f7ada4305db 100644 --- a/java/src/main/native/src/TableJni.cpp +++ b/java/src/main/native/src/TableJni.cpp @@ -224,7 +224,7 @@ class native_arrow_ipc_writer_handle final { class jni_arrow_output_stream final : public arrow::io::OutputStream { public: - explicit jni_arrow_output_stream(JNIEnv *env, jobject callback) { + explicit jni_arrow_output_stream(JNIEnv *env, jobject callback, jobject host_memory_allocator) { if (env->GetJavaVM(&jvm) < 0) { throw std::runtime_error("GetJavaVM failed"); } @@ -239,11 +239,8 @@ class jni_arrow_output_stream final : public arrow::io::OutputStream { if (handle_buffer_method == nullptr) { throw cudf::jni::jni_exception("handleBuffer method"); } - - this->callback = env->NewGlobalRef(callback); - if (this->callback == nullptr) { - throw cudf::jni::jni_exception("global ref"); - } + this->callback = add_global_ref(env, callback); + this->host_memory_allocator = add_global_ref(env, host_memory_allocator); } virtual ~jni_arrow_output_stream() { @@ -252,13 +249,13 @@ class jni_arrow_output_stream final : public arrow::io::OutputStream { // already be destroyed and this thread should not try to attach to get an environment. JNIEnv *env = nullptr; if (jvm->GetEnv(reinterpret_cast(&env), cudf::jni::MINIMUM_JNI_VERSION) == JNI_OK) { - env->DeleteGlobalRef(callback); - if (current_buffer != nullptr) { - env->DeleteGlobalRef(current_buffer); - } + callback = del_global_ref(env, callback); + current_buffer = del_global_ref(env, current_buffer); + host_memory_allocator = del_global_ref(env, host_memory_allocator); } callback = nullptr; current_buffer = nullptr; + host_memory_allocator = nullptr; } arrow::Status Write(const std::shared_ptr &data) override { @@ -293,10 +290,7 @@ class jni_arrow_output_stream final : public arrow::io::OutputStream { if (current_buffer_written > 0) { JNIEnv *env = cudf::jni::get_jni_env(jvm); handle_buffer(env, current_buffer, current_buffer_written); - if (current_buffer != nullptr) { - env->DeleteGlobalRef(current_buffer); - } - current_buffer = nullptr; + current_buffer = del_global_ref(env, current_buffer); current_buffer_len = 0; current_buffer_data = nullptr; current_buffer_written = 0; @@ -323,11 +317,10 @@ class jni_arrow_output_stream final : public arrow::io::OutputStream { void rotate_buffer(JNIEnv *env) { if (current_buffer != nullptr) { handle_buffer(env, current_buffer, current_buffer_written); - env->DeleteGlobalRef(current_buffer); - current_buffer = nullptr; } - jobject tmp_buffer = allocate_host_buffer(env, alloc_size, true); - current_buffer = env->NewGlobalRef(tmp_buffer); + current_buffer = del_global_ref(env, current_buffer); + jobject tmp_buffer = allocate_host_buffer(env, alloc_size, true, host_memory_allocator); + current_buffer = add_global_ref(env, tmp_buffer); current_buffer_len = get_host_buffer_length(env, current_buffer); current_buffer_data = reinterpret_cast(get_host_buffer_address(env, current_buffer)); current_buffer_written = 0; @@ -350,6 +343,7 @@ class jni_arrow_output_stream final : public arrow::io::OutputStream { int64_t total_written = 0; long alloc_size = MINIMUM_WRITE_BUFFER_SIZE; bool is_closed = false; + jobject host_memory_allocator; }; class 
jni_arrow_input_stream final : public arrow::io::InputStream { @@ -370,10 +364,7 @@ class jni_arrow_input_stream final : public arrow::io::InputStream { throw cudf::jni::jni_exception("readInto method"); } - this->callback = env->NewGlobalRef(callback); - if (this->callback == nullptr) { - throw cudf::jni::jni_exception("global ref"); - } + this->callback = add_global_ref(env, callback); } virtual ~jni_arrow_input_stream() { @@ -382,7 +373,7 @@ class jni_arrow_input_stream final : public arrow::io::InputStream { // already be destroyed and this thread should not try to attach to get an environment. JNIEnv *env = nullptr; if (jvm->GetEnv(reinterpret_cast(&env), cudf::jni::MINIMUM_JNI_VERSION) == JNI_OK) { - env->DeleteGlobalRef(callback); + callback = del_global_ref(env, callback); } callback = nullptr; } @@ -1269,7 +1260,7 @@ JNIEXPORT void JNICALL Java_ai_rapids_cudf_Table_writeCSVToFile( JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_startWriteCSVToBuffer( JNIEnv *env, jclass, jobjectArray j_column_names, jboolean include_header, jstring j_row_delimiter, jbyte j_field_delimiter, jstring j_null_value, jstring j_true_value, - jstring j_false_value, jint j_quote_style, jobject j_buffer) { + jstring j_false_value, jint j_quote_style, jobject j_buffer, jobject host_memory_allocator) { JNI_NULL_CHECK(env, j_column_names, "column name array cannot be null", 0); JNI_NULL_CHECK(env, j_row_delimiter, "row delimiter cannot be null", 0); JNI_NULL_CHECK(env, j_field_delimiter, "field delimiter cannot be null", 0); @@ -1279,7 +1270,8 @@ JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_startWriteCSVToBuffer( try { cudf::jni::auto_set_device(env); - auto data_sink = std::make_unique(env, j_buffer); + auto data_sink = + std::make_unique(env, j_buffer, host_memory_allocator); auto const n_column_names = cudf::jni::native_jstringArray{env, j_column_names}; auto const column_names = n_column_names.as_cpp_vector(); @@ -1576,7 +1568,7 @@ JNIEXPORT long JNICALL Java_ai_rapids_cudf_Table_writeParquetBufferBegin( jbooleanArray j_col_nullability, jobjectArray j_metadata_keys, jobjectArray j_metadata_values, jint j_compression, jint j_stats_freq, jbooleanArray j_isInt96, jintArray j_precisions, jbooleanArray j_is_map, jbooleanArray j_is_binary, jbooleanArray j_hasParquetFieldIds, - jintArray j_parquetFieldIds, jobject consumer) { + jintArray j_parquetFieldIds, jobject consumer, jobject host_memory_allocator) { JNI_NULL_CHECK(env, j_col_names, "null columns", 0); JNI_NULL_CHECK(env, j_col_nullability, "null nullability", 0); JNI_NULL_CHECK(env, j_metadata_keys, "null metadata keys", 0); @@ -1584,7 +1576,7 @@ JNIEXPORT long JNICALL Java_ai_rapids_cudf_Table_writeParquetBufferBegin( JNI_NULL_CHECK(env, consumer, "null consumer", 0); try { std::unique_ptr data_sink( - new cudf::jni::jni_writer_data_sink(env, consumer)); + new cudf::jni::jni_writer_data_sink(env, consumer, host_memory_allocator)); using namespace cudf::io; using namespace cudf::jni; @@ -1755,7 +1747,8 @@ JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_readORC( JNIEXPORT long JNICALL Java_ai_rapids_cudf_Table_writeORCBufferBegin( JNIEnv *env, jclass, jobjectArray j_col_names, jint j_num_children, jintArray j_children, jbooleanArray j_col_nullability, jobjectArray j_metadata_keys, jobjectArray j_metadata_values, - jint j_compression, jintArray j_precisions, jbooleanArray j_is_map, jobject consumer) { + jint j_compression, jintArray j_precisions, jbooleanArray j_is_map, jobject consumer, + jobject host_memory_allocator) { JNI_NULL_CHECK(env, 
j_col_names, "null columns", 0); JNI_NULL_CHECK(env, j_col_nullability, "null nullability", 0); JNI_NULL_CHECK(env, j_metadata_keys, "null metadata keys", 0); @@ -1787,7 +1780,7 @@ JNIEXPORT long JNICALL Java_ai_rapids_cudf_Table_writeORCBufferBegin( [](const std::string &k, const std::string &v) { return std::make_pair(k, v); }); std::unique_ptr data_sink( - new cudf::jni::jni_writer_data_sink(env, consumer)); + new cudf::jni::jni_writer_data_sink(env, consumer, host_memory_allocator)); sink_info sink{data_sink.get()}; auto stats = std::make_shared(); @@ -1918,9 +1911,9 @@ JNIEXPORT jdoubleArray JNICALL Java_ai_rapids_cudf_TableWriter_getWriteStatistic CATCH_STD(env, nullptr) } -JNIEXPORT long JNICALL Java_ai_rapids_cudf_Table_writeArrowIPCBufferBegin(JNIEnv *env, jclass, - jobjectArray j_col_names, - jobject consumer) { +JNIEXPORT long JNICALL Java_ai_rapids_cudf_Table_writeArrowIPCBufferBegin( + JNIEnv *env, jclass, jobjectArray j_col_names, jobject consumer, + jobject host_memory_allocator) { JNI_NULL_CHECK(env, j_col_names, "null columns", 0); JNI_NULL_CHECK(env, consumer, "null consumer", 0); try { @@ -1928,7 +1921,7 @@ JNIEXPORT long JNICALL Java_ai_rapids_cudf_Table_writeArrowIPCBufferBegin(JNIEnv cudf::jni::native_jstringArray col_names(env, j_col_names); std::shared_ptr data_sink( - new cudf::jni::jni_arrow_output_stream(env, consumer)); + new cudf::jni::jni_arrow_output_stream(env, consumer, host_memory_allocator)); cudf::jni::native_arrow_ipc_writer_handle *ret = new cudf::jni::native_arrow_ipc_writer_handle(col_names.as_cpp_vector(), data_sink); diff --git a/java/src/main/native/src/cudf_jni_apis.hpp b/java/src/main/native/src/cudf_jni_apis.hpp index 18993aea294..867df80b722 100644 --- a/java/src/main/native/src/cudf_jni_apis.hpp +++ b/java/src/main/native/src/cudf_jni_apis.hpp @@ -100,7 +100,8 @@ jobject contig_split_group_by_result_from(JNIEnv *env, jobjectArray &groups, /** * Allocate a HostMemoryBuffer */ -jobject allocate_host_buffer(JNIEnv *env, jlong amount, jboolean prefer_pinned); +jobject allocate_host_buffer(JNIEnv *env, jlong amount, jboolean prefer_pinned, + jobject host_memory_allocator); /** * Get the address of a HostMemoryBuffer diff --git a/java/src/main/native/src/jni_writer_data_sink.hpp b/java/src/main/native/src/jni_writer_data_sink.hpp index 05fe594fcd5..efac6112c25 100644 --- a/java/src/main/native/src/jni_writer_data_sink.hpp +++ b/java/src/main/native/src/jni_writer_data_sink.hpp @@ -26,7 +26,7 @@ constexpr long MINIMUM_WRITE_BUFFER_SIZE = 10 * 1024 * 1024; // 10 MB class jni_writer_data_sink final : public cudf::io::data_sink { public: - explicit jni_writer_data_sink(JNIEnv *env, jobject callback) { + explicit jni_writer_data_sink(JNIEnv *env, jobject callback, jobject host_memory_allocator) { if (env->GetJavaVM(&jvm) < 0) { throw std::runtime_error("GetJavaVM failed"); } @@ -42,10 +42,8 @@ class jni_writer_data_sink final : public cudf::io::data_sink { throw cudf::jni::jni_exception("handleBuffer method"); } - this->callback = env->NewGlobalRef(callback); - if (this->callback == nullptr) { - throw cudf::jni::jni_exception("global ref"); - } + this->callback = add_global_ref(env, callback); + this->host_memory_allocator = add_global_ref(env, host_memory_allocator); } virtual ~jni_writer_data_sink() { @@ -54,13 +52,13 @@ class jni_writer_data_sink final : public cudf::io::data_sink { // already be destroyed and this thread should not try to attach to get an environment. 
JNIEnv *env = nullptr; if (jvm->GetEnv(reinterpret_cast(&env), cudf::jni::MINIMUM_JNI_VERSION) == JNI_OK) { - env->DeleteGlobalRef(callback); - if (current_buffer != nullptr) { - env->DeleteGlobalRef(current_buffer); - } + callback = del_global_ref(env, callback); + current_buffer = del_global_ref(env, current_buffer); + host_memory_allocator = del_global_ref(env, host_memory_allocator); } callback = nullptr; current_buffer = nullptr; + host_memory_allocator = nullptr; } void host_write(void const *data, size_t size) override { @@ -126,10 +124,7 @@ class jni_writer_data_sink final : public cudf::io::data_sink { if (current_buffer_written > 0) { JNIEnv *env = cudf::jni::get_jni_env(jvm); handle_buffer(env, current_buffer, current_buffer_written); - if (current_buffer != nullptr) { - env->DeleteGlobalRef(current_buffer); - } - current_buffer = nullptr; + current_buffer = del_global_ref(env, current_buffer); current_buffer_len = 0; current_buffer_data = nullptr; current_buffer_written = 0; @@ -144,11 +139,10 @@ class jni_writer_data_sink final : public cudf::io::data_sink { void rotate_buffer(JNIEnv *env) { if (current_buffer != nullptr) { handle_buffer(env, current_buffer, current_buffer_written); - env->DeleteGlobalRef(current_buffer); - current_buffer = nullptr; } - jobject tmp_buffer = allocate_host_buffer(env, alloc_size, true); - current_buffer = env->NewGlobalRef(tmp_buffer); + current_buffer = del_global_ref(env, current_buffer); + jobject tmp_buffer = allocate_host_buffer(env, alloc_size, true, host_memory_allocator); + current_buffer = add_global_ref(env, tmp_buffer); current_buffer_len = get_host_buffer_length(env, current_buffer); current_buffer_data = reinterpret_cast(get_host_buffer_address(env, current_buffer)); current_buffer_written = 0; @@ -170,6 +164,7 @@ class jni_writer_data_sink final : public cudf::io::data_sink { long current_buffer_written = 0; size_t total_written = 0; long alloc_size = MINIMUM_WRITE_BUFFER_SIZE; + jobject host_memory_allocator; }; } // namespace cudf::jni From 04085acf1ed43921b638ead432d654695b84d4ea Mon Sep 17 00:00:00 2001 From: GALI PREM SAGAR Date: Wed, 30 Aug 2023 09:55:38 -0500 Subject: [PATCH 014/150] Fix `name` selection in `Index.difference` and `Index.intersection` (#13986) closes #13985 This PR fixes issues in `Index.difference` and `Index.intersection` where the result name was selected incorrectly and `NA` values were not handled. Authors: - GALI PREM SAGAR (https://github.com/galipremsagar) Approvers: - Bradley Dice (https://github.com/bdice) - Matthew Roeschke (https://github.com/mroeschke) URL: https://github.com/rapidsai/cudf/pull/13986 --- python/cudf/cudf/core/_base_index.py | 21 +++++++++++---------- python/cudf/cudf/tests/test_index.py | 16 ++++++++++------ 2 files changed, 21 insertions(+), 16 deletions(-) diff --git a/python/cudf/cudf/core/_base_index.py b/python/cudf/cudf/core/_base_index.py index d593f0df138..829ca33d8a5 100644 --- a/python/cudf/cudf/core/_base_index.py +++ b/python/cudf/cudf/core/_base_index.py @@ -651,7 +651,7 @@ def _get_reconciled_name_object(self, other): case make a shallow copy of self.
""" name = _get_result_name(self.name, other.name) - if self.name != name: + if not _is_same_name(self.name, name): return self.rename(name) return self @@ -943,17 +943,18 @@ def difference(self, other, sort=None): other = cudf.Index(other) + res_name = _get_result_name(self.name, other.name) + if is_mixed_with_object_dtype(self, other): difference = self.copy() else: other = other.copy(deep=False) - other.names = self.names difference = cudf.core.index._index_from_data( - cudf.DataFrame._from_data(self._data) + cudf.DataFrame._from_data({"None": self._column}) .merge( - cudf.DataFrame._from_data(other._data), + cudf.DataFrame._from_data({"None": other._column}), how="leftanti", - on=self.name, + on="None", ) ._data ) @@ -961,6 +962,8 @@ def difference(self, other, sort=None): if self.dtype != other.dtype: difference = difference.astype(self.dtype) + difference.name = res_name + if sort is None and len(other): return difference.sort_values() @@ -1323,14 +1326,12 @@ def _union(self, other, sort=None): return union_result def _intersection(self, other, sort=None): - other_unique = other.unique() - other_unique.names = self.names intersection_result = cudf.core.index._index_from_data( - cudf.DataFrame._from_data(self.unique()._data) + cudf.DataFrame._from_data({"None": self.unique()._column}) .merge( - cudf.DataFrame._from_data(other_unique._data), + cudf.DataFrame._from_data({"None": other.unique()._column}), how="inner", - on=self.name, + on="None", ) ._data ) diff --git a/python/cudf/cudf/tests/test_index.py b/python/cudf/cudf/tests/test_index.py index 2e6b45058ef..359b3c519de 100644 --- a/python/cudf/cudf/tests/test_index.py +++ b/python/cudf/cudf/tests/test_index.py @@ -804,12 +804,16 @@ def test_index_to_series(data): ], ) @pytest.mark.parametrize("sort", [None, False]) -def test_index_difference(data, other, sort): - pd_data = pd.Index(data) - pd_other = pd.Index(other) +@pytest.mark.parametrize( + "name_data,name_other", + [("abc", "c"), (None, "abc"), ("abc", pd.NA), ("abc", "abc")], +) +def test_index_difference(data, other, sort, name_data, name_other): + pd_data = pd.Index(data, name=name_data) + pd_other = pd.Index(other, name=name_other) - gd_data = cudf.core.index.as_index(data) - gd_other = cudf.core.index.as_index(other) + gd_data = cudf.from_pandas(pd_data) + gd_other = cudf.from_pandas(pd_other) expected = pd_data.difference(pd_other, sort=sort) actual = gd_data.difference(gd_other, sort=sort) @@ -2066,7 +2070,7 @@ def test_union_index(idx1, idx2, sort): (pd.RangeIndex(0, 10), pd.RangeIndex(3, 7)), (pd.RangeIndex(0, 10), pd.RangeIndex(-10, 20)), (pd.RangeIndex(0, 10, name="a"), pd.RangeIndex(90, 100, name="b")), - (pd.Index([0, 1, 2, 30], name="a"), pd.Index([30, 0, 90, 100])), + (pd.Index([0, 1, 2, 30], name=pd.NA), pd.Index([30, 0, 90, 100])), (pd.Index([0, 1, 2, 30], name="a"), [90, 100]), (pd.Index([0, 1, 2, 30]), pd.Index([0, 10, 1.0, 11])), (pd.Index(["a", "b", "c", "d", "c"]), pd.Index(["a", "c", "z"])), From 9259a20bc37fc323af7bc80e72a5af27400f3b09 Mon Sep 17 00:00:00 2001 From: Martin Marenz Date: Wed, 30 Aug 2023 17:01:33 +0200 Subject: [PATCH 015/150] Add `bytes_per_second` to copy_if_else benchmark (#13960) Adds `bytes_per_second` to the `COPY_IF_ELSE_BENCH` benchmark. This patch relates to #13735. 
Authors: - Martin Marenz (https://github.com/Blonck) Approvers: - Bradley Dice (https://github.com/bdice) - David Wendt (https://github.com/davidwendt) URL: https://github.com/rapidsai/cudf/pull/13960 --- cpp/benchmarks/copying/copy_if_else.cpp | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/cpp/benchmarks/copying/copy_if_else.cpp b/cpp/benchmarks/copying/copy_if_else.cpp index a10f54b3d6f..50ddfb82feb 100644 --- a/cpp/benchmarks/copying/copy_if_else.cpp +++ b/cpp/benchmarks/copying/copy_if_else.cpp @@ -47,6 +47,14 @@ static void BM_copy_if_else(benchmark::State& state, bool nulls) cuda_event_timer raii(state, true, cudf::get_default_stream()); cudf::copy_if_else(lhs, rhs, decision); } + + auto const bytes_read = n_rows * (sizeof(TypeParam) + sizeof(bool)); + auto const bytes_written = n_rows * sizeof(TypeParam); + auto const null_bytes = nulls ? 2 * cudf::bitmask_allocation_size_bytes(n_rows) : 0; + + // Use number of bytes read and written. + state.SetBytesProcessed(static_cast(state.iterations()) * + (bytes_read + bytes_written + null_bytes)); } #define COPY_BENCHMARK_DEFINE(name, type, b) \ From e63f64176ec362a00c7a9123f6244a44fdbe2ad2 Mon Sep 17 00:00:00 2001 From: Martin Marenz Date: Wed, 30 Aug 2023 17:21:09 +0200 Subject: [PATCH 016/150] Add `bytes_per_second` to hash_partition benchmark (#13965) Adds `bytes_per_second` to the `PARTITION_BENCH` benchmark. This patch relates to #13735. Authors: - Martin Marenz (https://github.com/Blonck) - Mark Harris (https://github.com/harrism) Approvers: - David Wendt (https://github.com/davidwendt) - Mark Harris (https://github.com/harrism) URL: https://github.com/rapidsai/cudf/pull/13965 --- cpp/benchmarks/hashing/partition.cpp | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cpp/benchmarks/hashing/partition.cpp b/cpp/benchmarks/hashing/partition.cpp index b688fe2ed7f..0bec4394216 100644 --- a/cpp/benchmarks/hashing/partition.cpp +++ b/cpp/benchmarks/hashing/partition.cpp @@ -43,6 +43,13 @@ void BM_hash_partition(benchmark::State& state) cuda_event_timer timer(state, true); auto output = cudf::hash_partition(input, columns_to_hash, num_partitions); } + + auto const bytes_read = num_rows * num_cols * sizeof(T); + auto const bytes_written = num_rows * num_cols * sizeof(T); + auto const partition_bytes = num_partitions * sizeof(cudf::size_type); + + state.SetBytesProcessed(static_cast(state.iterations()) * (bytes_read + bytes_written + partition_bytes)); } BENCHMARK_DEFINE_F(Hashing, hash_partition) From ed754da1b6711927622ef9544f52aa1c9ce22191 Mon Sep 17 00:00:00 2001 From: David Wendt <45795991+davidwendt@users.noreply.github.com> Date: Wed, 30 Aug 2023 11:22:14 -0400 Subject: [PATCH 017/150] Add tab as literal to cudf::test::to_string output (#13993) Adds escaped `\\t` to the `cudf::test::to_string()` output. Found this while working on #13891 where the output included tabs but was shown as a varying number of spaces in the console when using `cudf::test::print()`. Also added `\\b` for good measure as well as a gtest for all the supported escape sequences.
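The effect is that control characters now show up as escaped literals in the test output instead of raw whitespace; a small behavior sketch mirroring the new gtest below (not code from this patch):

```cpp
#include <cudf_test/column_utilities.hpp>
#include <cudf_test/column_wrapper.hpp>

#include <iostream>

int main()
{
  cudf::test::strings_column_wrapper input({"a\tb", "c\nd"});
  // prints "a\tb,c\nd" -- the escaped literals rather than a raw tab and newline
  std::cout << cudf::test::to_string(input, ",") << std::endl;
  return 0;
}
```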
Authors: - David Wendt (https://github.com/davidwendt) Approvers: - Bradley Dice (https://github.com/bdice) - Vukasin Milovanovic (https://github.com/vuule) URL: https://github.com/rapidsai/cudf/pull/13993 --- cpp/tests/utilities/column_utilities.cu | 7 ++++++- cpp/tests/utilities_tests/column_utilities_tests.cpp | 8 ++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/cpp/tests/utilities/column_utilities.cu b/cpp/tests/utilities/column_utilities.cu index fcaf23fd456..bae402155e9 100644 --- a/cpp/tests/utilities/column_utilities.cu +++ b/cpp/tests/utilities/column_utilities.cu @@ -1091,7 +1091,7 @@ struct column_view_printer { if (col.is_empty()) return; auto h_data = cudf::test::to_host(col); - // explicitly replace '\r' and '\n' characters with "\r" and "\n" strings respectively. + // explicitly replace some special whitespace characters with their literal equivalents auto cleaned = [](std::string_view in) { std::string out(in); auto replace_char = [](std::string& out, char c, std::string_view repl) { out.replace(pos, 1, repl); } }; + replace_char(out, '\a', "\\a"); + replace_char(out, '\b', "\\b"); + replace_char(out, '\f', "\\f"); replace_char(out, '\r', "\\r"); + replace_char(out, '\t', "\\t"); replace_char(out, '\n', "\\n"); + replace_char(out, '\v', "\\v"); return out; }; diff --git a/cpp/tests/utilities_tests/column_utilities_tests.cpp b/cpp/tests/utilities_tests/column_utilities_tests.cpp index e90a3f9ac6e..90a7270cb29 100644 --- a/cpp/tests/utilities_tests/column_utilities_tests.cpp +++ b/cpp/tests/utilities_tests/column_utilities_tests.cpp @@ -274,6 +274,14 @@ TEST_F(ColumnUtilitiesStringsTest, StringsToString) EXPECT_EQ(cudf::test::to_string(strings, delimiter), tmp.str()); } +TEST_F(ColumnUtilitiesStringsTest, PrintEscapeStrings) +{ + char const* delimiter = ","; + cudf::test::strings_column_wrapper input({"e\te\ne", "é\bé\ré", "e\vé\fé\abell"}); + std::string expected{"e\\te\\ne,é\\bé\\ré,e\\vé\\fé\\abell"}; + EXPECT_EQ(cudf::test::to_string(input, delimiter), expected); +} + TYPED_TEST(ColumnUtilitiesTestFixedPoint, NonNullableToHost) { using namespace numeric; From 2b5e0fb587cf0cb479b470af5aa6c67ce4e7f00f Mon Sep 17 00:00:00 2001 From: David Wendt <45795991+davidwendt@users.noreply.github.com> Date: Wed, 30 Aug 2023 12:51:28 -0400 Subject: [PATCH 018/150] Improve performance of nvtext::edit_distance (#13912) Improves performance of `nvtext::edit_distance` by reworking the algorithm with a shorter working buffer and simpler logic.
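The rework replaces the previous three-line formulation with the textbook two-row Levenshtein recurrence, so each string pair needs only about `2 * (min_length + 1)` integers of scratch space. The same idea on the host looks like this (an illustrative plain-C++ sketch, not the device code in the diff below):

```cpp
#include <algorithm>
#include <numeric>
#include <string>
#include <vector>

int edit_distance(std::string const& a, std::string const& b)
{
  std::vector<int> v0(b.size() + 1), v1(b.size() + 1);
  std::iota(v0.begin(), v0.end(), 0);  // distance of each prefix of b from the empty string
  for (std::size_t i = 0; i < a.size(); ++i) {
    v1[0] = static_cast<int>(i) + 1;  // deleting i+1 characters of a
    for (std::size_t j = 0; j < b.size(); ++j) {
      int const substitute = v0[j] + (a[i] != b[j]);
      int const remove     = v0[j + 1] + 1;
      int const insert     = v1[j] + 1;
      v1[j + 1]            = std::min({substitute, remove, insert});
    }
    std::swap(v0, v1);
  }
  return v0[b.size()];
}
```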
Authors: - David Wendt (https://github.com/davidwendt) Approvers: - Bradley Dice (https://github.com/bdice) - Karthikeyan (https://github.com/karthikeyann) - MithunR (https://github.com/mythrocks) URL: https://github.com/rapidsai/cudf/pull/13912 --- cpp/benchmarks/CMakeLists.txt | 4 +- cpp/benchmarks/text/edit_distance.cpp | 58 ++++++++ cpp/src/text/edit_distance.cu | 194 +++++++++++++------------- 3 files changed, 154 insertions(+), 102 deletions(-) create mode 100644 cpp/benchmarks/text/edit_distance.cpp diff --git a/cpp/benchmarks/CMakeLists.txt b/cpp/benchmarks/CMakeLists.txt index 96e24efac8a..5e7862f4b3b 100644 --- a/cpp/benchmarks/CMakeLists.txt +++ b/cpp/benchmarks/CMakeLists.txt @@ -276,8 +276,8 @@ ConfigureBench(BINARYOP_BENCH binaryop/binaryop.cpp binaryop/compiled_binaryop.c ConfigureBench(TEXT_BENCH text/ngrams.cpp text/subword.cpp) ConfigureNVBench( - TEXT_NVBENCH text/hash_ngrams.cpp text/jaccard.cpp text/minhash.cpp text/normalize.cpp - text/replace.cpp text/tokenize.cpp + TEXT_NVBENCH text/edit_distance.cpp text/hash_ngrams.cpp text/jaccard.cpp text/minhash.cpp + text/normalize.cpp text/replace.cpp text/tokenize.cpp ) # ################################################################################################## diff --git a/cpp/benchmarks/text/edit_distance.cpp b/cpp/benchmarks/text/edit_distance.cpp new file mode 100644 index 00000000000..8a8bd9ae586 --- /dev/null +++ b/cpp/benchmarks/text/edit_distance.cpp @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + +#include + +#include + +#include + +#include + +static void bench_edit_distance(nvbench::state& state) +{ + auto const num_rows = static_cast(state.get_int64("num_rows")); + auto const row_width = static_cast(state.get_int64("row_width")); + + if (static_cast(num_rows) * static_cast(row_width) >= + static_cast(std::numeric_limits::max())) { + state.skip("Skip benchmarks greater than size_type limit"); + } + + data_profile const strings_profile = data_profile_builder().distribution( + cudf::type_id::STRING, distribution_id::NORMAL, 0, row_width); + auto const strings_table = create_random_table( + {cudf::type_id::STRING, cudf::type_id::STRING}, row_count{num_rows}, strings_profile); + cudf::strings_column_view input1(strings_table->view().column(0)); + cudf::strings_column_view input2(strings_table->view().column(1)); + + state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value())); + + auto chars_size = input1.chars_size() + input2.chars_size(); + state.add_global_memory_reads(chars_size); + // output are integers (one per row) + state.add_global_memory_writes(num_rows); + + state.exec(nvbench::exec_tag::sync, + [&](nvbench::launch& launch) { auto result = nvtext::edit_distance(input1, input2); }); +} + +NVBENCH_BENCH(bench_edit_distance) + .set_name("edit_distance") + .add_int64_axis("num_rows", {1024, 4096, 8192, 16364, 32768, 262144}) + .add_int64_axis("row_width", {8, 16, 32, 64, 128, 256}); diff --git a/cpp/src/text/edit_distance.cu b/cpp/src/text/edit_distance.cu index fb0ecdb7677..1460be4fcf5 100644 --- a/cpp/src/text/edit_distance.cu +++ b/cpp/src/text/edit_distance.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -29,11 +29,13 @@ #include #include +#include #include #include #include #include #include +#include #include #include @@ -42,91 +44,76 @@ namespace detail { namespace { /** - * @brief Compute the edit-distance between two strings + * @brief Compute the Levenshtein distance for each string pair * - * The temporary buffer must be able to hold 3 int16 values for each character - * in the smaller of the two provided strings. + * Documentation here: https://www.cuelogic.com/blog/the-levenshtein-algorithm + * And here: https://en.wikipedia.org/wiki/Levenshtein_distance * * @param d_str First string * @param d_tgt Second string - * @param buffer Temporary memory buffer used for the calculation. - * @return Edit distance value + * @param buffer Working buffer for intermediate calculations + * @return The edit distance value */ -__device__ int32_t compute_distance(cudf::string_view const& d_str, - cudf::string_view const& d_tgt, - int16_t* buffer) +__device__ cudf::size_type compute_distance(cudf::string_view const& d_str, + cudf::string_view const& d_tgt, + cudf::size_type* buffer) { auto const str_length = d_str.length(); auto const tgt_length = d_tgt.length(); if (str_length == 0) return tgt_length; if (tgt_length == 0) return str_length; - auto itr_A = str_length < tgt_length ? d_str.begin() : d_tgt.begin(); - auto itr_B = str_length < tgt_length ? d_tgt.begin() : d_str.begin(); + auto begin = str_length < tgt_length ? d_str.begin() : d_tgt.begin(); + auto itr = str_length < tgt_length ? 
d_tgt.begin() : d_str.begin(); // .first is min and .second is max - auto const lengths = std::minmax(str_length, tgt_length); + auto const [n, m] = std::minmax(str_length, tgt_length); // setup compute buffer pointers - auto line2 = buffer; - auto line1 = line2 + lengths.first; - auto line0 = line1 + lengths.first; - // range is both lengths - auto const range = lengths.first + lengths.second - 1; - for (cudf::size_type i = 0; i < range; ++i) { - auto tmp = line2; - line2 = line1; - line1 = line0; - line0 = tmp; - // checking pairs of characters - for (int x = (i < lengths.second ? 0 : i - lengths.second + 1); - (x < lengths.first) && (x < i + 1); - ++x) { - int const y = i - x; - itr_A += (x - itr_A.position()); // point to next - itr_B += (y - itr_B.position()); // characters to check - int16_t const w = - (((x > 0) && (y > 0)) ? line2[x - 1] : static_cast(std::max(x, y))) + - static_cast(*itr_A != *itr_B); // add 1 if characters do not match - int16_t const u = (y > 0 ? line1[x] : x + 1) + 1; - int16_t const v = (x > 0 ? line1[x - 1] : y + 1) + 1; - // store min(u,v,w) - line0[x] = std::min(std::min(u, v), w); + auto v0 = buffer; + auto v1 = v0 + n + 1; + // initialize v0 + thrust::sequence(thrust::seq, v0, v1); + + for (int i = 0; i < m; ++i, ++itr) { + auto itr_tgt = begin; + v1[0] = i + 1; + for (int j = 0; j < n; ++j, ++itr_tgt) { + auto sub_cost = v0[j] + (*itr != *itr_tgt); + auto del_cost = v0[j + 1] + 1; + auto ins_cost = v1[j] + 1; + v1[j + 1] = std::min(std::min(sub_cost, del_cost), ins_cost); } + thrust::swap(v0, v1); } - return static_cast(line0[lengths.first - 1]); + return v0[n]; } -/** - * @brief Compute the Levenshtein distance for each string. - * - * Documentation here: https://www.cuelogic.com/blog/the-levenshtein-algorithm - * And here: https://en.wikipedia.org/wiki/Levenshtein_distances - */ struct edit_distance_levenshtein_algorithm { cudf::column_device_view d_strings; // computing these cudf::column_device_view d_targets; // against these; - int16_t* d_buffer; // compute buffer for each string - int32_t* d_results; // input is buffer offset; output is edit distance + cudf::size_type* d_buffer; // compute buffer for each string + std::ptrdiff_t const* d_offsets; // locate sub-buffer for each string + cudf::size_type* d_results; // edit distance values - __device__ void operator()(cudf::size_type idx) + __device__ void operator()(cudf::size_type idx) const { auto d_str = d_strings.is_null(idx) ? cudf::string_view{} : d_strings.element(idx); auto d_tgt = [&] __device__ { // d_targets is also allowed to have only one entry - if (d_targets.is_null(idx)) return cudf::string_view{}; + if (d_targets.is_null(idx)) { return cudf::string_view{}; } return d_targets.size() == 1 ? 
     }();
-    d_results[idx] = compute_distance(d_str, d_tgt, d_buffer + d_results[idx]);
+    d_results[idx] = compute_distance(d_str, d_tgt, d_buffer + d_offsets[idx]);
   }
 };

 struct edit_distance_matrix_levenshtein_algorithm {
   cudf::column_device_view d_strings;  // computing these against itself
-  int16_t* d_buffer;                   // compute buffer for each string
-  int32_t const* d_offsets;            // locate sub-buffer for each string
-  int32_t* d_results;                  // edit distance values
+  cudf::size_type* d_buffer;           // compute buffer for each string
+  std::ptrdiff_t const* d_offsets;     // locate sub-buffer for each string
+  cudf::size_type* d_results;          // edit distance values

-  __device__ void operator()(cudf::size_type idx)
+  __device__ void operator()(cudf::size_type idx) const
   {
     auto const strings_count = d_strings.size();
     auto const row           = idx / strings_count;
@@ -136,9 +123,9 @@ struct edit_distance_matrix_levenshtein_algorithm {
       d_strings.is_null(row) ? cudf::string_view{} : d_strings.element<cudf::string_view>(row);
     cudf::string_view d_str2 =
       d_strings.is_null(col) ? cudf::string_view{} : d_strings.element<cudf::string_view>(col);
-    auto work_buffer        = d_buffer + d_offsets[idx - ((row + 1) * (row + 2)) / 2];
-    int32_t const distance  = (row == col) ? 0 : compute_distance(d_str1, d_str2, work_buffer);
-    d_results[idx]          = distance;  // top half of matrix
+    auto work_buffer    = d_buffer + d_offsets[idx - ((row + 1) * (row + 2)) / 2];
+    auto const distance = (row == col) ? 0 : compute_distance(d_str1, d_str2, work_buffer);
+    d_results[idx]      = distance;  // top half of matrix
     d_results[col * strings_count + row] = distance;  // bottom half of matrix
   }
 };
@@ -153,10 +140,13 @@ std::unique_ptr<cudf::column> edit_distance(cudf::strings_column_view const& str
                                             rmm::cuda_stream_view stream,
                                             rmm::mr::device_memory_resource* mr)
 {
-  cudf::size_type strings_count = strings.size();
-  if (strings_count == 0) return cudf::make_empty_column(cudf::data_type{cudf::type_id::INT32});
-  if (targets.size() > 1)
+  auto const strings_count = strings.size();
+  if (strings_count == 0) {
+    return cudf::make_empty_column(cudf::data_type{cudf::type_to_id<cudf::size_type>()});
+  }
+  if (targets.size() > 1) {
     CUDF_EXPECTS(strings_count == targets.size(), "targets.size() must equal strings.size()");
+  }

   // create device columns from the input columns
   auto strings_column = cudf::column_device_view::create(strings.parent(), stream);
@@ -165,46 +155,46 @@ std::unique_ptr<cudf::column> edit_distance(cudf::strings_column_view const& str
   auto d_targets = *targets_column;

   // calculate the size of the compute-buffer;
-  // we can use the output column buffer to hold the size/offset values temporarily
-  auto results = cudf::make_fixed_width_column(cudf::data_type{cudf::type_id::INT32},
-                                               strings_count,
-                                               rmm::device_buffer{0, stream, mr},
-                                               0,
-                                               stream,
-                                               mr);
-  auto d_results = results->mutable_view().data<int32_t>();
-
+  rmm::device_uvector<std::ptrdiff_t> offsets(strings_count, stream);
   thrust::transform(rmm::exec_policy(stream),
                     thrust::make_counting_iterator(0),
                     thrust::make_counting_iterator(strings_count),
-                    d_results,
+                    offsets.begin(),
                     [d_strings, d_targets] __device__(auto idx) {
-                      if (d_strings.is_null(idx) || d_targets.is_null(idx)) return int32_t{0};
+                      if (d_strings.is_null(idx) || d_targets.is_null(idx)) {
+                        return cudf::size_type{0};
+                      }
                       auto d_str = d_strings.element<cudf::string_view>(idx);
                       auto d_tgt = d_targets.size() == 1 ? d_targets.element<cudf::string_view>(0)
                                                          : d_targets.element<cudf::string_view>(idx);
-                      // just need 3 int16's for each character of the shorter string
-                      return static_cast<int32_t>(std::min(d_str.length(), d_tgt.length()) * 3);
+                      // just need 2 integers for each character of the shorter string
+                      return (std::min(d_str.length(), d_tgt.length()) + 1) * 2;
                     });

   // get the total size of the temporary compute buffer
-  size_t compute_size =
-    thrust::reduce(rmm::exec_policy(stream), d_results, d_results + strings_count, size_t{0});
+  int64_t compute_size =
+    thrust::reduce(rmm::exec_policy(stream), offsets.begin(), offsets.end(), int64_t{0});
   // convert sizes to offsets in-place
-  thrust::exclusive_scan(rmm::exec_policy(stream), d_results, d_results + strings_count, d_results);
+  thrust::exclusive_scan(rmm::exec_policy(stream), offsets.begin(), offsets.end(), offsets.begin());
   // create the temporary compute buffer
-  rmm::device_uvector<int16_t> compute_buffer(compute_size, stream);
+  rmm::device_uvector<cudf::size_type> compute_buffer(compute_size, stream);
   auto d_buffer = compute_buffer.data();

-  // compute the edit distance into the output column in-place
-  // - on input, d_results is the offset to the working section of d_buffer for each row
-  // - on output, d_results is the calculated edit distance for that row
+  auto results = cudf::make_fixed_width_column(cudf::data_type{cudf::type_to_id<cudf::size_type>()},
+                                               strings_count,
+                                               rmm::device_buffer{0, stream, mr},
+                                               0,
+                                               stream,
+                                               mr);
+  auto d_results = results->mutable_view().data<cudf::size_type>();
+
+  // compute the edit distance into the output column
   thrust::for_each_n(
     rmm::exec_policy(stream),
     thrust::make_counting_iterator(0),
     strings_count,
-    edit_distance_levenshtein_algorithm{d_strings, d_targets, d_buffer, d_results});
+    edit_distance_levenshtein_algorithm{d_strings, d_targets, d_buffer, offsets.data(), d_results});

   return results;
 }
@@ -216,7 +206,9 @@ std::unique_ptr<cudf::column> edit_distance_matrix(cudf::strings_column_view con
                                                    rmm::mr::device_memory_resource* mr)
 {
   cudf::size_type strings_count = strings.size();
-  if (strings_count == 0) return cudf::make_empty_column(cudf::data_type{cudf::type_id::INT32});
+  if (strings_count == 0) {
+    return cudf::make_empty_column(cudf::data_type{cudf::type_to_id<cudf::size_type>()});
+  }
   CUDF_EXPECTS(strings_count > 1, "the input strings must include at least 2 strings");
   CUDF_EXPECTS(static_cast<std::size_t>(strings_count) * static_cast<std::size_t>(strings_count) <
                  static_cast<std::size_t>(std::numeric_limits<cudf::size_type>().max()),
@@ -230,7 +222,7 @@ std::unique_ptr<cudf::column> edit_distance_matrix(cudf::strings_column_view con
   // We only need memory for half the size of the output matrix since the edit distance calculation
   // is commutative -- `distance(strings[i],strings[j]) == distance(strings[j],strings[i])`
   cudf::size_type n_upper = (strings_count * (strings_count - 1)) / 2;
-  rmm::device_uvector<cudf::size_type> offsets(n_upper, stream);
+  rmm::device_uvector<std::ptrdiff_t> offsets(n_upper, stream);
   auto d_offsets = offsets.data();
   CUDF_CUDA_TRY(cudaMemsetAsync(d_offsets, 0, n_upper * sizeof(cudf::size_type), stream.value()));
   thrust::for_each_n(
@@ -245,28 +237,29 @@ std::unique_ptr<cudf::column> edit_distance_matrix(cudf::strings_column_view con
         d_strings.is_null(row) ? cudf::string_view{} : d_strings.element<cudf::string_view>(row);
       cudf::string_view const d_str2 =
         d_strings.is_null(col) ? cudf::string_view{} : d_strings.element<cudf::string_view>(col);
-      if (d_str1.empty() || d_str2.empty()) return;
-      // the temp size needed is 3 int16s per character of the shorter string
-      d_offsets[idx - ((row + 1) * (row + 2)) / 2] = std::min(d_str1.length(), d_str2.length()) * 3;
+      if (d_str1.empty() || d_str2.empty()) { return; }
+      // the temp size needed is 2 integers per character of the shorter string
+      d_offsets[idx - ((row + 1) * (row + 2)) / 2] =
+        (std::min(d_str1.length(), d_str2.length()) + 1) * 2;
     });

   // get the total size for the compute buffer
-  size_t compute_size =
-    thrust::reduce(rmm::exec_policy(stream), offsets.begin(), offsets.end(), size_t{0});
+  int64_t compute_size =
+    thrust::reduce(rmm::exec_policy(stream), offsets.begin(), offsets.end(), int64_t{0});
   // convert sizes to offsets in-place
   thrust::exclusive_scan(rmm::exec_policy(stream), offsets.begin(), offsets.end(), offsets.begin());
   // create the compute buffer
-  rmm::device_uvector<int16_t> compute_buffer(compute_size, stream);
+  rmm::device_uvector<cudf::size_type> compute_buffer(compute_size, stream);
   auto d_buffer = compute_buffer.data();

   // compute the edit distance into the output column
-  auto results = cudf::make_fixed_width_column(cudf::data_type{cudf::type_id::INT32},
+  auto results = cudf::make_fixed_width_column(cudf::data_type{cudf::type_to_id<cudf::size_type>()},
                                                strings_count * strings_count,
                                                rmm::device_buffer{0, stream, mr},
                                                0,
                                                stream,
                                                mr);
-  auto d_results = results->mutable_view().data<int32_t>();
+  auto d_results = results->mutable_view().data<cudf::size_type>();
   thrust::for_each_n(
     rmm::exec_policy(stream),
     thrust::make_counting_iterator(0),
@@ -274,20 +267,21 @@ std::unique_ptr<cudf::column> edit_distance_matrix(cudf::strings_column_view con
     edit_distance_matrix_levenshtein_algorithm{d_strings, d_buffer, d_offsets, d_results});

   // build a lists column of the results
-  auto offsets_column = cudf::make_fixed_width_column(cudf::data_type{cudf::type_id::INT32},
-                                                      strings_count + 1,
-                                                      rmm::device_buffer{0, stream, mr},
-                                                      0,
-                                                      stream,
-                                                      mr);
+  auto offsets_column =
+    cudf::make_fixed_width_column(cudf::data_type{cudf::type_to_id<cudf::size_type>()},
+                                  strings_count + 1,
+                                  rmm::device_buffer{0, stream, mr},
+                                  0,
+                                  stream,
+                                  mr);
   thrust::transform_exclusive_scan(
     rmm::exec_policy(stream),
-    thrust::make_counting_iterator(0),
-    thrust::make_counting_iterator(strings_count + 1),
-    offsets_column->mutable_view().data<int32_t>(),
+    thrust::counting_iterator<cudf::size_type>(0),
+    thrust::counting_iterator<cudf::size_type>(strings_count + 1),
+    offsets_column->mutable_view().data<cudf::size_type>(),
     [strings_count] __device__(auto idx) { return strings_count; },
-    int32_t{0},
-    thrust::plus<int32_t>());
+    cudf::size_type{0},
+    thrust::plus<cudf::size_type>());
   return cudf::make_lists_column(strings_count,
                                  std::move(offsets_column),
                                  std::move(results),

From f999e1c5ed183253585606fdfc7552a224aee2d7 Mon Sep 17 00:00:00 2001
From: GALI PREM SAGAR
Date: Wed, 30 Aug 2023 12:34:00 -0500
Subject: [PATCH 019/150] Fix an issue where casting null-array to `object`
 dtype will result in a failure (#13994)

closes #13992

This PR fixes an issue where constructing an empty `MultiIndex` via
`cudf.from_pandas` raised an error in pandas-compatibility mode. A null-array
is one case where it is _okay_ to cast to any type, because there is no data
in it; hence we pass `str` dtype to `astype` whenever we encounter an
`object` dtype.
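For context, a minimal reproducer of the failure this addresses, mirroring the shape of the new test below (the `option_context` usage is taken from the test; before the fix this raised from an internal `astype("object")`):

```python
import pandas as pd
import cudf

# An empty slice of a MultiIndex produces length-0, all-null object-typed levels.
pmi = pd.MultiIndex.from_tuples([("a", "b")])[:0]

with cudf.option_context("mode.pandas_compatible", True):
    # The null levels are now cast to "str" rather than "object",
    # which is safe because a null array carries no data.
    gmi = cudf.from_pandas(pmi)
```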
Authors: - GALI PREM SAGAR (https://github.com/galipremsagar) Approvers: - Bradley Dice (https://github.com/bdice) URL: https://github.com/rapidsai/cudf/pull/13994 --- python/cudf/cudf/core/column/column.py | 17 +++++++++++++---- python/cudf/cudf/tests/test_multiindex.py | 7 +++++++ 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/python/cudf/cudf/core/column/column.py b/python/cudf/cudf/core/column/column.py index b5332f35073..d60f426c642 100644 --- a/python/cudf/cudf/core/column/column.py +++ b/python/cudf/cudf/core/column/column.py @@ -1996,18 +1996,27 @@ def as_column( col = ColumnBase.from_arrow(arbitrary) if isinstance(arbitrary, pa.NullArray): - new_dtype = cudf.dtype(arbitrary.type.to_pandas_dtype()) if dtype is not None: # Cast the column to the `dtype` if specified. - col = col.astype(dtype) + new_dtype = dtype elif len(arbitrary) == 0: # If the column is empty, it has to be # a `float64` dtype. - col = col.astype("float64") + new_dtype = cudf.dtype("float64") else: # If the null column is not empty, it has to # be of `object` dtype. - col = col.astype(new_dtype) + new_dtype = cudf.dtype(arbitrary.type.to_pandas_dtype()) + + if cudf.get_option( + "mode.pandas_compatible" + ) and new_dtype == cudf.dtype("O"): + # We internally raise if we do `astype("object")`, hence + # need to cast to `str` since this is safe to do so because + # it is a null-array. + new_dtype = "str" + + col = col.astype(new_dtype) return col diff --git a/python/cudf/cudf/tests/test_multiindex.py b/python/cudf/cudf/tests/test_multiindex.py index 464b9623bad..bc9cf20b711 100644 --- a/python/cudf/cudf/tests/test_multiindex.py +++ b/python/cudf/cudf/tests/test_multiindex.py @@ -1889,3 +1889,10 @@ def test_multiindex_levels(): assert_eq(gidx.levels[0], pidx.levels[0]) assert_eq(gidx.levels[1], pidx.levels[1]) + + +def test_multiindex_empty_slice_pandas_compatibility(): + expected = pd.MultiIndex.from_tuples([("a", "b")])[:0] + with cudf.option_context("mode.pandas_compatible", True): + actual = cudf.from_pandas(expected) + assert_eq(expected, actual, exact=False) From 8978a2163baa4d70effd9decf4cdd689705b42b5 Mon Sep 17 00:00:00 2001 From: Ed Seidl Date: Wed, 30 Aug 2023 13:59:46 -0700 Subject: [PATCH 020/150] Create table_input_metadata from a table_metadata (#13920) When round-tripping data through cuDF (e.g. read a parquet file with `read_parquet()`, then write slices using the `chunked_parquet_writer`) column nullability information can be lost. The parquet writers will accept a `table_input_metadata` object as an optional parameter, and this object can be used to preserve the nullability. Creating the `table_input_metadata` can be a challenge, however. This PR addresses this problem by adding the ability to create a `table_input_metadata` using the `table_metadata` returned by `read_parquet()`. 
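To make the round-trip concrete, a sketch of the usage this enables (the file names and the chunked-writer wiring are illustrative, not taken from the PR):

```cpp
// Read a parquet file; the reader records column names and nullability
// in result.metadata (see the reader_impl.cpp change below).
auto const result = cudf::io::read_parquet(
  cudf::io::parquet_reader_options::builder(cudf::io::source_info{"input.parquet"}).build());

// New constructor: build writer input metadata directly from the metadata
// returned by the reader, preserving each column's nullability.
auto const metadata = cudf::io::table_input_metadata{result.metadata};

auto const writer_opts =
  cudf::io::chunked_parquet_writer_options::builder(cudf::io::sink_info{"output.parquet"})
    .metadata(metadata)
    .build();
cudf::io::parquet_chunked_writer writer(writer_opts);
writer.write(result.tbl->view());  // written slices keep the original nullability
writer.close();
```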
Authors: - Ed Seidl (https://github.com/etseidl) - Mike Wilson (https://github.com/hyperbolic2346) - Vukasin Milovanovic (https://github.com/vuule) Approvers: - Vukasin Milovanovic (https://github.com/vuule) - Mike Wilson (https://github.com/hyperbolic2346) URL: https://github.com/rapidsai/cudf/pull/13920 --- cpp/include/cudf/io/types.hpp | 25 ++++++++-- cpp/src/io/functions.cpp | 20 ++++++++ cpp/src/io/parquet/reader_impl.cpp | 5 +- cpp/src/io/utilities/column_buffer.cpp | 5 +- cpp/tests/io/parquet_test.cpp | 64 ++++++++++++++++++++++++++ 5 files changed, 112 insertions(+), 7 deletions(-) diff --git a/cpp/include/cudf/io/types.hpp b/cpp/include/cudf/io/types.hpp index 9b0dcff99af..a97f81182ac 100644 --- a/cpp/include/cudf/io/types.hpp +++ b/cpp/include/cudf/io/types.hpp @@ -201,20 +201,27 @@ enum dictionary_policy { }; /** - * @brief Detailed name information for output columns. + * @brief Detailed name (and optionally nullability) information for output columns. * * The hierarchy of children matches the hierarchy of children in the output * cudf columns. */ struct column_name_info { std::string name; ///< Column name + std::optional is_nullable; ///< Column nullability std::vector children; ///< Child column names + /** - * @brief Construct a column name info with a name and no children + * @brief Construct a column name info with a name, optional nullabilty, and no children * * @param _name Column name + * @param _is_nullable True if column is nullable */ - column_name_info(std::string const& _name) : name(_name) {} + column_name_info(std::string const& _name, std::optional _is_nullable = std::nullopt) + : name(_name), is_nullable(_is_nullable) + { + } + column_name_info() = default; }; @@ -798,7 +805,17 @@ class table_input_metadata { * * @param table The table_view to construct metadata for */ - table_input_metadata(table_view const& table); + explicit table_input_metadata(table_view const& table); + + /** + * @brief Construct a new table_input_metadata from a table_metadata object. + * + * The constructed table_input_metadata has the same structure, column names and nullability as + * the passed table_metadata. 
+ * + * @param metadata The table_metadata to construct table_intput_metadata for + */ + explicit table_input_metadata(table_metadata const& metadata); std::vector column_metadata; //!< List of column metadata }; diff --git a/cpp/src/io/functions.cpp b/cpp/src/io/functions.cpp index 5adb2046dbd..45f8b0f8822 100644 --- a/cpp/src/io/functions.cpp +++ b/cpp/src/io/functions.cpp @@ -517,6 +517,26 @@ table_input_metadata::table_input_metadata(table_view const& table) table.begin(), table.end(), std::back_inserter(this->column_metadata), get_children); } +table_input_metadata::table_input_metadata(table_metadata const& metadata) +{ + auto const& names = metadata.schema_info; + + // Create a metadata hierarchy with naming and nullability using `table_metadata` + std::function process_node = + [&](column_name_info const& name) { + auto col_meta = column_in_metadata{name.name}; + if (name.is_nullable.has_value()) { col_meta.set_nullability(name.is_nullable.value()); } + std::transform(name.children.begin(), + name.children.end(), + std::back_inserter(col_meta.children), + process_node); + return col_meta; + }; + + std::transform( + names.begin(), names.end(), std::back_inserter(this->column_metadata), process_node); +} + /** * @copydoc cudf::io::write_parquet */ diff --git a/cpp/src/io/parquet/reader_impl.cpp b/cpp/src/io/parquet/reader_impl.cpp index 9289ddb91b3..8a73c43be3e 100644 --- a/cpp/src/io/parquet/reader_impl.cpp +++ b/cpp/src/io/parquet/reader_impl.cpp @@ -366,8 +366,9 @@ void reader::impl::populate_metadata(table_metadata& out_metadata) // Return column names out_metadata.schema_info.resize(_output_buffers.size()); for (size_t i = 0; i < _output_column_schemas.size(); i++) { - auto const& schema = _metadata->get_schema(_output_column_schemas[i]); - out_metadata.schema_info[i].name = schema.name; + auto const& schema = _metadata->get_schema(_output_column_schemas[i]); + out_metadata.schema_info[i].name = schema.name; + out_metadata.schema_info[i].is_nullable = schema.repetition_type != REQUIRED; } // Return user metadata diff --git a/cpp/src/io/utilities/column_buffer.cpp b/cpp/src/io/utilities/column_buffer.cpp index 3248d94d60a..f3a43cbc63c 100644 --- a/cpp/src/io/utilities/column_buffer.cpp +++ b/cpp/src/io/utilities/column_buffer.cpp @@ -149,7 +149,10 @@ std::unique_ptr make_column(column_buffer_base& buffer, std::optional const& schema, rmm::cuda_stream_view stream) { - if (schema_info != nullptr) { schema_info->name = buffer.name; } + if (schema_info != nullptr) { + schema_info->name = buffer.name; + schema_info->is_nullable = buffer.is_nullable; + } switch (buffer.type.id()) { case type_id::STRING: diff --git a/cpp/tests/io/parquet_test.cpp b/cpp/tests/io/parquet_test.cpp index b210452f619..3cd5c9f5593 100644 --- a/cpp/tests/io/parquet_test.cpp +++ b/cpp/tests/io/parquet_test.cpp @@ -6599,6 +6599,70 @@ TEST_F(ParquetWriterTest, TimestampMicrosINT96NoOverflow) CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view()); } +TEST_F(ParquetWriterTest, PreserveNullability) +{ + constexpr auto num_rows = 100; + + auto const col0_data = random_values(num_rows); + auto const col1_data = random_values(num_rows); + + auto const col0_validity = cudf::test::iterators::no_nulls(); + auto const col1_validity = + cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 2 == 0; }); + + column_wrapper col0{col0_data.begin(), col0_data.end(), col0_validity}; + column_wrapper col1{col1_data.begin(), col1_data.end(), col1_validity}; + auto const col2 = make_parquet_list_list_col(0, 
num_rows, 5, 8, true); + + auto const expected = table_view{{col0, col1, *col2}}; + + cudf::io::table_input_metadata expected_metadata(expected); + expected_metadata.column_metadata[0].set_name("mandatory"); + expected_metadata.column_metadata[0].set_nullability(false); + expected_metadata.column_metadata[1].set_name("optional"); + expected_metadata.column_metadata[1].set_nullability(true); + expected_metadata.column_metadata[2].set_name("lists"); + expected_metadata.column_metadata[2].set_nullability(true); + // offsets is a cudf thing that's not part of the parquet schema so it won't have nullability set + expected_metadata.column_metadata[2].child(0).set_name("offsets"); + expected_metadata.column_metadata[2].child(1).set_name("element"); + expected_metadata.column_metadata[2].child(1).set_nullability(false); + expected_metadata.column_metadata[2].child(1).child(0).set_name("offsets"); + expected_metadata.column_metadata[2].child(1).child(1).set_name("element"); + expected_metadata.column_metadata[2].child(1).child(1).set_nullability(true); + + auto const filepath = temp_env->get_temp_filepath("PreserveNullability.parquet"); + cudf::io::parquet_writer_options out_opts = + cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected) + .metadata(expected_metadata); + + cudf::io::write_parquet(out_opts); + + cudf::io::parquet_reader_options const in_opts = + cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath}); + auto const result = cudf::io::read_parquet(in_opts); + auto const read_metadata = cudf::io::table_input_metadata{result.metadata}; + + // test that expected_metadata matches read_metadata + std::function + compare_names_and_nullability = [&](auto lhs, auto rhs) { + EXPECT_EQ(lhs.get_name(), rhs.get_name()); + ASSERT_EQ(lhs.is_nullability_defined(), rhs.is_nullability_defined()); + if (lhs.is_nullability_defined()) { EXPECT_EQ(lhs.nullable(), rhs.nullable()); } + ASSERT_EQ(lhs.num_children(), rhs.num_children()); + for (int i = 0; i < lhs.num_children(); ++i) { + compare_names_and_nullability(lhs.child(i), rhs.child(i)); + } + }; + + ASSERT_EQ(expected_metadata.column_metadata.size(), read_metadata.column_metadata.size()); + + for (size_t i = 0; i < expected_metadata.column_metadata.size(); ++i) { + compare_names_and_nullability(expected_metadata.column_metadata[i], + read_metadata.column_metadata[i]); + } +} + TEST_P(ParquetV2Test, CheckEncodings) { using cudf::io::parquet::Encoding; From 04ee729f583ffd44f73483f25a080d880c959f41 Mon Sep 17 00:00:00 2001 From: GALI PREM SAGAR Date: Wed, 30 Aug 2023 16:05:35 -0500 Subject: [PATCH 021/150] Fix return type of `MultiIndex.difference` (#14009) closes #14008 This PR ensures `MultiIndex.difference` returns `cudf.MultiIndex`. 
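A short illustration of the corrected return type (toy data, not taken from the test suite):

```python
import cudf

midx1 = cudf.MultiIndex.from_tuples([(1, "a"), (2, "b"), (3, "c")])
midx2 = cudf.MultiIndex.from_tuples([(2, "b")])

result = midx1.difference(midx2)
# The set difference is still computed via pandas, but the result is now
# wrapped back into a cudf.MultiIndex instead of leaking a pandas.MultiIndex.
assert isinstance(result, cudf.MultiIndex)
```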
Authors: - GALI PREM SAGAR (https://github.com/galipremsagar) Approvers: - Matthew Roeschke (https://github.com/mroeschke) - Bradley Dice (https://github.com/bdice) URL: https://github.com/rapidsai/cudf/pull/14009 --- python/cudf/cudf/core/multiindex.py | 2 +- python/cudf/cudf/tests/test_multiindex.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/python/cudf/cudf/core/multiindex.py b/python/cudf/cudf/core/multiindex.py index eb953a54f6b..12da69740d8 100644 --- a/python/cudf/cudf/core/multiindex.py +++ b/python/cudf/cudf/core/multiindex.py @@ -1637,7 +1637,7 @@ def memory_usage(self, deep=False): def difference(self, other, sort=None): if hasattr(other, "to_pandas"): other = other.to_pandas() - return self.to_pandas().difference(other, sort) + return cudf.from_pandas(self.to_pandas().difference(other, sort)) @_cudf_nvtx_annotate def append(self, other): diff --git a/python/cudf/cudf/tests/test_multiindex.py b/python/cudf/cudf/tests/test_multiindex.py index bc9cf20b711..eedc9b0c174 100644 --- a/python/cudf/cudf/tests/test_multiindex.py +++ b/python/cudf/cudf/tests/test_multiindex.py @@ -1697,6 +1697,7 @@ def test_difference(): expected = midx2.to_pandas().difference(midx.to_pandas()) actual = midx2.difference(midx) + assert isinstance(actual, cudf.MultiIndex) assert_eq(expected, actual) From c1b79313f0aa6d1bcbef73a3a1a3471512ecfce8 Mon Sep 17 00:00:00 2001 From: GALI PREM SAGAR Date: Wed, 30 Aug 2023 17:28:40 -0500 Subject: [PATCH 022/150] Raise an error when timezone subtypes are encountered in `pd.IntervalDtype` (#14006) closes #14004 This PR raises an error when an `IntervalIndex` contains a timezone-aware sub-type so that we don't go into infinite recursion. Authors: - GALI PREM SAGAR (https://github.com/galipremsagar) Approvers: - Matthew Roeschke (https://github.com/mroeschke) URL: https://github.com/rapidsai/cudf/pull/14006 --- python/cudf/cudf/core/column/column.py | 8 ++++++-- python/cudf/cudf/tests/test_interval.py | 16 ++++++++++++++++ 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/python/cudf/cudf/core/column/column.py b/python/cudf/cudf/core/column/column.py index d60f426c642..ad761ea8d18 100644 --- a/python/cudf/cudf/core/column/column.py +++ b/python/cudf/cudf/core/column/column.py @@ -2261,8 +2261,12 @@ def as_column( data = ColumnBase.from_scalar(arbitrary, length if length else 1) elif isinstance(arbitrary, pd.core.arrays.masked.BaseMaskedArray): data = as_column(pa.Array.from_pandas(arbitrary), dtype=dtype) - elif isinstance(arbitrary, pd.DatetimeIndex) and isinstance( - arbitrary.dtype, pd.DatetimeTZDtype + elif ( + isinstance(arbitrary, pd.DatetimeIndex) + and isinstance(arbitrary.dtype, pd.DatetimeTZDtype) + ) or ( + isinstance(arbitrary, pd.IntervalIndex) + and is_datetime64tz_dtype(arbitrary.dtype.subtype) ): raise NotImplementedError( "cuDF does not yet support timezone-aware datetimes" diff --git a/python/cudf/cudf/tests/test_interval.py b/python/cudf/cudf/tests/test_interval.py index f2e8f585a69..9704be44b95 100644 --- a/python/cudf/cudf/tests/test_interval.py +++ b/python/cudf/cudf/tests/test_interval.py @@ -165,3 +165,19 @@ def test_interval_index_unique(): actual = gi.unique() assert_eq(expected, actual) + + +@pytest.mark.parametrize("tz", ["US/Eastern", None]) +def test_interval_with_datetime(tz): + dti = pd.date_range( + start=pd.Timestamp("20180101", tz=tz), + end=pd.Timestamp("20181231", tz=tz), + freq="M", + ) + pidx = pd.IntervalIndex.from_breaks(dti) + if tz is 
None:
+        gidx = cudf.from_pandas(pidx)
+        assert_eq(pidx, gidx)
+    else:
+        with pytest.raises(NotImplementedError):
+            cudf.from_pandas(pidx)

From c73ff70dc5ad85d71a0719606c688c2447d55d85 Mon Sep 17 00:00:00 2001
From: Martin Marenz
Date: Thu, 31 Aug 2023 00:29:31 +0200
Subject: [PATCH 023/150] Enable fractional null probability for hashing
 benchmark (#13967)

In the past, the HASHING_NVBENCH benchmark treated the `nulls` parameter as a
boolean. Any value other than 0.0 resulted in a null probability of 1.0. Now,
the `nulls` parameter directly determines the null probability. For instance,
a value of 0.1 will generate 10% of the data as null. Moreover, setting
`nulls` to 0.0 produces data without a null bitmask.

Additionally, `bytes_per_second` is added to the benchmark.

This patch relates to #13735.

Authors:
  - Martin Marenz (https://github.com/Blonck)
  - Yunsong Wang (https://github.com/PointKernel)

Approvers:
  - Yunsong Wang (https://github.com/PointKernel)
  - David Wendt (https://github.com/davidwendt)

URL: https://github.com/rapidsai/cudf/pull/13967
---
 cpp/benchmarks/hashing/hash.cpp | 35 +++++++++++++++++++++++++++++----
 1 file changed, 31 insertions(+), 4 deletions(-)

diff --git a/cpp/benchmarks/hashing/hash.cpp b/cpp/benchmarks/hashing/hash.cpp
index f0e9202612e..e679b4b62d2 100644
--- a/cpp/benchmarks/hashing/hash.cpp
+++ b/cpp/benchmarks/hashing/hash.cpp
@@ -17,32 +17,59 @@
 #include
 #include
+#include
 #include
 #include
 #include
+#include
+
 static void bench_hash(nvbench::state& state)
 {
-  auto const num_rows = static_cast<cudf::size_type>(state.get_int64("num_rows"));
-  auto const nulls    = static_cast<bool>(state.get_float64("nulls"));
+  auto const num_rows = static_cast<cudf::size_type>(state.get_int64("num_rows"));
+  auto const nulls    = state.get_float64("nulls");
+  // disable null bitmask if probability is exactly 0.0
+  bool const no_nulls  = nulls == 0.0;
   auto const hash_name = state.get_string("hash_name");

-  data_profile const profile = data_profile_builder().null_probability(nulls);
-  auto const data            = create_random_table(
+  data_profile const profile =
+    data_profile_builder().null_probability(no_nulls ? std::nullopt : std::optional<double>{nulls});
+  auto const data = create_random_table(
     {cudf::type_id::INT64, cudf::type_id::STRING}, row_count{num_rows}, profile);

   auto stream = cudf::get_default_stream();
   state.set_cuda_stream(nvbench::make_cuda_stream_view(stream.value()));

+  // collect statistics
+  cudf::strings_column_view input(data->get_column(1).view());
+  auto const chars_size = input.chars_size();
+  // add memory read from string column
+  state.add_global_memory_reads<nvbench::int8_t>(chars_size);
+  // add memory read from int64_t column
+  state.add_global_memory_reads<nvbench::int64_t>(num_rows);
+  // add memory read from bitmasks
+  if (!no_nulls) {
+    state.add_global_memory_reads<nvbench::int8_t>(2 *
+                                                   cudf::bitmask_allocation_size_bytes(num_rows));
+  }
+  // memory written depends on used hash
+
   if (hash_name == "murmurhash3_x86_32") {
+    state.add_global_memory_writes<nvbench::uint32_t>(num_rows);
+
     state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
       auto result = cudf::hashing::murmurhash3_x86_32(data->view());
     });
   } else if (hash_name == "md5") {
+    // md5 creates a 32-byte string
+    state.add_global_memory_writes<nvbench::int8_t>(32 * num_rows);
+
     state.exec(nvbench::exec_tag::sync,
                [&](nvbench::launch& launch) { auto result = cudf::hashing::md5(data->view()); });
   } else if (hash_name == "spark_murmurhash3_x86_32") {
+    state.add_global_memory_writes<nvbench::int32_t>(num_rows);
+
     state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
       auto result = cudf::hashing::spark_murmurhash3_x86_32(data->view());
     });

From a2fd6883977fb73027f36357d7114e12bb683296 Mon Sep 17 00:00:00 2001
From: Lawrence Mitchell
Date: Thu, 31 Aug 2023 21:33:30 +0100
Subject: [PATCH 024/150] Explicitly depend on zlib in conda recipes (#14018)

We were previously obtaining zlib transitively through our cmake dependency,
but as of the 3.27.4 cmake conda package that transitive dependency no longer
exists. Therefore we must depend on zlib ourselves.
- Closes #14021 Authors: - Lawrence Mitchell (https://github.com/wence-) Approvers: - Bradley Dice (https://github.com/bdice) - AJ Schmidt (https://github.com/ajschmidt8) - Vyas Ramasubramani (https://github.com/vyasr) URL: https://github.com/rapidsai/cudf/pull/14018 --- conda/environments/all_cuda-118_arch-x86_64.yaml | 1 + conda/environments/all_cuda-120_arch-x86_64.yaml | 1 + conda/recipes/libcudf/conda_build_config.yaml | 2 ++ conda/recipes/libcudf/meta.yaml | 1 + dependencies.yaml | 1 + 5 files changed, 6 insertions(+) diff --git a/conda/environments/all_cuda-118_arch-x86_64.yaml b/conda/environments/all_cuda-118_arch-x86_64.yaml index e4a9b2f1d29..8965a43b8ac 100644 --- a/conda/environments/all_cuda-118_arch-x86_64.yaml +++ b/conda/environments/all_cuda-118_arch-x86_64.yaml @@ -95,6 +95,7 @@ dependencies: - tokenizers==0.13.1 - transformers==4.24.0 - typing_extensions>=4.0.0 +- zlib>=1.2.13 - pip: - git+https://github.com/python-streamz/streamz.git@master name: all_cuda-118_arch-x86_64 diff --git a/conda/environments/all_cuda-120_arch-x86_64.yaml b/conda/environments/all_cuda-120_arch-x86_64.yaml index d03c4364435..4542eb79267 100644 --- a/conda/environments/all_cuda-120_arch-x86_64.yaml +++ b/conda/environments/all_cuda-120_arch-x86_64.yaml @@ -92,6 +92,7 @@ dependencies: - tokenizers==0.13.1 - transformers==4.24.0 - typing_extensions>=4.0.0 +- zlib>=1.2.13 - pip: - git+https://github.com/python-streamz/streamz.git@master name: all_cuda-120_arch-x86_64 diff --git a/conda/recipes/libcudf/conda_build_config.yaml b/conda/recipes/libcudf/conda_build_config.yaml index 0397045786b..25b3f19de77 100644 --- a/conda/recipes/libcudf/conda_build_config.yaml +++ b/conda/recipes/libcudf/conda_build_config.yaml @@ -40,6 +40,8 @@ spdlog_version: nvcomp_version: - "=2.6.1" +zlib_version: + - ">=1.2.13" # The CTK libraries below are missing from the conda-forge::cudatoolkit package # for CUDA 11. The "*_host_*" version specifiers correspond to `11.8` packages # and the "*_run_*" version specifiers correspond to `11.x` packages. diff --git a/conda/recipes/libcudf/meta.yaml b/conda/recipes/libcudf/meta.yaml index de32facba74..c844131ad31 100644 --- a/conda/recipes/libcudf/meta.yaml +++ b/conda/recipes/libcudf/meta.yaml @@ -73,6 +73,7 @@ requirements: - benchmark {{ gbench_version }} - gtest {{ gtest_version }} - gmock {{ gtest_version }} + - zlib {{ zlib_version }} outputs: - name: libcudf diff --git a/dependencies.yaml b/dependencies.yaml index a1d928797b0..97f86c6b864 100644 --- a/dependencies.yaml +++ b/dependencies.yaml @@ -179,6 +179,7 @@ dependencies: - c-compiler - cxx-compiler - dlpack>=0.5,<0.6.0a0 + - zlib>=1.2.13 specific: - output_types: conda matrices: From eeb761359f4eb5b15563177177c07b2ffa20cc4f Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Thu, 31 Aug 2023 16:08:03 -0700 Subject: [PATCH 025/150] Simplify wheel build scripts and allow alphas of RAPIDS dependencies (#13963) This PR makes a handful of changes aimed at simplifying the CI pipeline for building wheels as a precursor to switching RAPIDS nightlies to using proper alpha versions: - Inlines apply_wheel_modifications.sh in build_wheel.sh. Now that the build doesn't rely excessively on logic in shared workflows, there's no real benefit to having a separate script (previously apply_wheel_modification.sh was a special script that the shared workflow knew to execute i.e. it was a hook into an externally controlled workflow). 
- Consolidates the textual replacements using for loops and makes the replacements more targeted by only modifying the Python package being built in a given script. For instance, python/dask_cudf/pyproject.toml is no longer overwritten when building cudf. - Modifies dependency specs for RAPIDS packages to include a `>=0.0.0a0` component. This is the key change that will allow alpha dependencies to be discovered. Authors: - Vyas Ramasubramani (https://github.com/vyasr) Approvers: - Bradley Dice (https://github.com/bdice) - AJ Schmidt (https://github.com/ajschmidt8) URL: https://github.com/rapidsai/cudf/pull/13963 --- ci/build_wheel.sh | 55 +++++++++++++++++++++++++ ci/build_wheel_cudf.sh | 24 ++++------- ci/build_wheel_dask_cudf.sh | 19 ++------- ci/release/apply_wheel_modifications.sh | 32 -------------- 4 files changed, 65 insertions(+), 65 deletions(-) create mode 100755 ci/build_wheel.sh delete mode 100755 ci/release/apply_wheel_modifications.sh diff --git a/ci/build_wheel.sh b/ci/build_wheel.sh new file mode 100755 index 00000000000..06d0c3c7a56 --- /dev/null +++ b/ci/build_wheel.sh @@ -0,0 +1,55 @@ +#!/bin/bash +# Copyright (c) 2023, NVIDIA CORPORATION. + +set -euo pipefail + +package_name=$1 +package_dir=$2 + +source rapids-configure-sccache +source rapids-date-string + +# Use gha-tools rapids-pip-wheel-version to generate wheel version then +# update the necessary files +version_override="$(rapids-pip-wheel-version ${RAPIDS_DATE_STRING})" + +RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})" + +# This is the version of the suffix with a preceding hyphen. It's used +# everywhere except in the final wheel name. +PACKAGE_CUDA_SUFFIX="-${RAPIDS_PY_CUDA_SUFFIX}" + +# Patch project metadata files to include the CUDA version suffix and version override. +pyproject_file="${package_dir}/pyproject.toml" + +sed -i "s/^version = .*/version = \"${version_override}\"/g" ${pyproject_file} +sed -i "s/name = \"${package_name}\"/name = \"${package_name}${PACKAGE_CUDA_SUFFIX}\"/g" ${pyproject_file} + +# For nightlies we want to ensure that we're pulling in alphas as well. The +# easiest way to do so is to augment the spec with a constraint containing a +# min alpha version that doesn't affect the version bounds but does allow usage +# of alpha versions for that dependency without --pre +alpha_spec='' +if ! rapids-is-release-build; then + alpha_spec=',>=0.0.0a0' +fi + +if [[ ${package_name} == "dask_cudf" ]]; then + sed -r -i "s/cudf==(.*)\"/cudf${PACKAGE_CUDA_SUFFIX}==\1${alpha_spec}\"/g" ${pyproject_file} +else + sed -r -i "s/rmm(.*)\"/rmm${PACKAGE_CUDA_SUFFIX}\1${alpha_spec}\"/g" ${pyproject_file} + # ptxcompiler and cubinlinker aren't version constrained + sed -r -i "s/ptxcompiler\"/ptxcompiler${PACKAGE_CUDA_SUFFIX}\"/g" ${pyproject_file} + sed -r -i "s/cubinlinker\"/cubinlinker${PACKAGE_CUDA_SUFFIX}\"/g" ${pyproject_file} +fi + +if [[ $PACKAGE_CUDA_SUFFIX == "-cu12" ]]; then + sed -i "s/cuda-python[<=>\.,0-9a]*/cuda-python>=12.0,<13.0a0/g" ${pyproject_file} + sed -i "s/cupy-cuda11x/cupy-cuda12x/g" ${pyproject_file} + sed -i "/ptxcompiler/d" ${pyproject_file} + sed -i "/cubinlinker/d" ${pyproject_file} +fi + +cd "${package_dir}" + +python -m pip wheel . 
-w dist -vvv --no-deps --disable-pip-version-check diff --git a/ci/build_wheel_cudf.sh b/ci/build_wheel_cudf.sh index c20a5162788..7d3919b2d72 100755 --- a/ci/build_wheel_cudf.sh +++ b/ci/build_wheel_cudf.sh @@ -3,24 +3,14 @@ set -euo pipefail -source rapids-configure-sccache -source rapids-date-string +package_dir="python/cudf" -# Use gha-tools rapids-pip-wheel-version to generate wheel version then -# update the necessary files -version_override="$(rapids-pip-wheel-version ${RAPIDS_DATE_STRING})" +export SKBUILD_CONFIGURE_OPTIONS="-DCUDF_BUILD_WHEELS=ON -DDETECT_CONDA_ENV=OFF" -RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})" - -ci/release/apply_wheel_modifications.sh ${version_override} "-${RAPIDS_PY_CUDA_SUFFIX}" -echo "The package name and/or version was modified in the package source. The git diff is:" -git diff - -cd python/cudf +./ci/build_wheel.sh cudf ${package_dir} -SKBUILD_CONFIGURE_OPTIONS="-DCUDF_BUILD_WHEELS=ON -DDETECT_CONDA_ENV=OFF" python -m pip wheel . -w dist -vvv --no-deps --disable-pip-version-check +mkdir -p ${package_dir}/final_dist +python -m auditwheel repair -w ${package_dir}/final_dist ${package_dir}/dist/* -mkdir -p final_dist -python -m auditwheel repair -w final_dist dist/* - -RAPIDS_PY_WHEEL_NAME="cudf_${RAPIDS_PY_CUDA_SUFFIX}" rapids-upload-wheels-to-s3 final_dist +RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})" +RAPIDS_PY_WHEEL_NAME="cudf_${RAPIDS_PY_CUDA_SUFFIX}" rapids-upload-wheels-to-s3 ${package_dir}/final_dist diff --git a/ci/build_wheel_dask_cudf.sh b/ci/build_wheel_dask_cudf.sh index 9af90d3a863..47e35c46004 100755 --- a/ci/build_wheel_dask_cudf.sh +++ b/ci/build_wheel_dask_cudf.sh @@ -3,22 +3,9 @@ set -euo pipefail -source rapids-configure-sccache -source rapids-date-string +package_dir="python/dask_cudf" -# Use gha-tools rapids-pip-wheel-version to generate wheel version then -# update the necessary files -version_override="$(rapids-pip-wheel-version ${RAPIDS_DATE_STRING})" +./ci/build_wheel.sh dask_cudf ${package_dir} RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})" - -ci/release/apply_wheel_modifications.sh ${version_override} "-${RAPIDS_PY_CUDA_SUFFIX}" -echo "The package name and/or version was modified in the package source. The git diff is:" -git diff - -cd python/dask_cudf - -# Hardcode the output dir -python -m pip wheel . -w dist -vvv --no-deps --disable-pip-version-check - -RAPIDS_PY_WHEEL_NAME="dask_cudf_${RAPIDS_PY_CUDA_SUFFIX}" rapids-upload-wheels-to-s3 dist +RAPIDS_PY_WHEEL_NAME="dask_cudf_${RAPIDS_PY_CUDA_SUFFIX}" rapids-upload-wheels-to-s3 ${package_dir}/dist diff --git a/ci/release/apply_wheel_modifications.sh b/ci/release/apply_wheel_modifications.sh deleted file mode 100755 index 9d337aaa057..00000000000 --- a/ci/release/apply_wheel_modifications.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -# Copyright (c) 2023, NVIDIA CORPORATION. 
-# -# Usage: bash apply_wheel_modifications.sh - -VERSION=${1} -CUDA_SUFFIX=${2} - -# pyproject.toml versions -sed -i "s/^version = .*/version = \"${VERSION}\"/g" python/cudf/pyproject.toml -sed -i "s/^version = .*/version = \"${VERSION}\"/g" python/dask_cudf/pyproject.toml -sed -i "s/^version = .*/version = \"${VERSION}\"/g" python/cudf_kafka/pyproject.toml -sed -i "s/^version = .*/version = \"${VERSION}\"/g" python/custreamz/pyproject.toml - -# cudf pyproject.toml cuda suffixes -sed -i "s/^name = \"cudf\"/name = \"cudf${CUDA_SUFFIX}\"/g" python/cudf/pyproject.toml -sed -i "s/rmm/rmm${CUDA_SUFFIX}/g" python/cudf/pyproject.toml -sed -i "s/ptxcompiler/ptxcompiler${CUDA_SUFFIX}/g" python/cudf/pyproject.toml -sed -i "s/cubinlinker/cubinlinker${CUDA_SUFFIX}/g" python/cudf/pyproject.toml - -# dask_cudf pyproject.toml cuda suffixes -sed -i "s/^name = \"dask_cudf\"/name = \"dask_cudf${CUDA_SUFFIX}\"/g" python/dask_cudf/pyproject.toml -# Need to provide the == to avoid modifying the URL -sed -i "s/\"cudf==/\"cudf${CUDA_SUFFIX}==/g" python/dask_cudf/pyproject.toml - -if [[ $CUDA_SUFFIX == "-cu12" ]]; then - sed -i "s/cuda-python[<=>\.,0-9a]*/cuda-python>=12.0,<13.0a0/g" python/cudf/pyproject.toml - sed -i "s/cupy-cuda11x/cupy-cuda12x/g" python/{cudf,dask_cudf}/pyproject.toml - sed -i "s/numba[<=>\.,0-9]*/numba>=0.57/g" python/{cudf,dask_cudf}/pyproject.toml - sed -i "/ptxcompiler/d" python/cudf/pyproject.toml - sed -i "/cubinlinker/d" python/cudf/pyproject.toml -fi From a8d3597ace2291d74325dd24c9cfa5126bb21847 Mon Sep 17 00:00:00 2001 From: David Wendt <45795991+davidwendt@users.noreply.github.com> Date: Thu, 31 Aug 2023 19:19:21 -0400 Subject: [PATCH 026/150] Add stream parameter to public cudf::strings::split APIs (#13997) Adds a `stream` parameter to the libcudf `cudf:strings::split(), cudf:strings::rsplit(), cudf:strings::split_record(), cudf:strings::rsplit_record()` APIs. Change needed for work in #13891 Authors: - David Wendt (https://github.com/davidwendt) Approvers: - https://github.com/nvdbaranec - Bradley Dice (https://github.com/bdice) - Vyas Ramasubramani (https://github.com/vyasr) URL: https://github.com/rapidsai/cudf/pull/13997 --- cpp/include/cudf/strings/split/split.hpp | 56 ++++++++++++++---------- cpp/src/strings/split/split.cu | 6 ++- cpp/src/strings/split/split_record.cu | 7 +-- 3 files changed, 40 insertions(+), 29 deletions(-) diff --git a/cpp/include/cudf/strings/split/split.hpp b/cpp/include/cudf/strings/split/split.hpp index a6c942d39b4..701950e61a5 100644 --- a/cpp/include/cudf/strings/split/split.hpp +++ b/cpp/include/cudf/strings/split/split.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022, NVIDIA CORPORATION. + * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -43,18 +43,20 @@ namespace strings { * * Any null string entries return corresponding null output columns. * - * @param strings_column Strings instance for this operation. - * @param delimiter UTF-8 encoded string indicating the split points in each string. + * @param strings_column Strings instance for this operation + * @param delimiter UTF-8 encoded string indicating the split points in each string; * Default of empty string indicates split on whitespace. - * @param maxsplit Maximum number of splits to perform. + * @param maxsplit Maximum number of splits to perform; * Default of -1 indicates all possible splits on each string. 
- * @param mr Device memory resource used to allocate the returned table's device memory.
- * @return New table of strings columns.
+ * @param stream CUDA stream used for device memory operations and kernel launches
+ * @param mr Device memory resource used to allocate the returned table's device memory
+ * @return New table of strings columns
  */
 std::unique_ptr<table> split(
   strings_column_view const& strings_column,
   string_scalar const& delimiter      = string_scalar(""),
   size_type maxsplit                  = -1,
+  rmm::cuda_stream_view stream        = cudf::get_default_stream(),
   rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());

 /**
@@ -71,18 +73,20 @@ std::unique_ptr<table> split(
  *
  * Any null string entries return corresponding null output columns.
  *
- * @param strings_column Strings instance for this operation.
- * @param delimiter UTF-8 encoded string indicating the split points in each string.
+ * @param strings_column Strings instance for this operation
+ * @param delimiter UTF-8 encoded string indicating the split points in each string;
  *        Default of empty string indicates split on whitespace.
- * @param maxsplit Maximum number of splits to perform.
+ * @param maxsplit Maximum number of splits to perform;
  *        Default of -1 indicates all possible splits on each string.
- * @param mr Device memory resource used to allocate the returned table's device memory.
+ * @param stream CUDA stream used for device memory operations and kernel launches
+ * @param mr Device memory resource used to allocate the returned table's device memory
  * @return New strings columns.
  */
 std::unique_ptr<table> rsplit(
   strings_column_view const& strings_column,
   string_scalar const& delimiter      = string_scalar(""),
   size_type maxsplit                  = -1,
+  rmm::cuda_stream_view stream        = cudf::get_default_stream(),
   rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());

 /**
@@ -141,20 +145,22 @@ std::unique_ptr<table> rsplit(
  *
  * @throw cudf::logic_error if `delimiter` is invalid.
  *
- * @param strings A column of string elements to be split.
- * @param delimiter The string to identify split points in each string.
+ * @param strings A column of string elements to be split
+ * @param delimiter The string to identify split points in each string;
  *        Default of empty string indicates split on whitespace.
- * @param maxsplit Maximum number of splits to perform.
- *        Default of -1 indicates all possible splits on each string.
- * @param mr Device memory resource used to allocate the returned result's device memory.
- * @return Lists column of strings
- *         Each vector of the lists column holds splits from a single row
+ * @param maxsplit Maximum number of splits to perform;
+ *        Default of -1 indicates all possible splits on each string
+ * @param stream CUDA stream used for device memory operations and kernel launches
+ * @param mr Device memory resource used to allocate the returned result's device memory
+ * @return Lists column of strings;
+ *         Each row of the lists column holds splits from a single row
  *         element of the input column.
  */
 std::unique_ptr<column> split_record(
   strings_column_view const& strings,
   string_scalar const& delimiter      = string_scalar(""),
   size_type maxsplit                  = -1,
+  rmm::cuda_stream_view stream        = cudf::get_default_stream(),
   rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());

 /**
@@ -218,20 +224,22 @@ std::unique_ptr<column> split_record(
  *
  * @throw cudf::logic_error if `delimiter` is invalid.
  *
- * @param strings A column of string elements to be split.
- * @param delimiter The string to identify split points in each string.
+ * @param strings A column of string elements to be split
+ * @param delimiter The string to identify split points in each string;
  *        Default of empty string indicates split on whitespace.
- * @param maxsplit Maximum number of splits to perform.
- *        Default of -1 indicates all possible splits on each string.
- * @param mr Device memory resource used to allocate the returned result's device memory.
- * @return Lists column of strings
- *         Each vector of the lists column holds splits from a single row
+ * @param maxsplit Maximum number of splits to perform;
+ *        Default of -1 indicates all possible splits on each string
+ * @param stream CUDA stream used for device memory operations and kernel launches
+ * @param mr Device memory resource used to allocate the returned result's device memory
+ * @return Lists column of strings;
+ *         Each row of the lists column holds splits from a single row
  *         element of the input column.
  */
 std::unique_ptr<column> rsplit_record(
   strings_column_view const& strings,
   string_scalar const& delimiter      = string_scalar(""),
   size_type maxsplit                  = -1,
+  rmm::cuda_stream_view stream        = cudf::get_default_stream(),
   rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());

 /** @} */  // end of doxygen group
diff --git a/cpp/src/strings/split/split.cu b/cpp/src/strings/split/split.cu
index 56704a4a4b0..bad7eef4523 100644
--- a/cpp/src/strings/split/split.cu
+++ b/cpp/src/strings/split/split.cu
@@ -431,19 +431,21 @@ std::unique_ptr<table> rsplit(strings_column_view const& strings_column,
 std::unique_ptr<table> split(strings_column_view const& strings_column,
                              string_scalar const& delimiter,
                              size_type maxsplit,
+                             rmm::cuda_stream_view stream,
                              rmm::mr::device_memory_resource* mr)
 {
   CUDF_FUNC_RANGE();
-  return detail::split(strings_column, delimiter, maxsplit, cudf::get_default_stream(), mr);
+  return detail::split(strings_column, delimiter, maxsplit, stream, mr);
 }

 std::unique_ptr<table> rsplit(strings_column_view const& strings_column,
                               string_scalar const& delimiter,
                               size_type maxsplit,
+                              rmm::cuda_stream_view stream,
                               rmm::mr::device_memory_resource* mr)
 {
   CUDF_FUNC_RANGE();
-  return detail::rsplit(strings_column, delimiter, maxsplit, cudf::get_default_stream(), mr);
+  return detail::rsplit(strings_column, delimiter, maxsplit, stream, mr);
 }

 }  // namespace strings
diff --git a/cpp/src/strings/split/split_record.cu b/cpp/src/strings/split/split_record.cu
index fab95f4f6d1..52f27c68111 100644
--- a/cpp/src/strings/split/split_record.cu
+++ b/cpp/src/strings/split/split_record.cu
@@ -203,21 +203,22 @@ std::unique_ptr<column> split_record(strings_column_view const& strings,
 std::unique_ptr<column> split_record(strings_column_view const& strings,
                                      string_scalar const& delimiter,
                                      size_type maxsplit,
+                                     rmm::cuda_stream_view stream,
                                      rmm::mr::device_memory_resource* mr)
 {
   CUDF_FUNC_RANGE();
-  return detail::split_record(
-    strings, delimiter, maxsplit, cudf::get_default_stream(), mr);
+  return detail::split_record(strings, delimiter, maxsplit, stream, mr);
 }

 std::unique_ptr<column> rsplit_record(strings_column_view const& strings,
                                       string_scalar const& delimiter,
                                       size_type maxsplit,
+                                      rmm::cuda_stream_view stream,
                                       rmm::mr::device_memory_resource* mr)
 {
   CUDF_FUNC_RANGE();
   return detail::split_record(
-    strings, delimiter, maxsplit, cudf::get_default_stream(), mr);
+    strings, delimiter, maxsplit, stream, mr);
 }

 }  // namespace strings

From ad9fa501192332ca8ce310ffe967827ad6f6f2b2 Mon Sep 17 00:00:00 2001
From: GALI PREM SAGAR
Date: Thu, 31 Aug 2023 18:33:26 -0500
Subject: [PATCH 027/150] Preserve types of scalar being returned when
 possible in `quantile` (#14014)

closes #14002

This PR changes the behavior of the `quantile` API by preserving the return
type of the scalar when `interpolation` is one of `"lower"`, `"higher"`, or
`"nearest"`.

Authors:
  - GALI PREM SAGAR (https://github.com/galipremsagar)

Approvers:
  - Bradley Dice (https://github.com/bdice)
  - Vyas Ramasubramani (https://github.com/vyasr)

URL: https://github.com/rapidsai/cudf/pull/14014
---
 python/cudf/cudf/core/column/numerical_base.py | 10 ++++++++++
 python/cudf/cudf/core/dataframe.py             | 13 ++++++++++---
 python/cudf/cudf/core/series.py                |  9 +++++++--
 python/cudf/cudf/tests/test_quantiles.py       | 15 +++++++++++++++
 4 files changed, 42 insertions(+), 5 deletions(-)

diff --git a/python/cudf/cudf/core/column/numerical_base.py b/python/cudf/cudf/core/column/numerical_base.py
index 08c2f7cc7b1..e59d56af9dc 100644
--- a/python/cudf/cudf/core/column/numerical_base.py
+++ b/python/cudf/cudf/core/column/numerical_base.py
@@ -115,6 +115,16 @@ def quantile(
         result = self._numeric_quantile(q, interpolation, exact)
         if return_scalar:
             scalar_result = result.element_indexing(0)
+            if interpolation in {"lower", "higher", "nearest"}:
+                try:
+                    new_scalar = self.dtype.type(scalar_result)
+                    scalar_result = (
+                        new_scalar
+                        if new_scalar == scalar_result
+                        else scalar_result
+                    )
+                except (TypeError, ValueError):
+                    pass
             return (
                 cudf.utils.dtypes._get_nan_for_dtype(self.dtype)
                 if scalar_result is NA
diff --git a/python/cudf/cudf/core/dataframe.py b/python/cudf/cudf/core/dataframe.py
index 3f89f78d278..e67604069f1 100644
--- a/python/cudf/cudf/core/dataframe.py
+++ b/python/cudf/cudf/core/dataframe.py
@@ -5487,16 +5487,23 @@ def quantile(
         numeric_only : bool, default True
             If False, the quantile of datetime and timedelta data will be
             computed as well.
- interpolation : {`linear`, `lower`, `higher`, `midpoint`, `nearest`} + interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This parameter specifies the interpolation method to use, when the desired quantile lies between two data points i and j. - Default is ``linear`` for ``method="single"``, and ``nearest`` + Default is ``'linear'`` for ``method="single"``, and ``'nearest'`` for ``method="table"``. + + * linear: `i + (j - i) * fraction`, where `fraction` is the + fractional part of the index surrounded by `i` and `j`. + * lower: `i`. + * higher: `j`. + * nearest: `i` or `j` whichever is nearest. + * midpoint: (`i` + `j`) / 2. columns : list of str List of column names to include. exact : boolean Whether to use approximate or exact quantile algorithm. - method : {`single`, `table`}, default `single` + method : {'single', 'table'}, default `'single'` Whether to compute quantiles per-column ('single') or over all columns ('table'). When 'table', the only allowed interpolation methods are 'nearest', 'lower', and 'higher'. diff --git a/python/cudf/cudf/core/series.py b/python/cudf/cudf/core/series.py index 30d584c2270..2fef741ac09 100644 --- a/python/cudf/cudf/core/series.py +++ b/python/cudf/cudf/core/series.py @@ -3132,8 +3132,13 @@ def quantile( interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points i and j: - columns : list of str - List of column names to include. + + * linear: `i + (j - i) * fraction`, where `fraction` is the + fractional part of the index surrounded by `i` and `j`. + * lower: `i`. + * higher: `j`. + * nearest: `i` or `j` whichever is nearest. + * midpoint: (`i` + `j`) / 2. exact : boolean Whether to use approximate or exact quantile algorithm. quant_index : boolean diff --git a/python/cudf/cudf/tests/test_quantiles.py b/python/cudf/cudf/tests/test_quantiles.py index 53b06e64a91..8b126073a0f 100644 --- a/python/cudf/cudf/tests/test_quantiles.py +++ b/python/cudf/cudf/tests/test_quantiles.py @@ -75,3 +75,18 @@ def test_quantile_q_type(): ), ): gs.quantile(cudf.DataFrame()) + + +@pytest.mark.parametrize( + "interpolation", ["linear", "lower", "higher", "midpoint", "nearest"] +) +def test_quantile_type_int_float(interpolation): + data = [1, 3, 4] + psr = pd.Series(data) + gsr = cudf.Series(data) + + expected = psr.quantile(0.5, interpolation=interpolation) + actual = gsr.quantile(0.5, interpolation=interpolation) + + assert expected == actual + assert type(expected) == type(actual) From 12fe7ee98901d51e8ee369b09ba2615b3a38dfbd Mon Sep 17 00:00:00 2001 From: Bradley Dice Date: Thu, 31 Aug 2023 21:58:10 -0500 Subject: [PATCH 028/150] Fix typo in docstring: metadata. (#14025) Fix for a typo in a docstring for `contiguous_split`. 
Authors: - Bradley Dice (https://github.com/bdice) Approvers: - Divye Gala (https://github.com/divyegala) - Vyas Ramasubramani (https://github.com/vyasr) - Nghia Truong (https://github.com/ttnghia) URL: https://github.com/rapidsai/cudf/pull/14025 --- cpp/include/cudf/contiguous_split.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cpp/include/cudf/contiguous_split.hpp b/cpp/include/cudf/contiguous_split.hpp index 5fe4e738714..bf10f1fd489 100644 --- a/cpp/include/cudf/contiguous_split.hpp +++ b/cpp/include/cudf/contiguous_split.hpp @@ -28,7 +28,7 @@ namespace cudf { * @addtogroup column_copy * @{ * @file - * @brief Table APIs for contiguous_split, pack, unpack, and metadadata + * @brief Table APIs for contiguous_split, pack, unpack, and metadata */ /** From 27e433ad837e72c71acd37376c98b2e5aeb450ad Mon Sep 17 00:00:00 2001 From: Bradley Dice Date: Fri, 1 Sep 2023 00:02:07 -0500 Subject: [PATCH 029/150] Use grid_stride for stride computations. (#13996) This PR adds `grid_1d::grid_stride()` and uses it in a handful of kernels. Follow-up to #13910, which added a `grid_1d::global_thread_id()`. We'll need to do a later PR that catches any missing instances where this should be used, since there are a large number of PRs in flight touching thread indexing code in various files. See #10368. Authors: - Bradley Dice (https://github.com/bdice) Approvers: - Yunsong Wang (https://github.com/PointKernel) - Vyas Ramasubramani (https://github.com/vyasr) URL: https://github.com/rapidsai/cudf/pull/13996 --- cpp/include/cudf/detail/utilities/cuda.cuh | 23 ++++++++++++++++++++++ cpp/include/cudf/detail/valid_if.cuh | 4 ++-- cpp/src/bitmask/null_mask.cu | 6 +++--- cpp/src/copying/scatter.cu | 4 ++-- cpp/src/partitioning/partitioning.cu | 18 ++++++++--------- cpp/src/replace/nulls.cu | 12 +++++------ cpp/src/transform/compute_column.cu | 5 ++--- 7 files changed, 46 insertions(+), 26 deletions(-) diff --git a/cpp/include/cudf/detail/utilities/cuda.cuh b/cpp/include/cudf/detail/utilities/cuda.cuh index c95189f1f94..264302df0e9 100644 --- a/cpp/include/cudf/detail/utilities/cuda.cuh +++ b/cpp/include/cudf/detail/utilities/cuda.cuh @@ -92,6 +92,29 @@ class grid_1d { { return global_thread_id(threadIdx.x, blockIdx.x, blockDim.x); } + + /** + * @brief Returns the stride of a 1D grid. + * + * The returned stride is the total number of threads in the grid. + * + * @param thread_id The thread index within the block + * @param block_id The block index within the grid + * @param num_threads_per_block The number of threads per block + * @return thread_index_type The global thread index + */ + static constexpr thread_index_type grid_stride(thread_index_type num_threads_per_block, + thread_index_type num_blocks_per_grid) + { + return num_threads_per_block * num_blocks_per_grid; + } + + /** + * @brief Returns the stride of the current 1D grid. + * + * @return thread_index_type The number of threads in the grid. 
+ */ + static __device__ thread_index_type grid_stride() { return grid_stride(blockDim.x, gridDim.x); } }; /** diff --git a/cpp/include/cudf/detail/valid_if.cuh b/cpp/include/cudf/detail/valid_if.cuh index bed884a23eb..f3f95dad017 100644 --- a/cpp/include/cudf/detail/valid_if.cuh +++ b/cpp/include/cudf/detail/valid_if.cuh @@ -49,8 +49,8 @@ __global__ void valid_if_kernel( { constexpr size_type leader_lane{0}; auto const lane_id{threadIdx.x % warp_size}; - thread_index_type i = threadIdx.x + blockIdx.x * blockDim.x; - thread_index_type const stride = blockDim.x * gridDim.x; + auto i = cudf::detail::grid_1d::global_thread_id(); + auto const stride = cudf::detail::grid_1d::grid_stride(); size_type warp_valid_count{0}; auto active_mask = __ballot_sync(0xFFFF'FFFFu, i < size); diff --git a/cpp/src/bitmask/null_mask.cu b/cpp/src/bitmask/null_mask.cu index 33dc7e0556b..5a0d3e4f120 100644 --- a/cpp/src/bitmask/null_mask.cu +++ b/cpp/src/bitmask/null_mask.cu @@ -108,7 +108,7 @@ __global__ void set_null_mask_kernel(bitmask_type* __restrict__ destination, thread_index_type const last_word = word_index(end_bit) - word_index(begin_bit); bitmask_type fill_value = valid ? 0xffff'ffff : 0; - thread_index_type const stride = blockDim.x * gridDim.x; + auto const stride = cudf::detail::grid_1d::grid_stride(); for (thread_index_type destination_word_index = grid_1d::global_thread_id(); destination_word_index < number_of_mask_words; @@ -191,7 +191,7 @@ __global__ void copy_offset_bitmask(bitmask_type* __restrict__ destination, size_type source_end_bit, size_type number_of_mask_words) { - thread_index_type const stride = blockDim.x * gridDim.x; + auto const stride = cudf::detail::grid_1d::grid_stride(); for (thread_index_type destination_word_index = grid_1d::global_thread_id(); destination_word_index < number_of_mask_words; destination_word_index += stride) { @@ -265,7 +265,7 @@ __global__ void count_set_bits_kernel(bitmask_type const* bitmask, auto const first_word_index{word_index(first_bit_index)}; auto const last_word_index{word_index(last_bit_index)}; thread_index_type const tid = grid_1d::global_thread_id(); - thread_index_type const stride = blockDim.x * gridDim.x; + thread_index_type const stride = grid_1d::grid_stride(); thread_index_type thread_word_index = tid + first_word_index; size_type thread_count{0}; diff --git a/cpp/src/copying/scatter.cu b/cpp/src/copying/scatter.cu index 96e24e9059d..11c27fc86e3 100644 --- a/cpp/src/copying/scatter.cu +++ b/cpp/src/copying/scatter.cu @@ -52,8 +52,8 @@ __global__ void marking_bitmask_kernel(mutable_column_device_view destination, MapIterator scatter_map, size_type num_scatter_rows) { - thread_index_type row = threadIdx.x + blockIdx.x * blockDim.x; - thread_index_type const stride = blockDim.x * gridDim.x; + auto row = cudf::detail::grid_1d::global_thread_id(); + auto const stride = cudf::detail::grid_1d::grid_stride(); while (row < num_scatter_rows) { size_type const output_row = scatter_map[row]; diff --git a/cpp/src/partitioning/partitioning.cu b/cpp/src/partitioning/partitioning.cu index ff9c4ea2f59..7b6676346c2 100644 --- a/cpp/src/partitioning/partitioning.cu +++ b/cpp/src/partitioning/partitioning.cu @@ -134,8 +134,8 @@ __global__ void compute_row_partition_numbers(row_hasher_t the_hasher, // Accumulate histogram of the size of each partition in shared memory extern __shared__ size_type shared_partition_sizes[]; - auto tid = cudf::thread_index_type{threadIdx.x} + - cudf::thread_index_type{blockIdx.x} * cudf::thread_index_type{blockDim.x}; + auto tid = 
cudf::detail::grid_1d::global_thread_id();
+  auto const stride = cudf::detail::grid_1d::grid_stride();
 
   // Initialize local histogram
   size_type partition_number = threadIdx.x;
@@ -160,7 +160,7 @@ __global__ void compute_row_partition_numbers(row_hasher_t the_hasher,
     row_partition_offset[row_number] =
       atomicAdd(&(shared_partition_sizes[partition_number]), size_type(1));
 
-    tid += cudf::thread_index_type{blockDim.x} * cudf::thread_index_type{gridDim.x};
+    tid += stride;
   }
 
   __syncthreads();
@@ -215,8 +215,8 @@ __global__ void compute_row_output_locations(size_type* __restrict__ row_partiti
   }
   __syncthreads();
 
-  auto tid = cudf::thread_index_type{threadIdx.x} +
-             cudf::thread_index_type{blockIdx.x} * cudf::thread_index_type{blockDim.x};
+  auto tid          = cudf::detail::grid_1d::global_thread_id();
+  auto const stride = cudf::detail::grid_1d::grid_stride();
 
   // Get each row's partition number, and get its output location by
   // incrementing block's offset counter for that partition number
@@ -234,7 +234,7 @@ __global__ void compute_row_output_locations(size_type* __restrict__ row_partiti
     // Store the row's output location in-place
     row_partition_numbers[row_number] = row_output_location;
 
-    tid += cudf::thread_index_type{blockDim.x} * cudf::thread_index_type{gridDim.x};
+    tid += stride;
   }
 }
 
@@ -311,10 +311,8 @@ __global__ void copy_block_partitions(InputIter input_iter,
   __syncthreads();
 
   // Fetch the input data to shared memory
-  for (auto tid = cudf::thread_index_type{threadIdx.x} +
-                  cudf::thread_index_type{blockIdx.x} * cudf::thread_index_type{blockDim.x};
-       tid < num_rows;
-       tid += cudf::thread_index_type{blockDim.x} * cudf::thread_index_type{gridDim.x}) {
+  for (auto tid = cudf::detail::grid_1d::global_thread_id(); tid < num_rows;
+       tid += cudf::detail::grid_1d::grid_stride()) {
     auto const row_number       = static_cast<size_type>(tid);
     size_type const ipartition  = row_partition_numbers[row_number];
 
diff --git a/cpp/src/replace/nulls.cu b/cpp/src/replace/nulls.cu
index e033db0e52a..5b9fd3d9f0f 100644
--- a/cpp/src/replace/nulls.cu
+++ b/cpp/src/replace/nulls.cu
@@ -64,9 +64,9 @@ __global__ void replace_nulls_strings(cudf::column_device_view input,
                                       char* chars,
                                       cudf::size_type* valid_counter)
 {
-  cudf::size_type nrows                 = input.size();
-  cudf::thread_index_type i             = blockIdx.x * blockDim.x + threadIdx.x;
-  cudf::thread_index_type const stride  = blockDim.x * gridDim.x;
+  cudf::size_type nrows = input.size();
+  auto i                = cudf::detail::grid_1d::global_thread_id();
+  auto const stride     = cudf::detail::grid_1d::grid_stride();
 
   uint32_t active_mask = 0xffff'ffff;
   active_mask          = __ballot_sync(active_mask, i < nrows);
@@ -117,9 +117,9 @@ __global__ void replace_nulls(cudf::column_device_view input,
                              cudf::mutable_column_device_view output,
                              cudf::size_type* output_valid_count)
 {
-  cudf::size_type nrows                 = input.size();
-  cudf::thread_index_type i             = blockIdx.x * blockDim.x + threadIdx.x;
-  cudf::thread_index_type const stride  = blockDim.x * gridDim.x;
+  cudf::size_type nrows = input.size();
+  auto i                = cudf::detail::grid_1d::global_thread_id();
+  auto const stride     = cudf::detail::grid_1d::grid_stride();
 
   uint32_t active_mask = 0xffff'ffff;
   active_mask          = __ballot_sync(active_mask, i < nrows);
diff --git a/cpp/src/transform/compute_column.cu b/cpp/src/transform/compute_column.cu
index 61293d51ba2..224dd93b048 100644
--- a/cpp/src/transform/compute_column.cu
+++ b/cpp/src/transform/compute_column.cu
@@ -69,9 +69,8 @@ __launch_bounds__(max_block_size) __global__
   auto thread_intermediate_storage =
    &intermediate_storage[threadIdx.x * 
device_expression_data.num_intermediates];
 
-  auto const start_idx =
-    static_cast<cudf::thread_index_type>(threadIdx.x + blockIdx.x * blockDim.x);
-  auto const stride = static_cast<cudf::thread_index_type>(blockDim.x * gridDim.x);
+  auto start_idx    = cudf::detail::grid_1d::global_thread_id();
+  auto const stride = cudf::detail::grid_1d::grid_stride();
 
   auto evaluator =
     cudf::ast::detail::expression_evaluator<has_nulls>(table, device_expression_data);

From b705c814b5fa44638e4168abcb070ece1d040b24 Mon Sep 17 00:00:00 2001
From: "Robert (Bobby) Evans" 
Date: Fri, 1 Sep 2023 08:41:31 -0500
Subject: [PATCH 030/150] Added pinned pool reservation API for java (#13964)

This adds an API to Java for reserving pinned memory. A reservation is
very much like an allocation, but it provides a way to guarantee up front
that there is enough memory for multiple allocations to succeed.

Authors:
  - Robert (Bobby) Evans (https://github.com/revans2)

Approvers:
  - Gera Shegalov (https://github.com/gerashegalov)

URL: https://github.com/rapidsai/cudf/pull/13964
---
 .../ai/rapids/cudf/HostMemoryReservation.java |  32 ++++
 .../java/ai/rapids/cudf/PinnedMemoryPool.java | 158 ++++++++++++++++--
 2 files changed, 175 insertions(+), 15 deletions(-)
 create mode 100644 java/src/main/java/ai/rapids/cudf/HostMemoryReservation.java

diff --git a/java/src/main/java/ai/rapids/cudf/HostMemoryReservation.java b/java/src/main/java/ai/rapids/cudf/HostMemoryReservation.java
new file mode 100644
index 00000000000..72c2e659372
--- /dev/null
+++ b/java/src/main/java/ai/rapids/cudf/HostMemoryReservation.java
@@ -0,0 +1,32 @@
+/*
+ *
+ *  Copyright (c) 2023, NVIDIA CORPORATION.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+
+package ai.rapids.cudf;
+
+/**
+ * Represents some amount of host memory that has been reserved. A reservation guarantees that one
+ * or more allocations, up to the reserved amount minus padding for alignment, will succeed. A
+ * reservation typically guarantees the amount can be allocated once, meaning that when a buffer
+ * allocated from a reservation is freed it is not returned to the reservation, but to the pool of
+ * memory the reservation originally came from. If more memory is allocated from the reservation
+ * an OutOfMemoryError may be thrown, but it is not guaranteed to happen.
+ *
+ * When the reservation is closed any unused portion of the reservation will be returned to the
+ * pool of memory the reservation came from.
+ */
+public interface HostMemoryReservation extends HostMemoryAllocator, AutoCloseable {}
diff --git a/java/src/main/java/ai/rapids/cudf/PinnedMemoryPool.java b/java/src/main/java/ai/rapids/cudf/PinnedMemoryPool.java
index 969946a9533..9ce72ba237e 100644
--- a/java/src/main/java/ai/rapids/cudf/PinnedMemoryPool.java
+++ b/java/src/main/java/ai/rapids/cudf/PinnedMemoryPool.java
@@ -1,6 +1,6 @@
 /*
  *
- * Copyright (c) 2019-2022, NVIDIA CORPORATION.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License. 
@@ -37,13 +37,14 @@
  */
 public final class PinnedMemoryPool implements AutoCloseable {
   private static final Logger log = LoggerFactory.getLogger(PinnedMemoryPool.class);
-  private static final long ALIGNMENT = 8;
+  private static final long ALIGNMENT = ColumnView.hostPaddingSizeInBytes();
 
   // These static fields should only ever be accessed when class-synchronized.
   // Do NOT use singleton_ directly!  Use the getSingleton accessor instead.
   private static volatile PinnedMemoryPool singleton_ = null;
   private static Future<PinnedMemoryPool> initFuture = null;
 
+  private final long totalPoolSize;
   private final long pinnedPoolBase;
   private final SortedSet<MemorySection> freeHeap = new TreeSet<>(new SortedByAddress());
   private int numAllocatedSections = 0;
@@ -164,6 +165,14 @@ private static void freeInternal(MemorySection section) {
     Objects.requireNonNull(getSingleton()).free(section);
   }
 
+  /**
+   * Used to indicate that memory was allocated from a reservation. This is primarily for
+   * keeping track of outstanding allocations.
+   */
+  private static void reserveAllocInternal(MemorySection section) {
+    Objects.requireNonNull(getSingleton()).reserveAllocHappened(section);
+  }
+
   /**
    * Initialize the pool.
    *
@@ -226,6 +235,21 @@ public static HostMemoryBuffer tryAllocate(long bytes) {
     return result;
   }
 
+  /**
+   * Factory method to create a pinned host memory reservation.
+   *
+   * @param bytes size in bytes to reserve
+   * @return newly created reservation or null if insufficient pinned memory to cover it.
+   */
+  public static HostMemoryReservation tryReserve(long bytes) {
+    HostMemoryReservation result = null;
+    PinnedMemoryPool pool = getSingleton();
+    if (pool != null) {
+      result = pool.tryReserveInternal(bytes);
+    }
+    return result;
+  }
+
   /**
    * Factory method to create a host buffer but preferably pointing to pinned memory.
    * It is not guaranteed that the returned buffer will point to pinned memory.
    *
@@ -233,7 +257,7 @@ public static HostMemoryBuffer tryAllocate(long bytes) {
    * @param bytes size in bytes to allocate
    * @return newly created buffer
    */
-  public static HostMemoryBuffer allocate(long bytes, HostMemoryAllocator hostMemoryAllocator) {
+  public static HostMemoryBuffer allocate(long bytes, HostMemoryAllocator hostMemoryAllocator) {
     HostMemoryBuffer result = tryAllocate(bytes);
     if (result == null) {
       result = hostMemoryAllocator.allocate(bytes, false);
@@ -241,6 +265,13 @@ public static HostMemoryBuffer allocate(long bytes, HostMemoryAllocator hostMem
     return result;
   }
 
+  /**
+   * Factory method to create a host buffer but preferably pointing to pinned memory.
+   * It is not guaranteed that the returned buffer will point to pinned memory.
+   *
+   * @param bytes size in bytes to allocate
+   * @return newly created buffer
+   */
   public static HostMemoryBuffer allocate(long bytes) {
     return allocate(bytes, DefaultHostMemoryAllocator.get());
   }
@@ -258,12 +289,24 @@ public static long getAvailableBytes() {
     return 0;
   }
 
+  /**
+   * Get the number of bytes that the pinned memory pool was allocated with. 
+ */ + public static long getTotalPoolSizeBytes() { + PinnedMemoryPool pool = getSingleton(); + if (pool != null) { + return pool.getTotalPoolSizeInternal(); + } + return 0; + } + private PinnedMemoryPool(long poolSize, int gpuId) { if (gpuId > -1) { // set the gpu device to use Cuda.setDevice(gpuId); Cuda.freeZero(); } + this.totalPoolSize = poolSize; this.pinnedPoolBase = Cuda.hostAllocPinned(poolSize); freeHeap.add(new MemorySection(pinnedPoolBase, poolSize)); this.availableBytes = poolSize; @@ -271,32 +314,42 @@ private PinnedMemoryPool(long poolSize, int gpuId) { @Override public void close() { - assert numAllocatedSections == 0; + assert numAllocatedSections == 0 : "Leaked " + numAllocatedSections + " pinned allocations"; Cuda.freePinned(pinnedPoolBase); } - private synchronized HostMemoryBuffer tryAllocateInternal(long bytes) { + /** + * Pads a length of bytes to the alignment the CPU wants in the worst case. This helps to + * calculate the size needed for a reservation if there are multiple buffers. + * @param bytes the size in bytes + * @return the new padded size in bytes. + */ + public static long padToCpuAlignment(long bytes) { + return ((bytes + ALIGNMENT - 1) / ALIGNMENT) * ALIGNMENT; + } + + private synchronized MemorySection tryGetInternal(long bytes, String what) { if (freeHeap.isEmpty()) { log.debug("No free pinned memory left"); return null; } // Align the allocation - long alignedBytes = ((bytes + ALIGNMENT - 1) / ALIGNMENT) * ALIGNMENT; + long alignedBytes = padToCpuAlignment(bytes); Optional firstFit = freeHeap.stream() - .filter(section -> section.size >= alignedBytes) - .findFirst(); + .filter(section -> section.size >= alignedBytes) + .findFirst(); if (!firstFit.isPresent()) { if (log.isDebugEnabled()) { MemorySection largest = freeHeap.stream() - .max(new SortedBySize()) - .orElse(new MemorySection(0, 0)); + .max(new SortedBySize()) + .orElse(new MemorySection(0, 0)); log.debug("Insufficient pinned memory. 
{} needed, {} found", alignedBytes, largest.size); } return null; } MemorySection first = firstFit.get(); - log.debug("Allocating {}/{} bytes pinned from {} FREE COUNT {} OUTSTANDING COUNT {}", - bytes, alignedBytes, first, freeHeap.size(), numAllocatedSections); + log.debug("{} {}/{} bytes pinned from {} FREE COUNT {} OUTSTANDING COUNT {}", + what, bytes, alignedBytes, first, freeHeap.size(), numAllocatedSections); freeHeap.remove(first); MemorySection allocated; if (first.size == alignedBytes) { @@ -307,9 +360,74 @@ private synchronized HostMemoryBuffer tryAllocateInternal(long bytes) { } numAllocatedSections++; availableBytes -= allocated.size; - log.debug("Allocated {} free {} outstanding {}", allocated, freeHeap, numAllocatedSections); - return new HostMemoryBuffer(allocated.baseAddress, bytes, - new PinnedHostBufferCleaner(allocated, bytes)); + log.debug("{} {} free {} outstanding {}", what, allocated, freeHeap, numAllocatedSections); + return allocated; + } + + private synchronized HostMemoryBuffer tryAllocateInternal(long bytes) { + MemorySection allocated = tryGetInternal(bytes, "allocate"); + if (allocated == null) { + return null; + } else { + return new HostMemoryBuffer(allocated.baseAddress, bytes, + new PinnedHostBufferCleaner(allocated, bytes)); + } + } + + private class PinnedReservation implements HostMemoryReservation { + private MemorySection section = null; + + public PinnedReservation(MemorySection section) { + this.section = section; + } + + @Override + public synchronized HostMemoryBuffer allocate(long bytes, boolean preferPinned) { + return this.allocate(bytes); + } + + @Override + public synchronized HostMemoryBuffer allocate(long bytes) { + if (section == null || section.size < bytes) { + throw new OutOfMemoryError("Reservation didn't have enough space " + bytes + " / " + + (section == null ? 0 : section.size)); + } + long alignedSize = padToCpuAlignment(bytes); + MemorySection allocated; + if (section.size >= bytes && section.size <= alignedSize) { + allocated = section; + section = null; + // No need for reserveAllocInternal because the original section is already tracked + } else { + allocated = section.splitOff(alignedSize); + PinnedMemoryPool.reserveAllocInternal(allocated); + } + return new HostMemoryBuffer(allocated.baseAddress, bytes, + new PinnedHostBufferCleaner(allocated, bytes)); + } + + @Override + public synchronized void close() throws Exception { + if (section != null) { + try { + PinnedMemoryPool.freeInternal(section); + } finally { + // Always mark the resource as freed even if an exception is thrown. + // We cannot know how far it progressed before the exception, and + // therefore it is unsafe to retry. 
+          section = null;
+        }
+      }
+    }
+  }
+
+  private HostMemoryReservation tryReserveInternal(long bytes) {
+    MemorySection allocated = tryGetInternal(bytes, "reserve");
+    if (allocated == null) {
+      return null;
+    } else {
+      return new PinnedReservation(allocated);
+    }
+  }
 
   private synchronized void free(MemorySection section) {
@@ -328,7 +446,17 @@ private synchronized void free(MemorySection section) {
     log.debug("After freeing {} outstanding {}", freeHeap, numAllocatedSections);
   }
 
+  private synchronized void reserveAllocHappened(MemorySection section) {
+    if (section != null && section.size > 0) {
+      numAllocatedSections++;
+    }
+  }
+
   private synchronized long getAvailableBytesInternal() {
     return this.availableBytes;
   }
+
+  private long getTotalPoolSizeInternal() {
+    return this.totalPoolSize;
+  }
 }

From d1fb671128a55f965a7db907e99d5b1a841c2213 Mon Sep 17 00:00:00 2001
From: Lawrence Mitchell 
Date: Fri, 1 Sep 2023 16:16:47 +0100
Subject: [PATCH 031/150] Remove quadratic runtime due to accessing
 Frame._dtypes in loop (#14028)

Frame._dtypes maps column names to dtypes; however, it is a property that
is computed on demand. Consequently, a seemingly innocuous dict lookup is
actually O(N). When used in a loop over columns, this turns an O(N) loop
into an O(N^2) one. This mostly bites on IO when reading data with many
thousands of columns. To fix this, manually move access of Frame._dtypes
outside of any loop over columns. A more systematic way might be to make
this a cached property, but the cache invalidation is rather hard to
reason about.

- Closes https://github.com/rapidsai/cudf/issues/14005

Authors:
  - Lawrence Mitchell (https://github.com/wence-)

Approvers:
  - https://github.com/brandon-b-miller

URL: https://github.com/rapidsai/cudf/pull/14028
---
 python/cudf/cudf/core/groupby/groupby.py | 3 ++-
 python/cudf/cudf/core/indexed_frame.py   | 6 +-----
 python/cudf/cudf/io/csv.py               | 7 ++++---
 python/cudf/cudf/io/json.py              | 7 ++++---
 4 files changed, 11 insertions(+), 12 deletions(-)

diff --git a/python/cudf/cudf/core/groupby/groupby.py b/python/cudf/cudf/core/groupby/groupby.py
index 38b07eca330..b300c55b537 100644
--- a/python/cudf/cudf/core/groupby/groupby.py
+++ b/python/cudf/cudf/core/groupby/groupby.py
@@ -313,9 +313,10 @@ def dtypes(self):
     3  object  int64
     """
     index = self.grouping.keys.unique().sort_values().to_pandas()
+    obj_dtypes = self.obj._dtypes
     return pd.DataFrame(
         {
-            name: [self.obj._dtypes[name]] * len(index)
+            name: [obj_dtypes[name]] * len(index)
             for name in self.grouping.values._column_names
         },
         index=index,
diff --git a/python/cudf/cudf/core/indexed_frame.py b/python/cudf/cudf/core/indexed_frame.py
index 4c6eb3a50e9..33ac97d7ef8 100644
--- a/python/cudf/cudf/core/indexed_frame.py
+++ b/python/cudf/cudf/core/indexed_frame.py
@@ -822,11 +822,7 @@ def replace(
         ) = _get_replacement_values_for_columns(
             to_replace=to_replace,
             value=value,
-            # TODO: This should be replaced with `DataFrame._dtypes` once
-            # that is moved up to `Frame`.
-            columns_dtype_map={
-                col: self._data[col].dtype for col in self._data
-            },
+            columns_dtype_map=self._dtypes,
         )
 
         for name, col in self._data.items():
diff --git a/python/cudf/cudf/io/csv.py b/python/cudf/cudf/io/csv.py
index 95e0aa18070..bacc0641639 100644
--- a/python/cudf/cudf/io/csv.py
+++ b/python/cudf/cudf/io/csv.py
@@ -123,11 +123,12 @@ def read_csv(
     if dtype is None or isinstance(dtype, abc.Mapping):
         # There exists some dtypes in the result columns that is inferred.
         # Find them and map them to the default dtypes. 
- dtype = {} if dtype is None else dtype + specified_dtypes = {} if dtype is None else dtype + df_dtypes = df._dtypes unspecified_dtypes = { - name: df._dtypes[name] + name: df_dtypes[name] for name in df._column_names - if name not in dtype + if name not in specified_dtypes } default_dtypes = {} diff --git a/python/cudf/cudf/io/json.py b/python/cudf/cudf/io/json.py index 4de9a92a068..efac24aee17 100644 --- a/python/cudf/cudf/io/json.py +++ b/python/cudf/cudf/io/json.py @@ -158,11 +158,12 @@ def read_json( if dtype is True or isinstance(dtype, abc.Mapping): # There exists some dtypes in the result columns that is inferred. # Find them and map them to the default dtypes. - dtype = {} if dtype is True else dtype + specified_dtypes = {} if dtype is True else dtype + df_dtypes = df._dtypes unspecified_dtypes = { - name: df._dtypes[name] + name: df_dtypes[name] for name in df._column_names - if name not in dtype + if name not in specified_dtypes } default_dtypes = {} From 2b7294b9afe413b8f6b956dc5148452ca0161e7f Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Fri, 1 Sep 2023 16:20:45 -0700 Subject: [PATCH 032/150] Expose streams in public replace APIs (#14010) Contributes to #925 Authors: - Vyas Ramasubramani (https://github.com/vyasr) Approvers: - Bradley Dice (https://github.com/bdice) - MithunR (https://github.com/mythrocks) - David Wendt (https://github.com/davidwendt) URL: https://github.com/rapidsai/cudf/pull/14010 --- cpp/include/cudf/replace.hpp | 25 ++++- cpp/src/replace/clamp.cu | 6 +- cpp/src/replace/nans.cu | 15 +-- cpp/src/replace/nulls.cu | 9 +- cpp/src/replace/replace.cu | 4 +- cpp/tests/CMakeLists.txt | 1 + cpp/tests/replace/replace_nulls_tests.cpp | 23 ++--- cpp/tests/replace/replace_tests.cpp | 34 +++---- cpp/tests/streams/replace_test.cpp | 109 ++++++++++++++++++++++ 9 files changed, 181 insertions(+), 45 deletions(-) create mode 100644 cpp/tests/streams/replace_test.cpp diff --git a/cpp/include/cudf/replace.hpp b/cpp/include/cudf/replace.hpp index 9df58306ace..3405dc8b796 100644 --- a/cpp/include/cudf/replace.hpp +++ b/cpp/include/cudf/replace.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2022, NVIDIA CORPORATION. + * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -17,6 +17,7 @@ #pragma once #include +#include #include @@ -45,6 +46,7 @@ enum class replace_policy : bool { PRECEDING, FOLLOWING }; * * @param[in] input A column whose null values will be replaced * @param[in] replacement A cudf::column whose values will replace null values in input + * @param stream CUDA stream used for device memory operations and kernel launches * @param[in] mr Device memory resource used to allocate device memory of the returned column * * @returns A copy of `input` with the null values replaced with corresponding values from @@ -53,6 +55,7 @@ enum class replace_policy : bool { PRECEDING, FOLLOWING }; std::unique_ptr replace_nulls( column_view const& input, column_view const& replacement, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -63,6 +66,7 @@ std::unique_ptr replace_nulls( * * @param[in] input A column whose null values will be replaced * @param[in] replacement Scalar used to replace null values in `input` + * @param stream CUDA stream used for device memory operations and kernel launches * @param[in] mr Device memory resource used to allocate device memory of the returned column * * @returns Copy of `input` with null values replaced by `replacement` @@ -70,6 +74,7 @@ std::unique_ptr replace_nulls( std::unique_ptr replace_nulls( column_view const& input, scalar const& replacement, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -80,6 +85,7 @@ std::unique_ptr replace_nulls( * * @param[in] input A column whose null values will be replaced * @param[in] replace_policy Specify the position of replacement values relative to null values + * @param stream CUDA stream used for device memory operations and kernel launches * @param[in] mr Device memory resource used to allocate device memory of the returned column * * @returns Copy of `input` with null values replaced based on `replace_policy` @@ -87,6 +93,7 @@ std::unique_ptr replace_nulls( std::unique_ptr replace_nulls( column_view const& input, replace_policy const& replace_policy, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -106,6 +113,7 @@ std::unique_ptr replace_nulls( * * @param input A column whose NaN values will be replaced * @param replacement A cudf::column whose values will replace NaN values in input + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return A copy of `input` with the NaN values replaced with corresponding values from * `replacement`. 
@@ -113,6 +121,7 @@ std::unique_ptr replace_nulls( std::unique_ptr replace_nans( column_view const& input, column_view const& replacement, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -132,12 +141,14 @@ std::unique_ptr replace_nans( * * @param input A column whose NaN values will be replaced * @param replacement A cudf::scalar whose value will replace NaN values in input + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return A copy of `input` with the NaN values replaced by `replacement` */ std::unique_ptr replace_nans( column_view const& input, scalar const& replacement, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -147,6 +158,7 @@ std::unique_ptr replace_nans( * @param input_col The column to find and replace values in * @param values_to_replace The values to replace * @param replacement_values The values to replace with + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * * @returns Copy of `input_col` with specified values replaced @@ -155,6 +167,7 @@ std::unique_ptr find_and_replace_all( column_view const& input_col, column_view const& values_to_replace, column_view const& replacement_values, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -198,6 +211,7 @@ std::unique_ptr find_and_replace_all( * @param[in] hi Maximum clamp value. All elements greater than `hi` will be replaced by * `hi_replace`. Ignored if null. * @param[in] hi_replace All elements greater than `hi` will be replaced by `hi_replace` + * @param stream CUDA stream used for device memory operations and kernel launches * @param[in] mr Device memory resource used to allocate device memory of the returned column * * @return Returns a clamped column as per `lo` and `hi` boundaries @@ -208,6 +222,7 @@ std::unique_ptr clamp( scalar const& lo_replace, scalar const& hi, scalar const& hi_replace, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -244,6 +259,7 @@ std::unique_ptr clamp( * if null. * @param[in] hi Maximum clamp value. All elements greater than `hi` will be replaced by `hi` * Ignored if null. + * @param stream CUDA stream used for device memory operations and kernel launches * @param[in] mr Device memory resource used to allocate device memory of the returned column * * @return Returns a clamped column as per `lo` and `hi` boundaries @@ -252,6 +268,7 @@ std::unique_ptr clamp( column_view const& input, scalar const& lo, scalar const& hi, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -264,12 +281,14 @@ std::unique_ptr clamp( * * @throws cudf::logic_error if column does not have floating point data type. 
* @param[in] input column_view of floating-point elements to copy and normalize + * @param stream CUDA stream used for device memory operations and kernel launches * @param[in] mr device_memory_resource allocator for allocating output data * * @returns new column with the modified data */ std::unique_ptr normalize_nans_and_zeros( column_view const& input, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -282,8 +301,10 @@ std::unique_ptr normalize_nans_and_zeros( * * @throws cudf::logic_error if column does not have floating point data type. * @param[in, out] in_out of floating-point elements to normalize + * @param stream CUDA stream used for device memory operations and kernel launches */ -void normalize_nans_and_zeros(mutable_column_view& in_out); +void normalize_nans_and_zeros(mutable_column_view& in_out, + rmm::cuda_stream_view stream = cudf::get_default_stream()); /** @} */ // end of group } // namespace cudf diff --git a/cpp/src/replace/clamp.cu b/cpp/src/replace/clamp.cu index 68b496e0ab8..2b48aed2d29 100644 --- a/cpp/src/replace/clamp.cu +++ b/cpp/src/replace/clamp.cu @@ -386,19 +386,21 @@ std::unique_ptr clamp(column_view const& input, scalar const& lo_replace, scalar const& hi, scalar const& hi_replace, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::clamp(input, lo, lo_replace, hi, hi_replace, cudf::get_default_stream(), mr); + return detail::clamp(input, lo, lo_replace, hi, hi_replace, stream, mr); } // clamp input at lo and hi std::unique_ptr clamp(column_view const& input, scalar const& lo, scalar const& hi, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::clamp(input, lo, lo, hi, hi, cudf::get_default_stream(), mr); + return detail::clamp(input, lo, lo, hi, hi, stream, mr); } } // namespace cudf diff --git a/cpp/src/replace/nans.cu b/cpp/src/replace/nans.cu index ce0d2d07b36..2fcb934ba65 100644 --- a/cpp/src/replace/nans.cu +++ b/cpp/src/replace/nans.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -111,18 +111,20 @@ std::unique_ptr replace_nans(column_view const& input, std::unique_ptr replace_nans(column_view const& input, column_view const& replacement, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::replace_nans(input, replacement, cudf::get_default_stream(), mr); + return detail::replace_nans(input, replacement, stream, mr); } std::unique_ptr replace_nans(column_view const& input, scalar const& replacement, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::replace_nans(input, replacement, cudf::get_default_stream(), mr); + return detail::replace_nans(input, replacement, stream, mr); } } // namespace cudf @@ -202,7 +204,7 @@ std::unique_ptr normalize_nans_and_zeros(column_view const& input, // from device. unique_ptr which gets automatically cleaned up when we leave. 
auto out_view = out->mutable_view();
-  normalize_nans_and_zeros(out_view, stream);
+  detail::normalize_nans_and_zeros(out_view, stream);
 
   out->set_null_count(input.null_count());
   return out;
@@ -221,10 +223,11 @@ std::unique_ptr<column> normalize_nans_and_zeros(column_view const& input,
  * @param mr Device memory resource used to allocate the returned column's device memory.
  */
 std::unique_ptr<column> normalize_nans_and_zeros(column_view const& input,
+                                                 rmm::cuda_stream_view stream,
                                                  rmm::mr::device_memory_resource* mr)
 {
   CUDF_FUNC_RANGE();
-  return detail::normalize_nans_and_zeros(input, cudf::get_default_stream(), mr);
+  return detail::normalize_nans_and_zeros(input, stream, mr);
 }
 
 /**
@@ -237,7 +240,7 @@ std::unique_ptr<column> normalize_nans_and_zeros(column_view const& input,
  * @throws cudf::logic_error if column does not have floating point data type.
  * @param[in, out] in_out mutable_column_view representing input data. data is processed in-place
  */
-void normalize_nans_and_zeros(mutable_column_view& in_out)
+void normalize_nans_and_zeros(mutable_column_view& in_out, rmm::cuda_stream_view stream)
 {
   CUDF_FUNC_RANGE();
-  detail::normalize_nans_and_zeros(in_out, cudf::get_default_stream());
+  detail::normalize_nans_and_zeros(in_out, stream);
diff --git a/cpp/src/replace/nulls.cu b/cpp/src/replace/nulls.cu
index 5b9fd3d9f0f..2eb624d3f05 100644
--- a/cpp/src/replace/nulls.cu
+++ b/cpp/src/replace/nulls.cu
@@ -446,26 +446,29 @@ std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input,
 
 std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input,
                                             cudf::column_view const& replacement,
+                                            rmm::cuda_stream_view stream,
                                             rmm::mr::device_memory_resource* mr)
 {
   CUDF_FUNC_RANGE();
-  return detail::replace_nulls(input, replacement, cudf::get_default_stream(), mr);
+  return detail::replace_nulls(input, replacement, stream, mr);
 }
 
 std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input,
                                             cudf::scalar const& replacement,
+                                            rmm::cuda_stream_view stream,
                                             rmm::mr::device_memory_resource* mr)
 {
   CUDF_FUNC_RANGE();
-  return detail::replace_nulls(input, replacement, cudf::get_default_stream(), mr);
+  return detail::replace_nulls(input, replacement, stream, mr);
 }
 
 std::unique_ptr<cudf::column> replace_nulls(column_view const& input,
                                             replace_policy const& replace_policy,
+                                            rmm::cuda_stream_view stream,
                                             rmm::mr::device_memory_resource* mr)
 {
   CUDF_FUNC_RANGE();
-  return detail::replace_nulls(input, replace_policy, cudf::get_default_stream(), mr);
+  return detail::replace_nulls(input, replace_policy, stream, mr);
 }
 
 }  // namespace cudf
diff --git a/cpp/src/replace/replace.cu b/cpp/src/replace/replace.cu
index a7847bc0e7f..07eefdc27c6 100644
--- a/cpp/src/replace/replace.cu
+++ b/cpp/src/replace/replace.cu
@@ -527,9 +527,9 @@ std::unique_ptr<cudf::column> find_and_replace_all(cudf::column_view const& inpu
 std::unique_ptr<cudf::column> find_and_replace_all(cudf::column_view const& input_col,
                                                    cudf::column_view const& values_to_replace,
                                                    cudf::column_view const& replacement_values,
+                                                   rmm::cuda_stream_view stream,
                                                    rmm::mr::device_memory_resource* mr)
 {
-  return detail::find_and_replace_all(
-    input_col, values_to_replace, replacement_values, cudf::get_default_stream(), mr);
+  return detail::find_and_replace_all(input_col, values_to_replace, replacement_values, stream, mr);
 }
 
 }  // namespace cudf
diff --git a/cpp/tests/CMakeLists.txt b/cpp/tests/CMakeLists.txt
index 8a0aa27b175..1bb1987198d 100644
--- a/cpp/tests/CMakeLists.txt
+++ b/cpp/tests/CMakeLists.txt
@@ -625,6 +625,7 @@ ConfigureTest(STREAM_COPYING_TEST streams/copying_test.cpp STREAM_MODE testing)
 ConfigureTest(STREAM_GROUPBY_TEST streams/groupby_test.cpp STREAM_MODE testing) 
ConfigureTest(STREAM_CONCATENATE_TEST streams/concatenate_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_FILLING_TEST streams/filling_test.cpp STREAM_MODE testing) +ConfigureTest(STREAM_REPLACE_TEST streams/replace_test.cpp STREAM_MODE testing) # ################################################################################################## # Install tests #################################################################################### diff --git a/cpp/tests/replace/replace_nulls_tests.cpp b/cpp/tests/replace/replace_nulls_tests.cpp index 7e84a0695e3..a7c54145708 100644 --- a/cpp/tests/replace/replace_nulls_tests.cpp +++ b/cpp/tests/replace/replace_nulls_tests.cpp @@ -46,8 +46,7 @@ TEST_F(ReplaceErrorTest, SizeMismatch) {0, 0, 1, 1, 1, 1, 1, 1}}; cudf::test::fixed_width_column_wrapper values_to_replace_column{{10, 11, 12, 13}}; - ASSERT_THROW(cudf::replace_nulls(input_column, values_to_replace_column, mr()), - cudf::logic_error); + ASSERT_THROW(cudf::replace_nulls(input_column, values_to_replace_column), cudf::logic_error); } // Error: column type mismatch @@ -58,8 +57,7 @@ TEST_F(ReplaceErrorTest, TypeMismatch) cudf::test::fixed_width_column_wrapper values_to_replace_column{ {10, 11, 12, 13, 14, 15, 16, 17}}; - EXPECT_THROW(cudf::replace_nulls(input_column, values_to_replace_column, mr()), - cudf::logic_error); + EXPECT_THROW(cudf::replace_nulls(input_column, values_to_replace_column), cudf::logic_error); } // Error: column type mismatch @@ -69,7 +67,7 @@ TEST_F(ReplaceErrorTest, TypeMismatchScalar) {0, 0, 1, 1, 1, 1, 1, 1}}; cudf::numeric_scalar replacement(1); - EXPECT_THROW(cudf::replace_nulls(input_column, replacement, mr()), cudf::logic_error); + EXPECT_THROW(cudf::replace_nulls(input_column, replacement), cudf::logic_error); } struct ReplaceNullsStringsTest : public cudf::test::BaseFixture {}; @@ -88,7 +86,7 @@ TEST_F(ReplaceNullsStringsTest, SimpleReplace) replacement.begin(), replacement.end(), replacement_v.begin()}; std::unique_ptr result; - ASSERT_NO_THROW(result = cudf::replace_nulls(input_w, replacement_w, mr())); + ASSERT_NO_THROW(result = cudf::replace_nulls(input_w, replacement_w)); CUDF_TEST_EXPECT_COLUMNS_EQUAL(*result, expected_w); } @@ -107,7 +105,7 @@ TEST_F(ReplaceNullsStringsTest, ReplaceWithNulls) replacement.begin(), replacement.end(), replacement_v.begin()}; std::unique_ptr result; - ASSERT_NO_THROW(result = cudf::replace_nulls(input_w, replacement_w, mr())); + ASSERT_NO_THROW(result = cudf::replace_nulls(input_w, replacement_w)); CUDF_TEST_EXPECT_COLUMNS_EQUAL(*result, expected_w); } @@ -125,7 +123,7 @@ TEST_F(ReplaceNullsStringsTest, ReplaceWithAllNulls) cudf::test::strings_column_wrapper expected_w{input.begin(), input.end(), input_v.begin()}; std::unique_ptr result; - ASSERT_NO_THROW(result = cudf::replace_nulls(input_w, replacement_w, mr())); + ASSERT_NO_THROW(result = cudf::replace_nulls(input_w, replacement_w)); CUDF_TEST_EXPECT_COLUMNS_EQUAL(*result, expected_w); } @@ -143,7 +141,7 @@ TEST_F(ReplaceNullsStringsTest, ReplaceWithAllEmpty) cudf::test::strings_column_wrapper expected_w{input.begin(), input.end(), replacement_v.begin()}; std::unique_ptr result; - ASSERT_NO_THROW(result = cudf::replace_nulls(input_w, replacement_w, mr())); + ASSERT_NO_THROW(result = cudf::replace_nulls(input_w, replacement_w)); CUDF_TEST_EXPECT_COLUMNS_EQUAL(*result, expected_w); } @@ -161,7 +159,7 @@ TEST_F(ReplaceNullsStringsTest, ReplaceNone) cudf::test::strings_column_wrapper expected_w{input.begin(), input.end()}; std::unique_ptr result; - ASSERT_NO_THROW(result 
= cudf::replace_nulls(input_w, replacement_w, mr())); + ASSERT_NO_THROW(result = cudf::replace_nulls(input_w, replacement_w)); CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(*result, expected_w); } @@ -170,8 +168,7 @@ TEST_F(ReplaceNullsStringsTest, SimpleReplaceScalar) { std::vector input{"", "", "", "", "", "", "", ""}; std::vector input_v{0, 0, 0, 0, 0, 0, 0, 0}; - std::unique_ptr repl = - cudf::make_string_scalar("rep", cudf::get_default_stream(), mr()); + std::unique_ptr repl = cudf::make_string_scalar("rep"); repl->set_valid_async(true, cudf::get_default_stream()); std::vector expected{"rep", "rep", "rep", "rep", "rep", "rep", "rep", "rep"}; @@ -179,7 +176,7 @@ TEST_F(ReplaceNullsStringsTest, SimpleReplaceScalar) cudf::test::strings_column_wrapper expected_w{expected.begin(), expected.end()}; std::unique_ptr result; - ASSERT_NO_THROW(result = cudf::replace_nulls(input_w, *repl, mr())); + ASSERT_NO_THROW(result = cudf::replace_nulls(input_w, *repl)); CUDF_TEST_EXPECT_COLUMNS_EQUAL(*result, expected_w); } diff --git a/cpp/tests/replace/replace_tests.cpp b/cpp/tests/replace/replace_tests.cpp index 451cfa5bc9e..63460b0cb15 100644 --- a/cpp/tests/replace/replace_tests.cpp +++ b/cpp/tests/replace/replace_tests.cpp @@ -47,9 +47,9 @@ TEST_F(ReplaceErrorTest, SizeMismatch) cudf::test::fixed_width_column_wrapper values_to_replace_column{{10, 11, 12, 13}}; cudf::test::fixed_width_column_wrapper replacement_values_column{{15, 16, 17}}; - EXPECT_THROW(cudf::find_and_replace_all( - input_column, values_to_replace_column, replacement_values_column, mr()), - cudf::logic_error); + EXPECT_THROW( + cudf::find_and_replace_all(input_column, values_to_replace_column, replacement_values_column), + cudf::logic_error); } // Error: column type mismatch @@ -59,9 +59,9 @@ TEST_F(ReplaceErrorTest, TypeMismatch) cudf::test::fixed_width_column_wrapper values_to_replace_column{{10, 11, 12}}; cudf::test::fixed_width_column_wrapper replacement_values_column{{15, 16, 17}}; - EXPECT_THROW(cudf::find_and_replace_all( - input_column, values_to_replace_column, replacement_values_column, mr()), - cudf::logic_error); + EXPECT_THROW( + cudf::find_and_replace_all(input_column, values_to_replace_column, replacement_values_column), + cudf::logic_error); } // Error: nulls in old-values @@ -72,9 +72,9 @@ TEST_F(ReplaceErrorTest, NullInOldValues) {0, 1, 0, 1}}; cudf::test::fixed_width_column_wrapper replacement_values_column{{15, 16, 17, 18}}; - EXPECT_THROW(cudf::find_and_replace_all( - input_column, values_to_replace_column, replacement_values_column, mr()), - cudf::logic_error); + EXPECT_THROW( + cudf::find_and_replace_all(input_column, values_to_replace_column, replacement_values_column), + cudf::logic_error); } struct ReplaceStringsTest : public cudf::test::BaseFixture {}; @@ -93,7 +93,7 @@ TEST_F(ReplaceStringsTest, Strings) std::unique_ptr result; ASSERT_NO_THROW(result = cudf::find_and_replace_all( - input_wrapper, values_to_replace_wrapper, replacement_wrapper, mr())); + input_wrapper, values_to_replace_wrapper, replacement_wrapper)); std::vector expected{"z", "b", "c", "d", "e", "f", "g", "h"}; std::vector ex_valid{1, 1, 1, 1, 1, 1, 1, 1}; cudf::test::strings_column_wrapper expected_wrapper{ @@ -117,7 +117,7 @@ TEST_F(ReplaceStringsTest, StringsReplacementNulls) std::unique_ptr result; ASSERT_NO_THROW(result = cudf::find_and_replace_all( - input_wrapper, values_to_replace_wrapper, replacement_wrapper, mr())); + input_wrapper, values_to_replace_wrapper, replacement_wrapper)); std::vector expected{"z", "", "c", "d", "e", "f", "g", 
"h"}; std::vector ex_valid{1, 0, 1, 1, 1, 1, 1, 1}; cudf::test::strings_column_wrapper expected_wrapper{ @@ -143,7 +143,7 @@ TEST_F(ReplaceStringsTest, StringsResultAllNulls) std::unique_ptr result; ASSERT_NO_THROW(result = cudf::find_and_replace_all( - input_wrapper, values_to_replace_wrapper, replacement_wrapper, mr())); + input_wrapper, values_to_replace_wrapper, replacement_wrapper)); cudf::test::strings_column_wrapper expected_wrapper{ expected.begin(), expected.end(), ex_valid.begin()}; @@ -167,7 +167,7 @@ TEST_F(ReplaceStringsTest, StringsResultAllEmpty) std::unique_ptr result; ASSERT_NO_THROW(result = cudf::find_and_replace_all( - input_wrapper, values_to_replace_wrapper, replacement_wrapper, mr())); + input_wrapper, values_to_replace_wrapper, replacement_wrapper)); cudf::test::strings_column_wrapper expected_wrapper{ expected.begin(), expected.end(), ex_valid.begin()}; @@ -188,7 +188,7 @@ TEST_F(ReplaceStringsTest, StringsInputNulls) std::unique_ptr result; ASSERT_NO_THROW(result = cudf::find_and_replace_all( - input_wrapper, values_to_replace_wrapper, replacement_wrapper, mr())); + input_wrapper, values_to_replace_wrapper, replacement_wrapper)); std::vector expected{"z", "y", "", "", "e", "f", "g", "h"}; std::vector ex_valid{1, 1, 0, 0, 1, 1, 1, 1}; cudf::test::strings_column_wrapper expected_wrapper{ @@ -213,7 +213,7 @@ TEST_F(ReplaceStringsTest, StringsInputAndReplacementNulls) std::unique_ptr result; ASSERT_NO_THROW(result = cudf::find_and_replace_all( - input_wrapper, values_to_replace_wrapper, replacement_wrapper, mr())); + input_wrapper, values_to_replace_wrapper, replacement_wrapper)); std::vector expected{"z", "", "", "", "e", "f", "g", "h"}; std::vector ex_valid{1, 0, 0, 0, 1, 1, 1, 1}; cudf::test::strings_column_wrapper expected_wrapper{ @@ -236,7 +236,7 @@ TEST_F(ReplaceStringsTest, StringsEmptyReplacement) std::unique_ptr result; ASSERT_NO_THROW(result = cudf::find_and_replace_all( - input_wrapper, values_to_replace_wrapper, replacement_wrapper, mr())); + input_wrapper, values_to_replace_wrapper, replacement_wrapper)); std::vector expected{"a", "b", "", "", "e", "f", "g", "h"}; std::vector ex_valid{1, 1, 0, 0, 1, 1, 1, 1}; cudf::test::strings_column_wrapper expected_wrapper{ @@ -281,7 +281,7 @@ TEST_F(ReplaceStringsTest, StringsLargeScale) std::unique_ptr result; ASSERT_NO_THROW(result = cudf::find_and_replace_all( - input_wrapper, values_to_replace_wrapper, replacement_wrapper, mr())); + input_wrapper, values_to_replace_wrapper, replacement_wrapper)); CUDF_TEST_EXPECT_COLUMNS_EQUAL(*result, expected_wrapper); } diff --git a/cpp/tests/streams/replace_test.cpp b/cpp/tests/streams/replace_test.cpp new file mode 100644 index 00000000000..c794f99b6f6 --- /dev/null +++ b/cpp/tests/streams/replace_test.cpp @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include + +#include +#include +#include +#include + +class ReplaceTest : public cudf::test::BaseFixture {}; + +TEST_F(ReplaceTest, ReplaceNullsColumn) +{ + cudf::test::fixed_width_column_wrapper input({{0, 0, 0, 0, 0}, {0, 0, 1, 1, 1}}); + cudf::test::fixed_width_column_wrapper replacement({1, 1, 1, 1, 1}); + cudf::replace_nulls(input, replacement, cudf::test::get_default_stream()); +} + +TEST_F(ReplaceTest, ReplaceNullsScalar) +{ + cudf::test::fixed_width_column_wrapper input({{0, 0, 0, 0, 0}, {0, 0, 1, 1, 1}}); + auto replacement = cudf::numeric_scalar(1, true, cudf::test::get_default_stream()); + cudf::replace_nulls(input, replacement, cudf::test::get_default_stream()); +} + +TEST_F(ReplaceTest, ReplaceNullsPolicy) +{ + cudf::test::fixed_width_column_wrapper input({{0, 0, 0, 0, 0}, {0, 0, 1, 1, 1}}); + cudf::replace_nulls(input, cudf::replace_policy::FOLLOWING, cudf::test::get_default_stream()); +} + +TEST_F(ReplaceTest, ReplaceNansColumn) +{ + auto nan = std::numeric_limits::quiet_NaN(); + auto input_column = cudf::test::make_type_param_vector({0.0, 0.0, nan, nan, nan}); + cudf::test::fixed_width_column_wrapper input(input_column.begin(), input_column.end()); + cudf::test::fixed_width_column_wrapper replacement({0, 1, 2, 3, 4}); + cudf::replace_nans(input, replacement, cudf::test::get_default_stream()); +} + +TEST_F(ReplaceTest, ReplaceNansScalar) +{ + auto nan = std::numeric_limits::quiet_NaN(); + auto input_column = cudf::test::make_type_param_vector({0.0, 0.0, nan, nan, nan}); + cudf::test::fixed_width_column_wrapper input(input_column.begin(), input_column.end()); + auto replacement = cudf::numeric_scalar(4, true, cudf::test::get_default_stream()); + cudf::replace_nans(input, replacement, cudf::test::get_default_stream()); +} + +TEST_F(ReplaceTest, FindAndReplaceAll) +{ + cudf::test::fixed_width_column_wrapper input({0, 0, 0, 0, 0}); + cudf::test::fixed_width_column_wrapper values_to_replace({0, 0, 0, 0, 0}); + cudf::test::fixed_width_column_wrapper replacement_values({1, 1, 1, 1, 1}); + cudf::find_and_replace_all( + input, values_to_replace, replacement_values, cudf::test::get_default_stream()); +} + +TEST_F(ReplaceTest, ClampWithReplace) +{ + cudf::test::fixed_width_column_wrapper input({0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + auto low = cudf::numeric_scalar(3, true, cudf::test::get_default_stream()); + auto low_replace = cudf::numeric_scalar(5, true, cudf::test::get_default_stream()); + auto high = cudf::numeric_scalar(7, true, cudf::test::get_default_stream()); + auto high_replace = cudf::numeric_scalar(6, true, cudf::test::get_default_stream()); + cudf::clamp(input, low, low_replace, high, high_replace, cudf::test::get_default_stream()); +} + +TEST_F(ReplaceTest, Clamp) +{ + cudf::test::fixed_width_column_wrapper input({0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + auto low = cudf::numeric_scalar(3, true, cudf::test::get_default_stream()); + auto high = cudf::numeric_scalar(7, true, cudf::test::get_default_stream()); + cudf::clamp(input, low, high, cudf::test::get_default_stream()); +} + +TEST_F(ReplaceTest, NormalizeNansAndZeros) +{ + auto nan = std::numeric_limits::quiet_NaN(); + auto input_column = cudf::test::make_type_param_vector({-0.0, 0.0, -nan, nan, nan}); + cudf::test::fixed_width_column_wrapper input(input_column.begin(), input_column.end()); + cudf::normalize_nans_and_zeros(static_cast(input), + cudf::test::get_default_stream()); +} + +TEST_F(ReplaceTest, NormalizeNansAndZerosMutable) +{ + auto nan = std::numeric_limits::quiet_NaN(); + auto input_column 
= cudf::test::make_type_param_vector({-0.0, 0.0, -nan, nan, nan}); + cudf::test::fixed_width_column_wrapper input(input_column.begin(), input_column.end()); + cudf::normalize_nans_and_zeros(static_cast(input), + cudf::test::get_default_stream()); +} From bbbb143be086a85cc56f01157b5e94615f50c307 Mon Sep 17 00:00:00 2001 From: Bradley Dice Date: Fri, 1 Sep 2023 21:33:10 -0500 Subject: [PATCH 033/150] Use cudf::thread_index_type in concatenate.cu. (#13906) This PR uses `cudf::thread_index_type` in `concatenate.cu` to avoid risk of overflow. Authors: - Bradley Dice (https://github.com/bdice) Approvers: - Divye Gala (https://github.com/divyegala) - Yunsong Wang (https://github.com/PointKernel) - Vukasin Milovanovic (https://github.com/vuule) - Nghia Truong (https://github.com/ttnghia) URL: https://github.com/rapidsai/cudf/pull/13906 --- cpp/src/copying/concatenate.cu | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/cpp/src/copying/concatenate.cu b/cpp/src/copying/concatenate.cu index 35f06e47436..d08c3025553 100644 --- a/cpp/src/copying/concatenate.cu +++ b/cpp/src/copying/concatenate.cu @@ -118,13 +118,14 @@ __global__ void concatenate_masks_kernel(column_device_view const* views, size_type number_of_mask_bits, size_type* out_valid_count) { - size_type mask_index = threadIdx.x + blockIdx.x * blockDim.x; - - auto active_mask = __ballot_sync(0xFFFF'FFFFu, mask_index < number_of_mask_bits); + auto tidx = cudf::detail::grid_1d::global_thread_id(); + auto const stride = cudf::detail::grid_1d::grid_stride(); + auto active_mask = __ballot_sync(0xFFFF'FFFFu, tidx < number_of_mask_bits); size_type warp_valid_count = 0; - while (mask_index < number_of_mask_bits) { + while (tidx < number_of_mask_bits) { + auto const mask_index = static_cast(tidx); size_type const source_view_index = thrust::upper_bound( thrust::seq, output_offsets, output_offsets + number_of_views, mask_index) - @@ -141,8 +142,8 @@ __global__ void concatenate_masks_kernel(column_device_view const* views, warp_valid_count += __popc(new_word); } - mask_index += blockDim.x * gridDim.x; - active_mask = __ballot_sync(active_mask, mask_index < number_of_mask_bits); + tidx += stride; + active_mask = __ballot_sync(active_mask, tidx < number_of_mask_bits); } using detail::single_lane_block_sum_reduce; @@ -195,7 +196,8 @@ __global__ void fused_concatenate_kernel(column_device_view const* input_views, auto const output_size = output_view.size(); auto* output_data = output_view.data(); - int64_t output_index = threadIdx.x + blockIdx.x * blockDim.x; + auto output_index = cudf::detail::grid_1d::global_thread_id(); + auto const stride = cudf::detail::grid_1d::grid_stride(); size_type warp_valid_count = 0; unsigned active_mask; @@ -224,7 +226,7 @@ __global__ void fused_concatenate_kernel(column_device_view const* input_views, warp_valid_count += __popc(new_word); } - output_index += blockDim.x * gridDim.x; + output_index += stride; if (Nullable) { active_mask = __ballot_sync(active_mask, output_index < output_size); } } From 0c829cc0b868c288c3591771d555617d4d978ce3 Mon Sep 17 00:00:00 2001 From: Bradley Dice Date: Fri, 1 Sep 2023 21:38:11 -0500 Subject: [PATCH 034/150] Use cudf::thread_index_type in replace.cu. (#13905) This PR uses `cudf::thread_index_type` in `replace.cu` to avoid risk of overflow. 
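
For context, this commit and the concatenate.cu one above converge on the same
64-bit grid-stride idiom. The following is a minimal, self-contained sketch of
that idiom, not cudf's actual cudf::detail::grid_1d implementation; the only
assumption borrowed from cudf is that thread_index_type is an int64_t alias:

    #include <cstdint>

    using thread_index_type = std::int64_t;  // stand-in for cudf::thread_index_type

    // Widen each built-in index to 64 bits *before* multiplying: the raw
    // product blockIdx.x * blockDim.x is evaluated in 32-bit unsigned
    // arithmetic and wraps once a launch spans more threads than 32 bits
    // can index.
    __device__ inline thread_index_type global_thread_id()
    {
      return static_cast<thread_index_type>(threadIdx.x) +
             static_cast<thread_index_type>(blockIdx.x) *
               static_cast<thread_index_type>(blockDim.x);
    }

    __device__ inline thread_index_type grid_stride()
    {
      return static_cast<thread_index_type>(blockDim.x) *
             static_cast<thread_index_type>(gridDim.x);
    }

    // Grid-stride loop: tid advances in 64 bits, so it cannot wrap for any
    // element count the index type can represent.
    __global__ void copy_kernel(int const* in, int* out, thread_index_type size)
    {
      for (auto tid = global_thread_id(); tid < size; tid += grid_stride()) {
        out[tid] = in[tid];
      }
    }

The key design point is that the widening happens before the multiply;
computing blockIdx.x * blockDim.x first and widening the result afterwards
would preserve the overflow.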
Authors: - Bradley Dice (https://github.com/bdice) Approvers: - Yunsong Wang (https://github.com/PointKernel) - Vukasin Milovanovic (https://github.com/vuule) - Nghia Truong (https://github.com/ttnghia) URL: https://github.com/rapidsai/cudf/pull/13905 --- cpp/src/replace/replace.cu | 91 ++++++++++++++++++++++---------------- 1 file changed, 53 insertions(+), 38 deletions(-) diff --git a/cpp/src/replace/replace.cu b/cpp/src/replace/replace.cu index 07eefdc27c6..9341929de44 100644 --- a/cpp/src/replace/replace.cu +++ b/cpp/src/replace/replace.cu @@ -127,40 +127,42 @@ __global__ void replace_strings_first_pass(cudf::column_device_view input, cudf::size_type* __restrict__ output_valid_count) { cudf::size_type nrows = input.size(); - cudf::size_type i = blockIdx.x * blockDim.x + threadIdx.x; + auto tid = cudf::detail::grid_1d::global_thread_id(); + auto const stride = cudf::detail::grid_1d::grid_stride(); uint32_t active_mask = 0xffff'ffffu; - active_mask = __ballot_sync(active_mask, i < nrows); + active_mask = __ballot_sync(active_mask, tid < nrows); auto const lane_id{threadIdx.x % cudf::detail::warp_size}; uint32_t valid_sum{0}; - while (i < nrows) { + while (tid < nrows) { + auto const idx = static_cast(tid); bool input_is_valid = true; - if (input_has_nulls) input_is_valid = input.is_valid_nocheck(i); + if (input_has_nulls) input_is_valid = input.is_valid_nocheck(idx); bool output_is_valid = input_is_valid; if (input_is_valid) { - int result = get_new_string_value(i, input, values_to_replace, replacement); - cudf::string_view output = (result == -1) ? input.element(i) + int result = get_new_string_value(idx, input, values_to_replace, replacement); + cudf::string_view output = (result == -1) ? input.element(idx) : replacement.element(result); - offsets.data()[i] = output.size_bytes(); - indices.data()[i] = result; + offsets.data()[idx] = output.size_bytes(); + indices.data()[idx] = result; if (replacement_has_nulls && result != -1) { output_is_valid = replacement.is_valid_nocheck(result); } } else { - offsets.data()[i] = 0; - indices.data()[i] = -1; + offsets.data()[idx] = 0; + indices.data()[idx] = -1; } uint32_t bitmask = __ballot_sync(active_mask, output_is_valid); if (0 == lane_id) { - output_valid[cudf::word_index(i)] = bitmask; + output_valid[cudf::word_index(idx)] = bitmask; valid_sum += __popc(bitmask); } - i += blockDim.x * gridDim.x; - active_mask = __ballot_sync(active_mask, i < nrows); + tid += stride; + active_mask = __ballot_sync(active_mask, tid < nrows); } // Compute total valid count for this block and add it to global count @@ -189,27 +191,32 @@ __global__ void replace_strings_second_pass(cudf::column_device_view input, cudf::mutable_column_device_view indices) { cudf::size_type nrows = input.size(); - cudf::size_type i = blockIdx.x * blockDim.x + threadIdx.x; + auto tid = cudf::detail::grid_1d::global_thread_id(); + auto const stride = cudf::detail::grid_1d::grid_stride(); - while (i < nrows) { - bool output_is_valid = true; - bool input_is_valid = true; - cudf::size_type idx = indices.element(i); + while (tid < nrows) { + auto const idx = static_cast(tid); + auto const replace_idx = indices.element(idx); + bool output_is_valid = true; + bool input_is_valid = true; if (input_has_nulls) { - input_is_valid = input.is_valid_nocheck(i); + input_is_valid = input.is_valid_nocheck(idx); output_is_valid = input_is_valid; } - if (replacement_has_nulls && idx != -1) { output_is_valid = replacement.is_valid_nocheck(idx); } + if 
(replacement_has_nulls && replace_idx != -1) { + output_is_valid = replacement.is_valid_nocheck(replace_idx); + } if (output_is_valid) { - cudf::string_view output = (idx == -1) ? input.element(i) - : replacement.element(idx); - std::memcpy(strings.data() + offsets.data()[i], + cudf::string_view output = (replace_idx == -1) + ? input.element(idx) + : replacement.element(replace_idx); + std::memcpy(strings.data() + offsets.data()[idx], output.data(), output.size_bytes()); } - i += blockDim.x * gridDim.x; + tid += stride; } } @@ -247,23 +254,25 @@ __global__ void replace_kernel(cudf::column_device_view input, { T* __restrict__ output_data = output.data(); - cudf::size_type i = blockIdx.x * blockDim.x + threadIdx.x; + auto tid = cudf::detail::grid_1d::global_thread_id(); + auto const stride = cudf::detail::grid_1d::grid_stride(); uint32_t active_mask = 0xffff'ffffu; - active_mask = __ballot_sync(active_mask, i < nrows); + active_mask = __ballot_sync(active_mask, tid < nrows); auto const lane_id{threadIdx.x % cudf::detail::warp_size}; uint32_t valid_sum{0}; - while (i < nrows) { + while (tid < nrows) { + auto const idx = static_cast(tid); bool output_is_valid{true}; bool input_is_valid{true}; if (input_has_nulls) { - input_is_valid = input.is_valid_nocheck(i); + input_is_valid = input.is_valid_nocheck(idx); output_is_valid = input_is_valid; } if (input_is_valid) - thrust::tie(output_data[i], output_is_valid) = get_new_value( - i, + thrust::tie(output_data[idx], output_is_valid) = get_new_value( + idx, input.data(), values_to_replace.data(), values_to_replace.data() + values_to_replace.size(), @@ -274,13 +283,13 @@ __global__ void replace_kernel(cudf::column_device_view input, if (input_has_nulls or replacement_has_nulls) { uint32_t bitmask = __ballot_sync(active_mask, output_is_valid); if (0 == lane_id) { - output.set_mask_word(cudf::word_index(i), bitmask); + output.set_mask_word(cudf::word_index(idx), bitmask); valid_sum += __popc(bitmask); } } - i += blockDim.x * gridDim.x; - active_mask = __ballot_sync(active_mask, i < nrows); + tid += stride; + active_mask = __ballot_sync(active_mask, tid < nrows); } if (input_has_nulls or replacement_has_nulls) { // Compute total valid count for this block and add it to global count @@ -384,10 +393,16 @@ std::unique_ptr replace_kernel_forwarder::operator() sizes = cudf::make_numeric_column( - cudf::data_type(cudf::type_id::INT32), input_col.size(), cudf::mask_state::UNALLOCATED, stream); - std::unique_ptr indices = cudf::make_numeric_column( - cudf::data_type(cudf::type_id::INT32), input_col.size(), cudf::mask_state::UNALLOCATED, stream); + std::unique_ptr sizes = + cudf::make_numeric_column(cudf::data_type{cudf::type_to_id()}, + input_col.size(), + cudf::mask_state::UNALLOCATED, + stream); + std::unique_ptr indices = + cudf::make_numeric_column(cudf::data_type{cudf::type_to_id()}, + input_col.size(), + cudf::mask_state::UNALLOCATED, + stream); auto sizes_view = sizes->mutable_view(); auto indices_view = indices->mutable_view(); @@ -413,7 +428,7 @@ std::unique_ptr replace_kernel_forwarder::operator()(), sizes_view.end(), stream, mr); + sizes_view.begin(), sizes_view.end(), stream, mr); auto offsets_view = offsets->mutable_view(); auto device_offsets = cudf::mutable_column_device_view::create(offsets_view, stream); From c51633627ee7087542ad4c315c0e139dea58e408 Mon Sep 17 00:00:00 2001 From: David Wendt <45795991+davidwendt@users.noreply.github.com> Date: Sun, 3 Sep 2023 02:20:33 -0400 Subject: [PATCH 035/150] Use cudf::make_empty_column instead of 
column_view constructor (#14030) Replaces places where the `cudf::column_view(type,size,...)` constructor was used to create an empty view with a call to `cudf::make_empty_column(type)->view()`. This helps minimize the dependency on calling the constructors directly as part of the work needed for #13733, which may require an update to the `column_view` classes and their constructor(s). Most of the changes occur in strings gtests source files. No functionality or behavior has changed. Authors: - David Wendt (https://github.com/davidwendt) Approvers: - Vukasin Milovanovic (https://github.com/vuule) - Mike Wilson (https://github.com/hyperbolic2346) URL: https://github.com/rapidsai/cudf/pull/14030 --- cpp/src/io/json/write_json.cu | 2 +- cpp/tests/copying/gather_str_tests.cpp | 10 ++++------ cpp/tests/reshape/interleave_columns_tests.cpp | 2 +- cpp/tests/strings/array_tests.cpp | 18 +++++++++--------- cpp/tests/strings/attrs_tests.cpp | 4 ++-- cpp/tests/strings/booleans_tests.cpp | 9 ++++----- cpp/tests/strings/case_tests.cpp | 4 ++-- .../strings/combine/concatenate_tests.cpp | 12 +++++------- .../strings/combine/join_strings_tests.cpp | 4 ++-- cpp/tests/strings/concatenate_tests.cpp | 10 ++++------ cpp/tests/strings/datetime_tests.cpp | 8 +++----- cpp/tests/strings/durations_tests.cpp | 8 +++----- cpp/tests/strings/fill_tests.cpp | 3 +-- cpp/tests/strings/find_multiple_tests.cpp | 10 ++++------ cpp/tests/strings/find_tests.cpp | 7 +++---- cpp/tests/strings/integers_tests.cpp | 9 ++++----- cpp/tests/strings/ipv4_tests.cpp | 3 ++- cpp/tests/strings/pad_tests.cpp | 4 ++-- cpp/tests/strings/replace_tests.cpp | 4 ++-- cpp/tests/strings/reverse_tests.cpp | 4 ++-- cpp/tests/strings/slice_tests.cpp | 10 +++++----- cpp/tests/strings/split_tests.cpp | 8 ++++---- cpp/tests/strings/strip_tests.cpp | 4 ++-- cpp/tests/strings/translate_tests.cpp | 4 ++-- cpp/tests/strings/urls_tests.cpp | 8 ++++---- cpp/tests/text/ngrams_tests.cpp | 4 ++-- 26 files changed, 79 insertions(+), 94 deletions(-) diff --git a/cpp/src/io/json/write_json.cu b/cpp/src/io/json/write_json.cu index ffb4a7cd87b..1e44522ed33 100644 --- a/cpp/src/io/json/write_json.cu +++ b/cpp/src/io/json/write_json.cu @@ -582,7 +582,7 @@ struct column_to_strings_fn { return cudf::strings::detail::from_timestamps( column, format, - strings_column_view(column_view{data_type{type_id::STRING}, 0, nullptr, nullptr, 0}), + strings_column_view(make_empty_column(type_id::STRING)->view()), stream_, mr_); } diff --git a/cpp/tests/copying/gather_str_tests.cpp b/cpp/tests/copying/gather_str_tests.cpp index 41251b028ae..22af600ab96 100644 --- a/cpp/tests/copying/gather_str_tests.cpp +++ b/cpp/tests/copying/gather_str_tests.cpp @@ -133,10 +133,9 @@ TEST_F(GatherTestStr, GatherDontCheckOutOfBounds) TEST_F(GatherTestStr, GatherEmptyMapStringsColumn) { - cudf::column_view zero_size_strings_column( - cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); + auto const zero_size_strings_column = cudf::make_empty_column(cudf::type_id::STRING); cudf::test::fixed_width_column_wrapper gather_map; - auto results = cudf::detail::gather(cudf::table_view({zero_size_strings_column}), + auto results = cudf::detail::gather(cudf::table_view({zero_size_strings_column->view()}), gather_map, cudf::out_of_bounds_policy::NULLIFY, cudf::detail::negative_index_policy::NOT_ALLOWED, @@ -147,11 +146,10 @@ TEST_F(GatherTestStr, GatherEmptyMapStringsColumn) TEST_F(GatherTestStr, GatherZeroSizeStringsColumn) { - cudf::column_view zero_size_strings_column( -
cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); + auto const zero_size_strings_column = cudf::make_empty_column(cudf::type_id::STRING); cudf::test::fixed_width_column_wrapper gather_map({0}); cudf::test::strings_column_wrapper expected{std::pair{"", false}}; - auto results = cudf::detail::gather(cudf::table_view({zero_size_strings_column}), + auto results = cudf::detail::gather(cudf::table_view({zero_size_strings_column->view()}), gather_map, cudf::out_of_bounds_policy::NULLIFY, cudf::detail::negative_index_policy::NOT_ALLOWED, diff --git a/cpp/tests/reshape/interleave_columns_tests.cpp b/cpp/tests/reshape/interleave_columns_tests.cpp index e2697567c38..eba6c961bbb 100644 --- a/cpp/tests/reshape/interleave_columns_tests.cpp +++ b/cpp/tests/reshape/interleave_columns_tests.cpp @@ -189,7 +189,7 @@ struct InterleaveStringsColumnsTest : public cudf::test::BaseFixture {}; TEST_F(InterleaveStringsColumnsTest, ZeroSizedColumns) { - cudf::column_view col0(cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); + auto const col0 = cudf::make_empty_column(cudf::type_id::STRING)->view(); auto results = cudf::interleave_columns(cudf::table_view{{col0}}); cudf::test::expect_column_empty(results->view()); diff --git a/cpp/tests/strings/array_tests.cpp b/cpp/tests/strings/array_tests.cpp index ecc38dfd26e..c7ceb899833 100644 --- a/cpp/tests/strings/array_tests.cpp +++ b/cpp/tests/strings/array_tests.cpp @@ -47,8 +47,8 @@ TEST_F(StringsColumnTest, Sort) TEST_F(StringsColumnTest, SortZeroSizeStringsColumn) { - cudf::column_view zero_size_strings_column( - cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); + auto const zero_size_strings_column = cudf::make_empty_column(cudf::type_id::STRING)->view(); + auto results = cudf::sort(cudf::table_view({zero_size_strings_column})); cudf::test::expect_column_empty(results->view().column(0)); } @@ -117,8 +117,8 @@ INSTANTIATE_TEST_CASE_P(StringsColumnTest, TEST_F(StringsColumnTest, SliceZeroSizeStringsColumn) { - cudf::column_view zero_size_strings_column( - cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); + auto const zero_size_strings_column = cudf::make_empty_column(cudf::type_id::STRING)->view(); + auto scol = cudf::slice(zero_size_strings_column, {0, 0}); auto results = std::make_unique(scol.front()); cudf::test::expect_column_empty(results->view()); @@ -141,8 +141,8 @@ TEST_F(StringsColumnTest, Gather) TEST_F(StringsColumnTest, GatherZeroSizeStringsColumn) { - cudf::column_view zero_size_strings_column( - cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); + auto const zero_size_strings_column = cudf::make_empty_column(cudf::type_id::STRING)->view(); + cudf::column_view map_view(cudf::data_type{cudf::type_id::INT32}, 0, nullptr, nullptr, 0); auto results = cudf::gather(cudf::table_view{{zero_size_strings_column}}, map_view)->release(); cudf::test::expect_column_empty(results.front()->view()); @@ -193,9 +193,9 @@ TEST_F(StringsColumnTest, ScatterScalar) TEST_F(StringsColumnTest, ScatterZeroSizeStringsColumn) { - cudf::column_view source(cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); - cudf::column_view target(cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); - cudf::column_view scatter_map(cudf::data_type{cudf::type_id::INT8}, 0, nullptr, nullptr, 0); + auto const source = cudf::make_empty_column(cudf::type_id::STRING)->view(); + auto const target = cudf::make_empty_column(cudf::type_id::STRING)->view(); + auto const scatter_map = 
cudf::make_empty_column(cudf::type_id::INT8)->view(); auto results = cudf::scatter(cudf::table_view({source}), scatter_map, cudf::table_view({target})); cudf::test::expect_column_empty(results->view().column(0)); diff --git a/cpp/tests/strings/attrs_tests.cpp b/cpp/tests/strings/attrs_tests.cpp index 4f2fc485388..c5f38697f00 100644 --- a/cpp/tests/strings/attrs_tests.cpp +++ b/cpp/tests/strings/attrs_tests.cpp @@ -48,8 +48,8 @@ TEST_F(StringsAttributesTest, CodePoints) TEST_F(StringsAttributesTest, ZeroSizeStringsColumn) { - cudf::column_view zero_size_strings_column( - cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); + auto const zero_size_strings_column = cudf::make_empty_column(cudf::type_id::STRING)->view(); + auto strings_view = cudf::strings_column_view(zero_size_strings_column); cudf::column_view expected_column(cudf::data_type{cudf::type_id::INT32}, 0, nullptr, nullptr, 0); diff --git a/cpp/tests/strings/booleans_tests.cpp b/cpp/tests/strings/booleans_tests.cpp index 21c9f6c70e8..0c7fc992065 100644 --- a/cpp/tests/strings/booleans_tests.cpp +++ b/cpp/tests/strings/booleans_tests.cpp @@ -66,16 +66,15 @@ TEST_F(StringsConvertTest, FromBooleans) TEST_F(StringsConvertTest, ZeroSizeStringsColumnBoolean) { - cudf::column_view zero_size_column(cudf::data_type{cudf::type_id::BOOL8}, 0, nullptr, nullptr, 0); - auto results = cudf::strings::from_booleans(zero_size_column); + auto const zero_size_column = cudf::make_empty_column(cudf::type_id::BOOL8)->view(); + auto results = cudf::strings::from_booleans(zero_size_column); cudf::test::expect_column_empty(results->view()); } TEST_F(StringsConvertTest, ZeroSizeBooleansColumn) { - cudf::column_view zero_size_column( - cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); - auto results = cudf::strings::to_booleans(zero_size_column); + auto const zero_size_strings_column = cudf::make_empty_column(cudf::type_id::STRING)->view(); + auto results = cudf::strings::to_booleans(zero_size_strings_column); EXPECT_EQ(0, results->size()); } diff --git a/cpp/tests/strings/case_tests.cpp b/cpp/tests/strings/case_tests.cpp index 5e2aa0584be..1d82d785ae8 100644 --- a/cpp/tests/strings/case_tests.cpp +++ b/cpp/tests/strings/case_tests.cpp @@ -262,8 +262,8 @@ TEST_F(StringsCaseTest, LongStrings) TEST_F(StringsCaseTest, EmptyStringsColumn) { - cudf::column_view zero_size_strings_column( - cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); + auto const zero_size_strings_column = cudf::make_empty_column(cudf::type_id::STRING)->view(); + auto strings_view = cudf::strings_column_view(zero_size_strings_column); auto results = cudf::strings::to_lower(strings_view); diff --git a/cpp/tests/strings/combine/concatenate_tests.cpp b/cpp/tests/strings/combine/concatenate_tests.cpp index 37cb7302a8e..95993e6ecbc 100644 --- a/cpp/tests/strings/combine/concatenate_tests.cpp +++ b/cpp/tests/strings/combine/concatenate_tests.cpp @@ -149,8 +149,7 @@ TEST_F(StringsCombineTest, ConcatenateSkipNulls) TEST_F(StringsCombineTest, ConcatZeroSizeStringsColumns) { - cudf::column_view zero_size_strings_column( - cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); + auto const zero_size_strings_column = cudf::make_empty_column(cudf::type_id::STRING)->view(); std::vector strings_columns; strings_columns.push_back(zero_size_strings_column); strings_columns.push_back(zero_size_strings_column); @@ -161,8 +160,8 @@ TEST_F(StringsCombineTest, ConcatZeroSizeStringsColumns) TEST_F(StringsCombineTest, SingleColumnErrorCheck) { - cudf::column_view 
col0(cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); - EXPECT_THROW(cudf::strings::concatenate(cudf::table_view{{col0}}), cudf::logic_error); + auto const col0 = cudf::make_empty_column(cudf::type_id::STRING); + EXPECT_THROW(cudf::strings::concatenate(cudf::table_view{{col0->view()}}), cudf::logic_error); } struct StringsConcatenateWithColSeparatorTest : public cudf::test::BaseFixture {}; @@ -180,7 +179,7 @@ TEST_F(StringsConcatenateWithColSeparatorTest, ExceptionTests) } { - cudf::column_view col0(cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); + auto const col0 = cudf::make_empty_column(cudf::type_id::STRING)->view(); cudf::test::fixed_width_column_wrapper col1{{1}}; EXPECT_THROW( @@ -200,8 +199,7 @@ TEST_F(StringsConcatenateWithColSeparatorTest, ExceptionTests) TEST_F(StringsConcatenateWithColSeparatorTest, ZeroSizedColumns) { - cudf::column_view col0(cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); - + auto const col0 = cudf::make_empty_column(cudf::type_id::STRING)->view(); auto results = cudf::strings::concatenate(cudf::table_view{{col0}}, cudf::strings_column_view(col0)); cudf::test::expect_column_empty(results->view()); diff --git a/cpp/tests/strings/combine/join_strings_tests.cpp b/cpp/tests/strings/combine/join_strings_tests.cpp index d413c50f122..ecc7432201f 100644 --- a/cpp/tests/strings/combine/join_strings_tests.cpp +++ b/cpp/tests/strings/combine/join_strings_tests.cpp @@ -73,8 +73,8 @@ TEST_F(JoinStringsTest, JoinLongStrings) TEST_F(JoinStringsTest, JoinZeroSizeStringsColumn) { - cudf::column_view zero_size_strings_column( - cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); + auto const zero_size_strings_column = cudf::make_empty_column(cudf::type_id::STRING)->view(); + auto strings_view = cudf::strings_column_view(zero_size_strings_column); auto results = cudf::strings::join_strings(strings_view); cudf::test::expect_column_empty(results->view()); diff --git a/cpp/tests/strings/concatenate_tests.cpp b/cpp/tests/strings/concatenate_tests.cpp index e40a90685c4..5cf4015b9e9 100644 --- a/cpp/tests/strings/concatenate_tests.cpp +++ b/cpp/tests/strings/concatenate_tests.cpp @@ -50,8 +50,8 @@ TEST_F(StringsConcatenateTest, Concatenate) cudf::test::strings_column_wrapper strings2(h_strings.data() + 6, h_strings.data() + 10); cudf::test::strings_column_wrapper strings3(h_strings.data() + 10, h_strings.data() + h_strings.size()); - cudf::column_view zero_size_strings_column( - cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); + + auto const zero_size_strings_column = cudf::make_empty_column(cudf::type_id::STRING)->view(); std::vector strings_columns; strings_columns.push_back(strings1); @@ -67,8 +67,7 @@ TEST_F(StringsConcatenateTest, Concatenate) TEST_F(StringsConcatenateTest, ZeroSizeStringsColumns) { - cudf::column_view zero_size_strings_column( - cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); + auto const zero_size_strings_column = cudf::make_empty_column(cudf::type_id::STRING)->view(); std::vector strings_columns; strings_columns.push_back(zero_size_strings_column); strings_columns.push_back(zero_size_strings_column); @@ -79,8 +78,7 @@ TEST_F(StringsConcatenateTest, ZeroSizeStringsColumns) TEST_F(StringsConcatenateTest, ZeroSizeStringsPlusNormal) { - cudf::column_view zero_size_strings_column( - cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); + auto const zero_size_strings_column = cudf::make_empty_column(cudf::type_id::STRING)->view(); std::vector strings_columns; 
strings_columns.push_back(zero_size_strings_column); diff --git a/cpp/tests/strings/datetime_tests.cpp b/cpp/tests/strings/datetime_tests.cpp index 8ad1858fa36..bb5c96a09bf 100644 --- a/cpp/tests/strings/datetime_tests.cpp +++ b/cpp/tests/strings/datetime_tests.cpp @@ -605,13 +605,11 @@ TEST_F(StringsDatetimeTest, FromTimestampAllSpecifiers) TEST_F(StringsDatetimeTest, ZeroSizeStringsColumn) { - cudf::column_view zero_size_column( - cudf::data_type{cudf::type_id::TIMESTAMP_SECONDS}, 0, nullptr, nullptr, 0); - auto results = cudf::strings::from_timestamps(zero_size_column); + auto const zero_size_column = cudf::make_empty_column(cudf::type_id::TIMESTAMP_SECONDS)->view(); + auto results = cudf::strings::from_timestamps(zero_size_column); cudf::test::expect_column_empty(results->view()); - cudf::column_view zero_size_strings_column( - cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); + auto const zero_size_strings_column = cudf::make_empty_column(cudf::type_id::STRING)->view(); results = cudf::strings::to_timestamps(cudf::strings_column_view(zero_size_strings_column), cudf::data_type{cudf::type_id::TIMESTAMP_SECONDS}, "%Y"); diff --git a/cpp/tests/strings/durations_tests.cpp b/cpp/tests/strings/durations_tests.cpp index f9026f5f624..0c7a1ad8042 100644 --- a/cpp/tests/strings/durations_tests.cpp +++ b/cpp/tests/strings/durations_tests.cpp @@ -728,13 +728,11 @@ TEST_F(StringsDurationsTest, ParseEscapeCharacters) TEST_F(StringsDurationsTest, ZeroSizeStringsColumn) { - cudf::column_view zero_size_column( - cudf::data_type{cudf::type_id::DURATION_SECONDS}, 0, nullptr, nullptr, 0); - auto results = cudf::strings::from_durations(zero_size_column); + auto const zero_size_column = cudf::make_empty_column(cudf::type_id::DURATION_SECONDS)->view(); + auto results = cudf::strings::from_durations(zero_size_column); cudf::test::expect_column_empty(results->view()); - cudf::column_view zero_size_strings_column( - cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); + auto const zero_size_strings_column = cudf::make_empty_column(cudf::type_id::STRING)->view(); results = cudf::strings::to_durations(cudf::strings_column_view(zero_size_strings_column), cudf::data_type{cudf::type_id::DURATION_SECONDS}, "%S"); diff --git a/cpp/tests/strings/fill_tests.cpp b/cpp/tests/strings/fill_tests.cpp index 74254b38d2f..aadd68402c8 100644 --- a/cpp/tests/strings/fill_tests.cpp +++ b/cpp/tests/strings/fill_tests.cpp @@ -69,8 +69,7 @@ TEST_F(StringsFillTest, Fill) TEST_F(StringsFillTest, ZeroSizeStringsColumns) { - cudf::column_view zero_size_strings_column( - cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); + auto const zero_size_strings_column = cudf::make_empty_column(cudf::type_id::STRING)->view(); auto results = cudf::fill(zero_size_strings_column, 0, 0, cudf::string_scalar("")); cudf::test::expect_column_empty(results->view()); } diff --git a/cpp/tests/strings/find_multiple_tests.cpp b/cpp/tests/strings/find_multiple_tests.cpp index 799bf9a3fcb..986f86d2b49 100644 --- a/cpp/tests/strings/find_multiple_tests.cpp +++ b/cpp/tests/strings/find_multiple_tests.cpp @@ -57,9 +57,8 @@ TEST_F(StringsFindMultipleTest, FindMultiple) TEST_F(StringsFindMultipleTest, ZeroSizeStringsColumn) { - cudf::column_view zero_size_strings_column( - cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); - auto strings_view = cudf::strings_column_view(zero_size_strings_column); + auto const zero_size_strings_column = cudf::make_empty_column(cudf::type_id::STRING)->view(); + auto strings_view = 
cudf::strings_column_view(zero_size_strings_column); std::vector h_targets{""}; cudf::test::strings_column_wrapper targets(h_targets.begin(), h_targets.end()); auto targets_view = cudf::strings_column_view(targets); @@ -73,9 +72,8 @@ TEST_F(StringsFindMultipleTest, ErrorTest) cudf::test::strings_column_wrapper strings({"this string intentionally left blank"}, {0}); auto strings_view = cudf::strings_column_view(strings); - cudf::column_view zero_size_strings_column( - cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); - auto empty_view = cudf::strings_column_view(zero_size_strings_column); + auto const zero_size_strings_column = cudf::make_empty_column(cudf::type_id::STRING)->view(); + auto empty_view = cudf::strings_column_view(zero_size_strings_column); // targets must have at least one string EXPECT_THROW(cudf::strings::find_multiple(strings_view, empty_view), cudf::logic_error); diff --git a/cpp/tests/strings/find_tests.cpp b/cpp/tests/strings/find_tests.cpp index e64a368a952..5c0a5b760f5 100644 --- a/cpp/tests/strings/find_tests.cpp +++ b/cpp/tests/strings/find_tests.cpp @@ -250,10 +250,9 @@ TEST_F(StringsFindTest, EndsWith) TEST_F(StringsFindTest, ZeroSizeStringsColumn) { - cudf::column_view zero_size_strings_column( - cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); - auto strings_view = cudf::strings_column_view(zero_size_strings_column); - auto results = cudf::strings::find(strings_view, cudf::string_scalar("é")); + auto const zero_size_strings_column = cudf::make_empty_column(cudf::type_id::STRING)->view(); + auto strings_view = cudf::strings_column_view(zero_size_strings_column); + auto results = cudf::strings::find(strings_view, cudf::string_scalar("é")); EXPECT_EQ(results->size(), 0); results = cudf::strings::rfind(strings_view, cudf::string_scalar("é")); EXPECT_EQ(results->size(), 0); diff --git a/cpp/tests/strings/integers_tests.cpp b/cpp/tests/strings/integers_tests.cpp index 7a44ca9efba..59805f9cb6d 100644 --- a/cpp/tests/strings/integers_tests.cpp +++ b/cpp/tests/strings/integers_tests.cpp @@ -261,17 +261,16 @@ TEST_F(StringsConvertTest, FromInteger) TEST_F(StringsConvertTest, ZeroSizeStringsColumn) { - cudf::column_view zero_size_column(cudf::data_type{cudf::type_id::INT32}, 0, nullptr, nullptr, 0); - auto results = cudf::strings::from_integers(zero_size_column); + auto const zero_size_column = cudf::make_empty_column(cudf::type_id::INT32)->view(); + auto results = cudf::strings::from_integers(zero_size_column); cudf::test::expect_column_empty(results->view()); } TEST_F(StringsConvertTest, ZeroSizeIntegersColumn) { - cudf::column_view zero_size_column( - cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); + auto const zero_size_strings_column = cudf::make_empty_column(cudf::type_id::STRING)->view(); auto results = - cudf::strings::to_integers(zero_size_column, cudf::data_type{cudf::type_id::INT32}); + cudf::strings::to_integers(zero_size_strings_column, cudf::data_type{cudf::type_id::INT32}); EXPECT_EQ(0, results->size()); } diff --git a/cpp/tests/strings/ipv4_tests.cpp b/cpp/tests/strings/ipv4_tests.cpp index 268806dd3cf..2b2d5730ca7 100644 --- a/cpp/tests/strings/ipv4_tests.cpp +++ b/cpp/tests/strings/ipv4_tests.cpp @@ -72,7 +72,8 @@ TEST_F(StringsConvertTest, IntegersToIPv4) TEST_F(StringsConvertTest, ZeroSizeStringsColumnIPV4) { - cudf::column_view zero_size_column(cudf::data_type{cudf::type_id::INT64}, 0, nullptr, nullptr, 0); + auto const zero_size_column = cudf::make_empty_column(cudf::type_id::INT64)->view(); + auto 
results = cudf::strings::integers_to_ipv4(zero_size_column); cudf::test::expect_column_empty(results->view()); results = cudf::strings::ipv4_to_integers(results->view()); diff --git a/cpp/tests/strings/pad_tests.cpp b/cpp/tests/strings/pad_tests.cpp index 8c07cb62c6b..81ec87a12a8 100644 --- a/cpp/tests/strings/pad_tests.cpp +++ b/cpp/tests/strings/pad_tests.cpp @@ -97,8 +97,8 @@ TEST_F(StringsPadTest, PaddingBoth) TEST_F(StringsPadTest, ZeroSizeStringsColumn) { - cudf::column_view zero_size_strings_column( - cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); + auto const zero_size_strings_column = cudf::make_empty_column(cudf::type_id::STRING)->view(); + auto strings_view = cudf::strings_column_view(zero_size_strings_column); auto results = cudf::strings::pad(strings_view, 5); cudf::test::expect_column_empty(results->view()); diff --git a/cpp/tests/strings/replace_tests.cpp b/cpp/tests/strings/replace_tests.cpp index 4a45773a29a..f143983aded 100644 --- a/cpp/tests/strings/replace_tests.cpp +++ b/cpp/tests/strings/replace_tests.cpp @@ -470,8 +470,8 @@ TEST_F(StringsReplaceTest, ReplaceMultiLong) TEST_F(StringsReplaceTest, EmptyStringsColumn) { - cudf::column_view zero_size_strings_column( - cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); + auto const zero_size_strings_column = cudf::make_empty_column(cudf::type_id::STRING)->view(); + auto strings_view = cudf::strings_column_view(zero_size_strings_column); auto results = cudf::strings::replace( strings_view, cudf::string_scalar("not"), cudf::string_scalar("pertinent")); diff --git a/cpp/tests/strings/reverse_tests.cpp b/cpp/tests/strings/reverse_tests.cpp index 8c3f87709ff..3df42b61ebf 100644 --- a/cpp/tests/strings/reverse_tests.cpp +++ b/cpp/tests/strings/reverse_tests.cpp @@ -45,8 +45,8 @@ TEST_F(StringsReverseTest, Reverse) TEST_F(StringsReverseTest, EmptyStringsColumn) { - cudf::column_view zero_size_strings_column( - cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); + auto const zero_size_strings_column = cudf::make_empty_column(cudf::type_id::STRING)->view(); + auto results = cudf::strings::reverse(cudf::strings_column_view(zero_size_strings_column)); auto view = results->view(); cudf::test::expect_column_empty(results->view()); diff --git a/cpp/tests/strings/slice_tests.cpp b/cpp/tests/strings/slice_tests.cpp index 1162bbb6b13..92230d06672 100644 --- a/cpp/tests/strings/slice_tests.cpp +++ b/cpp/tests/strings/slice_tests.cpp @@ -288,15 +288,15 @@ TEST_F(StringsSliceTest, Error) TEST_F(StringsSliceTest, ZeroSizeStringsColumn) { - cudf::column_view zero_size_strings_column( - cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); - auto strings_view = cudf::strings_column_view(zero_size_strings_column); + auto const zero_size_strings_column = cudf::make_empty_column(cudf::type_id::STRING)->view(); + auto strings_view = cudf::strings_column_view(zero_size_strings_column); auto results = cudf::strings::slice_strings(strings_view, 1, 2); cudf::test::expect_column_empty(results->view()); - cudf::column_view starts_column(cudf::data_type{cudf::type_id::INT32}, 0, nullptr, nullptr, 0); - cudf::column_view stops_column(cudf::data_type{cudf::type_id::INT32}, 0, nullptr, nullptr, 0); + auto const starts_column = cudf::make_empty_column(cudf::type_id::INT32)->view(); + auto const stops_column = cudf::make_empty_column(cudf::type_id::INT32)->view(); + results = cudf::strings::slice_strings(strings_view, starts_column, stops_column); cudf::test::expect_column_empty(results->view()); } diff 
--git a/cpp/tests/strings/split_tests.cpp b/cpp/tests/strings/split_tests.cpp index e8c4ec8e19c..445e283ef45 100644 --- a/cpp/tests/strings/split_tests.cpp +++ b/cpp/tests/strings/split_tests.cpp @@ -676,8 +676,8 @@ TEST_F(StringsSplitTest, RSplitRegexWithMaxSplit) TEST_F(StringsSplitTest, SplitZeroSizeStringsColumns) { - cudf::column_view zero_size_strings_column( - cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); + auto const zero_size_strings_column = cudf::make_empty_column(cudf::type_id::STRING)->view(); + auto prog = cudf::strings::regex_program::create("\\s"); auto results = cudf::strings::split(zero_size_strings_column); EXPECT_TRUE(results->num_columns() == 1); @@ -912,8 +912,8 @@ TEST_F(StringsSplitTest, RPartitionWhitespace) TEST_F(StringsSplitTest, PartitionZeroSizeStringsColumns) { - cudf::column_view zero_size_strings_column( - cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); + auto const zero_size_strings_column = cudf::make_empty_column(cudf::type_id::STRING)->view(); + auto results = cudf::strings::partition(zero_size_strings_column); EXPECT_TRUE(results->num_columns() == 0); results = cudf::strings::rpartition(zero_size_strings_column); diff --git a/cpp/tests/strings/strip_tests.cpp b/cpp/tests/strings/strip_tests.cpp index bd6d587e0a1..63179474944 100644 --- a/cpp/tests/strings/strip_tests.cpp +++ b/cpp/tests/strings/strip_tests.cpp @@ -92,8 +92,8 @@ TEST_F(StringsStripTest, StripBoth) TEST_F(StringsStripTest, EmptyStringsColumn) { - cudf::column_view zero_size_strings_column( - cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); + auto const zero_size_strings_column = cudf::make_empty_column(cudf::type_id::STRING)->view(); + auto strings_view = cudf::strings_column_view(zero_size_strings_column); auto results = cudf::strings::strip(strings_view); auto view = results->view(); diff --git a/cpp/tests/strings/translate_tests.cpp b/cpp/tests/strings/translate_tests.cpp index 6b4288196f9..ab3973242c6 100644 --- a/cpp/tests/strings/translate_tests.cpp +++ b/cpp/tests/strings/translate_tests.cpp @@ -62,8 +62,8 @@ TEST_F(StringsTranslateTest, Translate) TEST_F(StringsTranslateTest, ZeroSizeStringsColumn) { - cudf::column_view zero_size_strings_column( - cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); + auto const zero_size_strings_column = cudf::make_empty_column(cudf::type_id::STRING)->view(); + auto strings_view = cudf::strings_column_view(zero_size_strings_column); std::vector> translate_table; auto results = cudf::strings::translate(strings_view, translate_table); diff --git a/cpp/tests/strings/urls_tests.cpp b/cpp/tests/strings/urls_tests.cpp index 22147d33569..2aec72160cc 100644 --- a/cpp/tests/strings/urls_tests.cpp +++ b/cpp/tests/strings/urls_tests.cpp @@ -226,10 +226,10 @@ TEST_F(StringsConvertTest, UrlDecodeLargeStrings) TEST_F(StringsConvertTest, ZeroSizeUrlStringsColumn) { - cudf::column_view zero_size_column( - cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); - auto results = cudf::strings::url_encode(zero_size_column); + auto const zero_size_strings_column = cudf::make_empty_column(cudf::type_id::STRING)->view(); + + auto results = cudf::strings::url_encode(zero_size_strings_column); cudf::test::expect_column_empty(results->view()); - results = cudf::strings::url_decode(zero_size_column); + results = cudf::strings::url_decode(zero_size_strings_column); cudf::test::expect_column_empty(results->view()); } diff --git a/cpp/tests/text/ngrams_tests.cpp b/cpp/tests/text/ngrams_tests.cpp index 
feb0cf538b0..323b3eed3e2 100644 --- a/cpp/tests/text/ngrams_tests.cpp +++ b/cpp/tests/text/ngrams_tests.cpp @@ -101,8 +101,8 @@ TEST_F(TextGenerateNgramsTest, NgramsWithNulls) TEST_F(TextGenerateNgramsTest, Empty) { - cudf::column_view zero_size_strings_column( - cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0); + auto const zero_size_strings_column = cudf::make_empty_column(cudf::type_id::STRING)->view(); + auto results = nvtext::generate_ngrams(cudf::strings_column_view(zero_size_strings_column)); cudf::test::expect_column_empty(results->view()); results = nvtext::generate_character_ngrams(cudf::strings_column_view(zero_size_strings_column)); From 3e5f019697252f6c300639a09eb67ff11a80ac43 Mon Sep 17 00:00:00 2001 From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> Date: Mon, 4 Sep 2023 07:11:13 -1000 Subject: [PATCH 036/150] Raise NotImplementedError for Categoricals with timezones (#14032) Currently `cudf.from_pandas` with a pandas Categorical with datetimetz type will drop the timezone information (due to pyarrow) ```python In [5]: import pandas as pd In [6]: ci = pd.CategoricalIndex(pd.date_range("2016-01-01 01:01:00", periods=5, freq="D").tz_localize("UTC")) In [7]: ci Out[7]: CategoricalIndex(['2016-01-01 01:01:00+00:00', '2016-01-02 01:01:00+00:00', '2016-01-03 01:01:00+00:00', '2016-01-04 01:01:00+00:00', '2016-01-05 01:01:00+00:00'], categories=[2016-01-01 01:01:00+00:00, 2016-01-02 01:01:00+00:00, 2016-01-03 01:01:00+00:00, 2016-01-04 01:01:00+00:00, 2016-01-05 01:01:00+00:00], ordered=False, dtype='category') In [8]: ci_cudf = cudf.from_pandas(ci) In [10]: ci_cudf Out[10]: CategoricalIndex(['2016-01-01 01:01:00', '2016-01-02 01:01:00', '2016-01-03 01:01:00', '2016-01-04 01:01:00', '2016-01-05 01:01:00'], categories=[2016-01-01 01:01:00, 2016-01-02 01:01:00, 2016-01-03 01:01:00, 2016-01-04 01:01:00, 2016-01-05 01:01:00], ordered=False, dtype='category') ``` Like what is done with `IntervalIndex`, raises a `NotImplementedError` for now to avoid this wrong behavior. Authors: - Matthew Roeschke (https://github.com/mroeschke) Approvers: - Lawrence Mitchell (https://github.com/wence-) URL: https://github.com/rapidsai/cudf/pull/14032 --- python/cudf/cudf/core/column/column.py | 31 ++++++++++++++++++++----- python/cudf/cudf/tests/test_datetime.py | 2 ++ python/cudf/cudf/tests/test_interval.py | 11 +++++---- 3 files changed, 33 insertions(+), 11 deletions(-) diff --git a/python/cudf/cudf/core/column/column.py b/python/cudf/cudf/core/column/column.py index ad761ea8d18..9dde17a1045 100644 --- a/python/cudf/cudf/core/column/column.py +++ b/python/cudf/cudf/core/column/column.py @@ -2033,9 +2033,19 @@ def as_column( f"{arbitrary.dtype} is not supported. Convert first to " f"{arbitrary.dtype.subtype}." 
) - if is_categorical_dtype(arbitrary): + if is_categorical_dtype(arbitrary.dtype): + if isinstance( + arbitrary.dtype.categories.dtype, pd.DatetimeTZDtype + ): + raise NotImplementedError( + "cuDF does not yet support timezone-aware datetimes" + ) data = as_column(pa.array(arbitrary, from_pandas=True)) elif is_interval_dtype(arbitrary.dtype): + if isinstance(arbitrary.dtype.subtype, pd.DatetimeTZDtype): + raise NotImplementedError( + "cuDF does not yet support timezone-aware datetimes" + ) data = as_column(pa.array(arbitrary, from_pandas=True)) elif arbitrary.dtype == np.bool_: data = as_column(cupy.asarray(arbitrary), dtype=arbitrary.dtype) @@ -2262,11 +2272,20 @@ def as_column( elif isinstance(arbitrary, pd.core.arrays.masked.BaseMaskedArray): data = as_column(pa.Array.from_pandas(arbitrary), dtype=dtype) elif ( - isinstance(arbitrary, pd.DatetimeIndex) - and isinstance(arbitrary.dtype, pd.DatetimeTZDtype) - ) or ( - isinstance(arbitrary, pd.IntervalIndex) - and is_datetime64tz_dtype(arbitrary.dtype.subtype) + ( + isinstance(arbitrary, pd.DatetimeIndex) + and isinstance(arbitrary.dtype, pd.DatetimeTZDtype) + ) + or ( + isinstance(arbitrary, pd.IntervalIndex) + and is_datetime64tz_dtype(arbitrary.dtype.subtype) + ) + or ( + isinstance(arbitrary, pd.CategoricalIndex) + and isinstance( + arbitrary.dtype.categories.dtype, pd.DatetimeTZDtype + ) + ) ): raise NotImplementedError( "cuDF does not yet support timezone-aware datetimes" diff --git a/python/cudf/cudf/tests/test_datetime.py b/python/cudf/cudf/tests/test_datetime.py index abcc057f823..b1685950241 100644 --- a/python/cudf/cudf/tests/test_datetime.py +++ b/python/cudf/cudf/tests/test_datetime.py @@ -2095,6 +2095,8 @@ def test_construction_from_tz_timestamps(data): _ = cudf.Index(data) with pytest.raises(NotImplementedError): _ = cudf.DatetimeIndex(data) + with pytest.raises(NotImplementedError): + cudf.CategoricalIndex(data) @pytest.mark.parametrize("op", _cmpops) diff --git a/python/cudf/cudf/tests/test_interval.py b/python/cudf/cudf/tests/test_interval.py index 9704be44b95..a27de60c2c5 100644 --- a/python/cudf/cudf/tests/test_interval.py +++ b/python/cudf/cudf/tests/test_interval.py @@ -167,17 +167,18 @@ def test_interval_index_unique(): assert_eq(expected, actual) +@pytest.mark.parametrize("box", [pd.Series, pd.IntervalIndex]) @pytest.mark.parametrize("tz", ["US/Eastern", None]) -def test_interval_with_datetime(tz): +def test_interval_with_datetime(tz, box): dti = pd.date_range( start=pd.Timestamp("20180101", tz=tz), end=pd.Timestamp("20181231", tz=tz), freq="M", ) - pidx = pd.IntervalIndex.from_breaks(dti) + pobj = box(pd.IntervalIndex.from_breaks(dti)) if tz is None: - gidx = cudf.from_pandas(pidx) - assert_eq(pidx, gidx) + gobj = cudf.from_pandas(pobj) + assert_eq(pobj, gobj) else: with pytest.raises(NotImplementedError): - cudf.from_pandas(pidx) + cudf.from_pandas(pobj) From 0b01fe49c8d5963e7be07e6dac2b78f842461db3 Mon Sep 17 00:00:00 2001 From: Lawrence Mitchell Date: Tue, 5 Sep 2023 21:22:33 +0100 Subject: [PATCH 037/150] Implement `sort_remaining` for `sort_index` (#14033) Previously, the `sort_remaining` argument to `sort_index` was ignored. Passing `sort_remaining=False` would raise a `NotImplementedError`. Moreover, for a multiindex, `sort_remaining=True` was not handled correctly: if not all levels were requested as sorted, `sort_index` would behave as if `sort_remaining=False` had been passed. 
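For example (the frame below is illustrative, not taken from the original report; the commented outputs sketch the behavior described above):

```python
In [1]: import cudf

In [2]: df = cudf.DataFrame(
   ...:     {"a": [1, 1, 2, 2], "b": [4, 3, 2, 1], "v": [10, 20, 30, 40]}
   ...: ).set_index(["a", "b"])

In [3]: df.sort_index(level="a")  # sort_remaining=True is the default
# pre-fix: rows within a == 1 stay as (1, 4), (1, 3) -- level "b" is left unsorted
# pandas:  (1, 3), (1, 4), (2, 1), (2, 2) -- the remaining level "b" is sorted too
```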
To fix this case, construct the sort order first from the provided levels and then, if `sort_remaining=True`, from the left-over levels (in index order). To facilitate this, refactor the internal `_get_columns_by_label` function to always return a `Frame`-like object (previously, if we had a `Frame` we would get back a `ColumnAccessor`, and it was only for `IndexedFrame` and above that we'd get something of `Self`-like type back). This meant that calling `_get_sorted_inds` with `by != None` was not possible on an `Index` or `MultiIndex` (the code assumed we'd get a `Frame` back). - Closes #14011 Authors: - Lawrence Mitchell (https://github.com/wence-) Approvers: - GALI PREM SAGAR (https://github.com/galipremsagar) - Matthew Roeschke (https://github.com/mroeschke) - Vyas Ramasubramani (https://github.com/vyasr) URL: https://github.com/rapidsai/cudf/pull/14033 --- python/cudf/cudf/core/dataframe.py | 12 +++++---- python/cudf/cudf/core/frame.py | 4 +-- python/cudf/cudf/core/indexed_frame.py | 31 +++++++++++++---------- python/cudf/cudf/core/series.py | 8 +++--- python/cudf/cudf/tests/test_multiindex.py | 23 +++++++++++++++++ 5 files changed, 53 insertions(+), 25 deletions(-) diff --git a/python/cudf/cudf/core/dataframe.py b/python/cudf/cudf/core/dataframe.py index e67604069f1..5a3d25a08a7 100644 --- a/python/cudf/cudf/core/dataframe.py +++ b/python/cudf/cudf/core/dataframe.py @@ -36,7 +36,7 @@ from pandas.core.dtypes.common import is_float, is_integer from pandas.io.formats import console from pandas.io.formats.printing import pprint_thing -from typing_extensions import assert_never +from typing_extensions import Self, assert_never import cudf import cudf.core.common @@ -1830,13 +1830,15 @@ def _repr_latex_(self): return self._get_renderable_dataframe().to_pandas()._repr_latex_() @_cudf_nvtx_annotate - def _get_columns_by_label(self, labels, downcast=False): + def _get_columns_by_label( + self, labels, *, downcast=False + ) -> Self | Series: """ Return columns of dataframe by `labels` If downcast is True, try and downcast from a DataFrame to a Series """ - new_data = super()._get_columns_by_label(labels, downcast) + ca = self._data.select_by_label(labels) if downcast: if is_scalar(labels): nlevels = 1 @@ -1844,11 +1846,11 @@ def _get_columns_by_label(self, labels, downcast=False): nlevels = len(labels) if self._data.multiindex is False or nlevels == self._data.nlevels: out = self._constructor_sliced._from_data( - new_data, index=self.index, name=labels + ca, index=self.index, name=labels ) return out out = self.__class__._from_data( - new_data, index=self.index, columns=new_data.to_pandas_index() + ca, index=self.index, columns=ca.to_pandas_index() ) return out diff --git a/python/cudf/cudf/core/frame.py b/python/cudf/cudf/core/frame.py index b9f052e7626..6224793d6f1 100644 --- a/python/cudf/cudf/core/frame.py +++ b/python/cudf/cudf/core/frame.py @@ -362,12 +362,12 @@ def equals(self, other): ) @_cudf_nvtx_annotate - def _get_columns_by_label(self, labels, downcast=False): + def _get_columns_by_label(self, labels, *, downcast=False) -> Self: """ Returns columns of the Frame specified by `labels` """ - return self._data.select_by_label(labels) + return self.__class__._from_data(self._data.select_by_label(labels)) @property @_cudf_nvtx_annotate diff --git a/python/cudf/cudf/core/indexed_frame.py b/python/cudf/cudf/core/indexed_frame.py index 33ac97d7ef8..69b25c51a66 100644 --- a/python/cudf/cudf/core/indexed_frame.py +++ b/python/cudf/cudf/core/indexed_frame.py
@@ -1526,7 +1526,9 @@ def sort_index( na_position : {'first', 'last'}, default 'last' Puts NaNs at the beginning if first; last puts NaNs at the end. sort_remaining : bool, default True - Not yet supported + When sorting a multiindex on a subset of its levels, + should entries be lexsorted by the remaining + (non-specified) levels as well? ignore_index : bool, default False if True, index will be replaced with RangeIndex. key : callable, optional @@ -1592,11 +1594,6 @@ def sort_index( if kind is not None: raise NotImplementedError("kind is not yet supported") - if not sort_remaining: - raise NotImplementedError( - "sort_remaining == False is not yet supported" - ) - if key is not None: raise NotImplementedError("key is not yet supported.") @@ -1609,16 +1606,22 @@ def sort_index( if level is not None: # Pandas doesn't handle na_position in case of MultiIndex. na_position = "first" if ascending is True else "last" - labels = [ - idx._get_level_label(lvl) - for lvl in (level if is_list_like(level) else (level,)) - ] - # Explicitly construct a Frame rather than using type(self) - # to avoid constructing a SingleColumnFrame (e.g. Series). - idx = Frame._from_data(idx._data.select_by_label(labels)) + if not is_list_like(level): + level = [level] + by = list(map(idx._get_level_label, level)) + if sort_remaining: + handled = set(by) + by.extend( + filter( + lambda n: n not in handled, + self.index._data.names, + ) + ) + else: + by = list(idx._data.names) inds = idx._get_sorted_inds( - ascending=ascending, na_position=na_position + by=by, ascending=ascending, na_position=na_position ) out = self._gather( GatherMap.from_column_unchecked( diff --git a/python/cudf/cudf/core/series.py b/python/cudf/cudf/core/series.py index 2fef741ac09..78be3085754 100644 --- a/python/cudf/cudf/core/series.py +++ b/python/cudf/cudf/core/series.py @@ -797,17 +797,17 @@ def deserialize(cls, header, frames): return obj - def _get_columns_by_label(self, labels, downcast=False): + def _get_columns_by_label(self, labels, *, downcast=False) -> Self: """Return the column specified by `labels` For cudf.Series, either the column, or an empty series is returned. Parameter `downcast` does not have effects. 
""" - new_data = super()._get_columns_by_label(labels, downcast) + ca = self._data.select_by_label(labels) return ( - self.__class__._from_data(data=new_data, index=self.index) - if len(new_data) > 0 + self.__class__._from_data(data=ca, index=self.index) + if len(ca) > 0 else self.__class__(dtype=self.dtype, name=self.name) ) diff --git a/python/cudf/cudf/tests/test_multiindex.py b/python/cudf/cudf/tests/test_multiindex.py index eedc9b0c174..56bd7d709b7 100644 --- a/python/cudf/cudf/tests/test_multiindex.py +++ b/python/cudf/cudf/tests/test_multiindex.py @@ -1897,3 +1897,26 @@ def test_multiindex_empty_slice_pandas_compatibility(): with cudf.option_context("mode.pandas_compatible", True): actual = cudf.from_pandas(expected) assert_eq(expected, actual, exact=False) + + +@pytest.mark.parametrize( + "levels", + itertools.chain.from_iterable( + itertools.permutations(range(3), n) for n in range(1, 4) + ), + ids=str, +) +def test_multiindex_sort_index_partial(levels): + df = pd.DataFrame( + { + "a": [3, 3, 3, 1, 1, 1, 2, 2], + "b": [4, 2, 7, -1, 11, -2, 7, 7], + "c": [4, 4, 2, 3, 3, 3, 1, 1], + "val": [1, 2, 3, 4, 5, 6, 7, 8], + } + ).set_index(["a", "b", "c"]) + cdf = cudf.from_pandas(df) + + expect = df.sort_index(level=levels, sort_remaining=True) + got = cdf.sort_index(level=levels, sort_remaining=True) + assert_eq(expect, got) From c82a70807849188274d21b595d5ded818aad4464 Mon Sep 17 00:00:00 2001 From: Chong Gao Date: Wed, 6 Sep 2023 10:57:10 +0800 Subject: [PATCH 038/150] Fix map column can not be non-nullable for java (#14003) Make map column non-nullable for java. Changes: - Add a new method to pass nullable; Deprecate the old one. - Update the tests. Authors: - Chong Gao (https://github.com/res-life) - Nghia Truong (https://github.com/ttnghia) Approvers: - Robert (Bobby) Evans (https://github.com/revans2) URL: https://github.com/rapidsai/cudf/pull/14003 --- .../ai/rapids/cudf/ColumnWriterOptions.java | 30 +++++++++++++++++++ .../test/java/ai/rapids/cudf/TableTest.java | 6 ++-- 2 files changed, 34 insertions(+), 2 deletions(-) diff --git a/java/src/main/java/ai/rapids/cudf/ColumnWriterOptions.java b/java/src/main/java/ai/rapids/cudf/ColumnWriterOptions.java index 2177f58c9de..a95c5f58f09 100644 --- a/java/src/main/java/ai/rapids/cudf/ColumnWriterOptions.java +++ b/java/src/main/java/ai/rapids/cudf/ColumnWriterOptions.java @@ -522,7 +522,11 @@ protected String[] getFlatColumnNames(String[] ret) { * Maps are List columns with a Struct named 'key_value' with a child named 'key' and a child * named 'value'. The caller of this method doesn't need to worry about this as this method will * take care of this without the knowledge of the caller. + * + * Note: This method always returns a nullabe column, cannot return non-nullable column. + * Do not use this, use the next function with the parameter `isNullable`. */ + @Deprecated public static ColumnWriterOptions mapColumn(String name, ColumnWriterOptions key, ColumnWriterOptions value) { StructColumnWriterOptions struct = structBuilder("key_value").build(); @@ -537,6 +541,32 @@ public static ColumnWriterOptions mapColumn(String name, ColumnWriterOptions key return opt; } + /** + * Add a Map Column to the schema. + *

+ * Maps are List columns with a Struct named 'key_value' with a child named 'key' and a child + * named 'value'. The caller of this method doesn't need to worry about this as this method will + * take care of this without the knowledge of the caller. + * + * Note: If this map column is used as the key of another map, pass isNullable = false; + * e.g., for map1(map2(int, int), int), map2 must be non-nullable. + * + * @param isNullable whether the returned map column is nullable. + */ + public static ColumnWriterOptions mapColumn(String name, ColumnWriterOptions key, + ColumnWriterOptions value, Boolean isNullable) { + if (key.isNullable) { + throw new IllegalArgumentException("key column cannot be nullable"); + } + StructColumnWriterOptions struct = structBuilder("key_value").build(); + struct.childColumnOptions = new ColumnWriterOptions[]{key, value}; + ColumnWriterOptions opt = listBuilder(name, isNullable) + .withStructColumn(struct) + .build(); + opt.isMap = true; + return opt; + } + /** * Creates a ListBuilder for column called 'name' */ diff --git a/java/src/test/java/ai/rapids/cudf/TableTest.java b/java/src/test/java/ai/rapids/cudf/TableTest.java index 5c0c738a20f..3740328615a 100644 --- a/java/src/test/java/ai/rapids/cudf/TableTest.java +++ b/java/src/test/java/ai/rapids/cudf/TableTest.java @@ -8064,7 +8064,8 @@ void testParquetWriteMap() throws IOException { ParquetWriterOptions options = ParquetWriterOptions.builder() .withMapColumn(mapColumn("my_map", new ColumnWriterOptions("key0", false), - new ColumnWriterOptions("value0"))).build(); + new ColumnWriterOptions("value0"), + true)).build(); File f = File.createTempFile("test-map", ".parquet"); List list1 = Arrays.asList(new HostColumnVector.StructData(Arrays.asList("a", "b"))); @@ -8562,7 +8563,8 @@ void testORCWriteMapChunked() throws IOException { ORCWriterOptions options = ORCWriterOptions.builder() .withMapColumn(mapColumn("my_map", new ColumnWriterOptions("key0", false), - new ColumnWriterOptions("value0"))).build(); + new ColumnWriterOptions("value0"), + true)).build(); File f = File.createTempFile("test-map", ".parquet"); List list1 = Arrays.asList(new HostColumnVector.StructData(Arrays.asList("a", "b"))); From 1d7a77be153c09b007410d6dc8538705fbfd73ab Mon Sep 17 00:00:00 2001 From: Divye Gala Date: Wed, 6 Sep 2023 12:13:27 -0400 Subject: [PATCH 039/150] Use `cudf::thread_index_type` in `merge.cu` (#13972) This PR uses `cudf::thread_index_type` to avoid overflows.
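A minimal sketch of the resulting grid-stride pattern (the kernel and its body are placeholders, not the actual merge logic):

```cpp
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/types.hpp>

__global__ void example_kernel(cudf::size_type num_rows)
{
  // global_thread_id() and grid_stride() return cudf::thread_index_type, so
  // blockIdx.x * blockDim.x is evaluated in a wide integer type and cannot
  // overflow for large grids.
  auto tid          = cudf::detail::grid_1d::global_thread_id();
  auto const stride = cudf::detail::grid_1d::grid_stride();
  while (tid < num_rows) {
    // Narrow back to size_type only after the bounds check has passed.
    auto const row = static_cast<cudf::size_type>(tid);
    // ... per-row work on `row` would go here ...
    tid += stride;
  }
}
```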
Authors: - Divye Gala (https://github.com/divyegala) Approvers: - Bradley Dice (https://github.com/bdice) - Vyas Ramasubramani (https://github.com/vyasr) URL: https://github.com/rapidsai/cudf/pull/13972 --- cpp/src/merge/merge.cu | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/cpp/src/merge/merge.cu b/cpp/src/merge/merge.cu index 5c54bb5661c..c0765b48205 100644 --- a/cpp/src/merge/merge.cu +++ b/cpp/src/merge/merge.cu @@ -78,11 +78,14 @@ __global__ void materialize_merged_bitmask_kernel( size_type const num_destination_rows, index_type const* const __restrict__ merged_indices) { - size_type destination_row = threadIdx.x + blockIdx.x * blockDim.x; + auto const stride = detail::grid_1d::grid_stride(); - auto active_threads = __ballot_sync(0xffff'ffffu, destination_row < num_destination_rows); + auto tid = detail::grid_1d::global_thread_id(); - while (destination_row < num_destination_rows) { + auto active_threads = __ballot_sync(0xffff'ffffu, tid < num_destination_rows); + + while (tid < num_destination_rows) { + auto const destination_row = static_cast(tid); auto const [src_side, src_row] = merged_indices[destination_row]; bool const from_left{src_side == side::LEFT}; bool source_bit_is_valid{true}; @@ -99,8 +102,8 @@ __global__ void materialize_merged_bitmask_kernel( // Only one thread writes output if (0 == threadIdx.x % warpSize) { out_validity[word_index(destination_row)] = result_mask; } - destination_row += blockDim.x * gridDim.x; - active_threads = __ballot_sync(active_threads, destination_row < num_destination_rows); + tid += stride; + active_threads = __ballot_sync(active_threads, tid < num_destination_rows); } } From 609f894fcd53b99acf0889562e78e706cb7812d8 Mon Sep 17 00:00:00 2001 From: Nghia Truong <7416935+ttnghia@users.noreply.github.com> Date: Wed, 6 Sep 2023 11:17:17 -0700 Subject: [PATCH 040/150] Temporary fix for Parquet metadata with empty value strings being ignored when writing (#14026) When writing to Parquet files, Spark needs to write pairs of key-value strings into the files' metadata. Sometimes a value string is empty. Such an empty string is silently dropped when writing the file, causing other applications (such as Spark) to read the value back and interpret it as `null` instead of the empty string in the original input, as described in https://github.com/rapidsai/cudf/issues/14024. This is incorrect and, in my testing, led to data corruption. This PR intentionally modifies the empty value string into a space character to work around the bug. This is a temporary fix while a better fix is worked on.
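A standalone sketch of the workaround (the function name and the input map are illustrative; the actual change is applied where the JNI layer builds the writer's key-value metadata):

```cpp
#include <map>
#include <string>

std::map<std::string, std::string> sanitize_kv_metadata(
  std::map<std::string, std::string> const& user_metadata)
{
  std::map<std::string, std::string> kv_metadata;
  for (auto const& [key, value] : user_metadata) {
    // An empty value string would otherwise be dropped from the file footer,
    // so substitute a single space as a stand-in for the empty string.
    kv_metadata.emplace(key, value.empty() ? std::string(" ") : value);
  }
  return kv_metadata;
}
```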
Authors: - Nghia Truong (https://github.com/ttnghia) Approvers: - Robert (Bobby) Evans (https://github.com/revans2) URL: https://github.com/rapidsai/cudf/pull/14026 --- java/src/main/native/src/TableJni.cpp | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/java/src/main/native/src/TableJni.cpp b/java/src/main/native/src/TableJni.cpp index f7ada4305db..b05fc9b7bc4 100644 --- a/java/src/main/native/src/TableJni.cpp +++ b/java/src/main/native/src/TableJni.cpp @@ -1592,7 +1592,11 @@ JNIEXPORT long JNICALL Java_ai_rapids_cudf_Table_writeParquetBufferBegin( std::map kv_metadata; std::transform(meta_keys.begin(), meta_keys.end(), meta_values.begin(), std::inserter(kv_metadata, kv_metadata.end()), - [](auto const &key, auto const &value) { return std::make_pair(key, value); }); + [](auto const &key, auto const &value) { + // The metadata value will be ignored if it is empty. + // We modify it into a space character to workaround such issue. + return std::make_pair(key, value.empty() ? std::string(" ") : value); + }); auto stats = std::make_shared(); chunked_parquet_writer_options opts = @@ -1638,7 +1642,11 @@ JNIEXPORT long JNICALL Java_ai_rapids_cudf_Table_writeParquetFileBegin( std::map kv_metadata; std::transform(meta_keys.begin(), meta_keys.end(), meta_values.begin(), std::inserter(kv_metadata, kv_metadata.end()), - [](auto const &key, auto const &value) { return std::make_pair(key, value); }); + [](auto const &key, auto const &value) { + // The metadata value will be ignored if it is empty. + // We modify it into a space character to workaround such issue. + return std::make_pair(key, value.empty() ? std::string(" ") : value); + }); sink_info sink{output_path.get()}; auto stats = std::make_shared(); From ea59dbf74e4d962ac20ebb0d6d3b71eaaeaad494 Mon Sep 17 00:00:00 2001 From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> Date: Wed, 6 Sep 2023 09:31:30 -1000 Subject: [PATCH 041/150] Raise NotImplementedError for to_datetime with z format (#14037) Avoids timezone information from being dropped in `to_datetime` when the z directive is provided ```python In [1]: import cudf In [2]: fmt = '%Y-%m-%d %H:%M:%S %Z' ...: dates = ['2010-01-01 12:00:00 UTC', '2010-01-01 12:00:00 UTC'] In [3]: cudf.to_datetime(dates, format=fmt) Out[3]: DatetimeIndex(['2010-01-01 12:00:00', '2010-01-01 12:00:00'], dtype='datetime64[ns]') ``` Authors: - Matthew Roeschke (https://github.com/mroeschke) Approvers: - Lawrence Mitchell (https://github.com/wence-) URL: https://github.com/rapidsai/cudf/pull/14037 --- python/cudf/cudf/core/tools/datetimes.py | 9 +++++++-- python/cudf/cudf/tests/test_datetime.py | 8 ++++++++ 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/python/cudf/cudf/core/tools/datetimes.py b/python/cudf/cudf/core/tools/datetimes.py index 7c4b9810df2..a759f9dc3e1 100644 --- a/python/cudf/cudf/core/tools/datetimes.py +++ b/python/cudf/cudf/core/tools/datetimes.py @@ -147,8 +147,13 @@ def to_datetime( if utc: raise NotImplementedError("utc is not yet implemented") - if format is not None and "%f" in format: - format = format.replace("%f", "%9f") + if format is not None: + if "%Z" in format or "%z" in format: + raise NotImplementedError( + "cuDF does not yet support timezone-aware datetimes" + ) + elif "%f" in format: + format = format.replace("%f", "%9f") try: if isinstance(arg, cudf.DataFrame): diff --git a/python/cudf/cudf/tests/test_datetime.py b/python/cudf/cudf/tests/test_datetime.py index 
b1685950241..4a4e9b67c2e 100644 --- a/python/cudf/cudf/tests/test_datetime.py +++ b/python/cudf/cudf/tests/test_datetime.py @@ -2148,3 +2148,11 @@ def test_daterange_pandas_compatibility(): "2010-01-01", "2010-02-01", periods=10, name="times" ) assert_eq(expected, actual) + + +@pytest.mark.parametrize("code", ["z", "Z"]) +def test_format_timezone_not_implemented(code): + with pytest.raises(NotImplementedError): + cudf.to_datetime( + ["2020-01-01 00:00:00 UTC"], format=f"%Y-%m-%d %H:%M:%S %{code}" + ) From e81d79e94f268499c8656eba1fe8de8122589780 Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Wed, 6 Sep 2023 14:48:50 -0700 Subject: [PATCH 042/150] Expose streams in public search APIs (#14034) Contributes to #925 Authors: - Vyas Ramasubramani (https://github.com/vyasr) Approvers: - David Wendt (https://github.com/davidwendt) - Nghia Truong (https://github.com/ttnghia) URL: https://github.com/rapidsai/cudf/pull/14034 --- cpp/include/cudf/search.hpp | 11 ++++- cpp/src/search/contains_column.cu | 5 ++- cpp/src/search/contains_scalar.cu | 4 +- cpp/src/search/search_ordered.cu | 10 ++--- cpp/tests/CMakeLists.txt | 1 + cpp/tests/streams/search_test.cpp | 69 +++++++++++++++++++++++++++++++ 6 files changed, 90 insertions(+), 10 deletions(-) create mode 100644 cpp/tests/streams/search_test.cpp diff --git a/cpp/include/cudf/search.hpp b/cpp/include/cudf/search.hpp index fee22786d7a..49acce6a63b 100644 --- a/cpp/include/cudf/search.hpp +++ b/cpp/include/cudf/search.hpp @@ -63,6 +63,7 @@ namespace cudf { * @param needles Values for which to find the insert locations in the search space * @param column_order Vector of column sort order * @param null_precedence Vector of null_precedence enums needles + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return A non-nullable column of elements containing the insertion points */ @@ -71,6 +72,7 @@ std::unique_ptr lower_bound( table_view const& needles, std::vector const& column_order, std::vector const& null_precedence, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -103,6 +105,7 @@ std::unique_ptr lower_bound( * @param needles Values for which to find the insert locations in the search space * @param column_order Vector of column sort order * @param null_precedence Vector of null_precedence enums needles + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return A non-nullable column of elements containing the insertion points */ @@ -111,6 +114,7 @@ std::unique_ptr upper_bound( table_view const& needles, std::vector const& column_order, std::vector const& null_precedence, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -128,9 +132,12 @@ std::unique_ptr upper_bound( * * @param haystack The column containing search space * @param needle A scalar value to check for existence in the search space + * @param stream CUDA stream used for device memory operations and kernel launches * @return true if the given `needle` value exists in the `haystack` column */ -bool contains(column_view const& haystack, scalar const& needle); +bool contains(column_view const& haystack, + scalar const& 
needle, + rmm::cuda_stream_view stream = cudf::get_default_stream()); /** * @brief Check if the given `needles` values exists in the `haystack` column. @@ -149,12 +156,14 @@ bool contains(column_view const& haystack, scalar const& needle); * * @param haystack The column containing search space * @param needles A column of values to check for existence in the search space + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return A BOOL column indicating if each element in `needles` exists in the search space */ std::unique_ptr contains( column_view const& haystack, column_view const& needles, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of group diff --git a/cpp/src/search/contains_column.cu b/cpp/src/search/contains_column.cu index 08bcf8d48d8..4363bd212fe 100644 --- a/cpp/src/search/contains_column.cu +++ b/cpp/src/search/contains_column.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022, NVIDIA CORPORATION. + * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -154,10 +154,11 @@ std::unique_ptr contains(column_view const& haystack, std::unique_ptr contains(column_view const& haystack, column_view const& needles, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::contains(haystack, needles, cudf::get_default_stream(), mr); + return detail::contains(haystack, needles, stream, mr); } } // namespace cudf diff --git a/cpp/src/search/contains_scalar.cu b/cpp/src/search/contains_scalar.cu index 7c16a1b12ef..0b344ec347b 100644 --- a/cpp/src/search/contains_scalar.cu +++ b/cpp/src/search/contains_scalar.cu @@ -160,10 +160,10 @@ bool contains(column_view const& haystack, scalar const& needle, rmm::cuda_strea } // namespace detail -bool contains(column_view const& haystack, scalar const& needle) +bool contains(column_view const& haystack, scalar const& needle, rmm::cuda_stream_view stream) { CUDF_FUNC_RANGE(); - return detail::contains(haystack, needle, cudf::get_default_stream()); + return detail::contains(haystack, needle, stream); } } // namespace cudf diff --git a/cpp/src/search/search_ordered.cu b/cpp/src/search/search_ordered.cu index bf0eb8d46f8..3b5dbef0401 100644 --- a/cpp/src/search/search_ordered.cu +++ b/cpp/src/search/search_ordered.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, NVIDIA CORPORATION. + * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -144,22 +144,22 @@ std::unique_ptr<column> lower_bound(table_view const& haystack,
                                     table_view const& needles,
                                     std::vector<order> const& column_order,
                                     std::vector<null_order> const& null_precedence,
+                                    rmm::cuda_stream_view stream,
                                     rmm::mr::device_memory_resource* mr)
 {
   CUDF_FUNC_RANGE();
-  return detail::lower_bound(
-    haystack, needles, column_order, null_precedence, cudf::get_default_stream(), mr);
+  return detail::lower_bound(haystack, needles, column_order, null_precedence, stream, mr);
 }

 std::unique_ptr<column> upper_bound(table_view const& haystack,
                                     table_view const& needles,
                                     std::vector<order> const& column_order,
                                     std::vector<null_order> const& null_precedence,
+                                    rmm::cuda_stream_view stream,
                                     rmm::mr::device_memory_resource* mr)
 {
   CUDF_FUNC_RANGE();
-  return detail::upper_bound(
-    haystack, needles, column_order, null_precedence, cudf::get_default_stream(), mr);
+  return detail::upper_bound(haystack, needles, column_order, null_precedence, stream, mr);
 }

 }  // namespace cudf
diff --git a/cpp/tests/CMakeLists.txt b/cpp/tests/CMakeLists.txt
index 1bb1987198d..a69dc9bf2f8 100644
--- a/cpp/tests/CMakeLists.txt
+++ b/cpp/tests/CMakeLists.txt
@@ -626,6 +626,7 @@ ConfigureTest(STREAM_GROUPBY_TEST streams/groupby_test.cpp STREAM_MODE testing)
 ConfigureTest(STREAM_CONCATENATE_TEST streams/concatenate_test.cpp STREAM_MODE testing)
 ConfigureTest(STREAM_FILLING_TEST streams/filling_test.cpp STREAM_MODE testing)
 ConfigureTest(STREAM_REPLACE_TEST streams/replace_test.cpp STREAM_MODE testing)
+ConfigureTest(STREAM_SEARCH_TEST streams/search_test.cpp STREAM_MODE testing)

 # ##################################################################################################
 # Install tests ####################################################################
diff --git a/cpp/tests/streams/search_test.cpp b/cpp/tests/streams/search_test.cpp
new file mode 100644
index 00000000000..fbe17fb0cc4
--- /dev/null
+++ b/cpp/tests/streams/search_test.cpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2023, NVIDIA CORPORATION.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cudf_test/base_fixture.hpp>
+#include <cudf_test/column_wrapper.hpp>
+#include <cudf_test/default_stream.hpp>
+
+#include <cudf/scalar/scalar.hpp>
+#include <cudf/search.hpp>
+#include <cudf/table/table_view.hpp>
+
+class SearchTest : public cudf::test::BaseFixture {};
+
+TEST_F(SearchTest, LowerBound)
+{
+  cudf::test::fixed_width_column_wrapper<int32_t> column{10, 20, 30, 40, 50};
+  cudf::test::fixed_width_column_wrapper<int32_t> values{0, 7, 10, 11, 30, 32, 40, 47, 50, 90};
+  cudf::test::fixed_width_column_wrapper<int32_t> expect{0, 0, 0, 1, 2, 3, 3, 4, 4, 5};
+
+  cudf::lower_bound({cudf::table_view{{column}}},
+                    {cudf::table_view{{values}}},
+                    {cudf::order::ASCENDING},
+                    {cudf::null_order::BEFORE},
+                    cudf::test::get_default_stream());
+}
+
+TEST_F(SearchTest, UpperBound)
+{
+  cudf::test::fixed_width_column_wrapper<int32_t> column{10, 20, 30, 40, 50};
+  cudf::test::fixed_width_column_wrapper<int32_t> values{0, 7, 10, 11, 30, 32, 40, 47, 50, 90};
+  cudf::test::fixed_width_column_wrapper<int32_t> expect{0, 0, 0, 1, 2, 3, 3, 4, 4, 5};
+
+  cudf::upper_bound({cudf::table_view{{column}}},
+                    {cudf::table_view{{values}}},
+                    {cudf::order::ASCENDING},
+                    {cudf::null_order::BEFORE},
+                    cudf::test::get_default_stream());
+}
+
+TEST_F(SearchTest, ContainsScalar)
+{
+  cudf::test::fixed_width_column_wrapper<int32_t> column{0, 1, 17, 19, 23, 29, 71};
+  cudf::numeric_scalar<int32_t> scalar{23, true, cudf::test::get_default_stream()};
+
+  cudf::contains(column, scalar, cudf::test::get_default_stream());
+}
+
+TEST_F(SearchTest, ContainsColumn)
+{
+  cudf::test::fixed_width_column_wrapper<int32_t> haystack{0, 1, 17, 19, 23, 29, 71};
+  cudf::test::fixed_width_column_wrapper<int32_t> needles{17, 19, 45, 72};
+
+  cudf::test::fixed_width_column_wrapper<bool> expect{1, 1, 0, 0};
+
+  cudf::contains(haystack, needles, cudf::test::get_default_stream());
+}

From 0190c2921d0278f80328240b76a22e6628cb24f7 Mon Sep 17 00:00:00 2001
From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com>
Date: Wed, 6 Sep 2023 14:41:46 -1000
Subject: [PATCH 043/150] Raise TypeError for any non-parseable argument in
 to_datetime (#14044)

Avoids the following incorrect behavior

```python
In [7]: cudf.to_datetime([True])
Out[7]: GenericIndex([True], dtype='bool')

In [1]: import pandas

In [2]: pandas.to_datetime([True])
TypeError: <class 'bool'> is not convertible to datetime
```
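For contrast, a sketch of the intended behavior after this change (illustrative only; the exact error text comes from the `_process_col` change in the diff below):

```python
import cudf

# Bools are not parseable as datetimes, so this should now raise
# instead of returning the input unchanged.
try:
    cudf.to_datetime([True])
except TypeError as err:
    print(err)  # e.g. "dtype bool cannot be converted to datetime64[ns]"
```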
Authors:
  - Matthew Roeschke (https://github.com/mroeschke)

Approvers:
  - Bradley Dice (https://github.com/bdice)

URL: https://github.com/rapidsai/cudf/pull/14044
---
 python/cudf/cudf/core/tools/datetimes.py | 16 ++++++++--------
 python/cudf/cudf/tests/test_datetime.py  |  6 ++++++
 2 files changed, 14 insertions(+), 8 deletions(-)

diff --git a/python/cudf/cudf/core/tools/datetimes.py b/python/cudf/cudf/core/tools/datetimes.py
index a759f9dc3e1..f736e055163 100644
--- a/python/cudf/cudf/core/tools/datetimes.py
+++ b/python/cudf/cudf/core/tools/datetimes.py
@@ -294,12 +294,8 @@ def to_datetime(
 def _process_col(col, unit, dayfirst, infer_datetime_format, format):
     if col.dtype.kind == "M":
         return col
-    elif col.dtype.kind == "m":
-        raise TypeError(
-            f"dtype {col.dtype} cannot be converted to {_unit_dtype_map[unit]}"
-        )

-    if col.dtype.kind in ("f"):
+    elif col.dtype.kind in ("f"):
         if unit not in (None, "ns"):
             factor = cudf.Scalar(
                 column.datetime._unit_to_nanoseconds_conversion[unit]
@@ -325,8 +321,9 @@ def _process_col(col, unit, dayfirst, infer_datetime_format, format):
             )
         else:
             col = col.as_datetime_column(dtype="datetime64[ns]")
+        return col

-    if col.dtype.kind in ("i"):
+    elif col.dtype.kind in ("i"):
         if unit in ("D", "h", "m"):
             factor = cudf.Scalar(
                 column.datetime._unit_to_nanoseconds_conversion[unit]
@@ -340,6 +337,7 @@ def _process_col(col, unit, dayfirst, infer_datetime_format, format):
             )
         else:
             col = col.as_datetime_column(dtype=_unit_dtype_map[unit])
+        return col

     elif col.dtype.kind in ("O"):
         if unit not in (None, "ns") or col.null_count == len(col):
@@ -364,11 +362,13 @@ def _process_col(col, unit, dayfirst, infer_datetime_format, format):
                 format = column.datetime.infer_format(
                     element=col.element_indexing(0)
                 )
-        col = col.as_datetime_column(
+        return col.as_datetime_column(
             dtype=_unit_dtype_map[unit],
             format=format,
         )
-    return col
+    raise TypeError(
+        f"dtype {col.dtype} cannot be converted to {_unit_dtype_map[unit]}"
+    )


 def get_units(value):
diff --git a/python/cudf/cudf/tests/test_datetime.py b/python/cudf/cudf/tests/test_datetime.py
index 4a4e9b67c2e..4c20258ae67 100644
--- a/python/cudf/cudf/tests/test_datetime.py
+++ b/python/cudf/cudf/tests/test_datetime.py
@@ -2156,3 +2156,9 @@ def test_format_timezone_not_implemented(code):
         cudf.to_datetime(
             ["2020-01-01 00:00:00 UTC"], format=f"%Y-%m-%d %H:%M:%S %{code}"
         )
+
+
+@pytest.mark.parametrize("arg", [True, False])
+def test_args_not_datetime_typerror(arg):
+    with pytest.raises(TypeError):
+        cudf.to_datetime([arg])

From dd6553a22d6cfcc2f017775a57d7b49783d62a9c Mon Sep 17 00:00:00 2001
From: Mark Harris <783069+harrism@users.noreply.github.com>
Date: Thu, 7 Sep 2023 16:07:23 +1000
Subject: [PATCH 044/150] Ignore compile_commands.json (#14048)

Fixes #14047

Adds compile_commands.json to .gitignore.

Authors:
  - Mark Harris (https://github.com/harrism)

Approvers:
  - Bradley Dice (https://github.com/bdice)

URL: https://github.com/rapidsai/cudf/pull/14048
---
 .gitignore | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.gitignore b/.gitignore
index fb5c301fe3f..a9bf0854d65 100644
--- a/.gitignore
+++ b/.gitignore
@@ -16,6 +16,7 @@ DartConfiguration.tcl
 *.spec
 .nfs*
 .clangd
+compile_commands.json

 ## Python build directories & artifacts
 dask-worker-space/

From 7331922486c0e5f1e6a765efa8063aa7603c7add Mon Sep 17 00:00:00 2001
From: GALI PREM SAGAR
Date: Thu, 7 Sep 2023 09:59:52 -0500
Subject: [PATCH 045/150] Raise `NotImplementedError` for
 `MultiIndex.to_series` (#14049)

Fixes #14035

This PR raises an error for `MultiIndex.to_series` because we cannot
store `tuple` type columns in `cudf`.
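Roughly, the new guard behaves like this (illustrative sketch, mirroring the test added below):

```python
import cudf

midx = cudf.MultiIndex.from_tuples([("a", "b")])

# Converting a MultiIndex to a Series would require a column of tuples,
# which cudf cannot represent, so this now fails loudly.
try:
    midx.to_series()
except NotImplementedError as err:
    print(err)  # MultiIndex.to_series isn't implemented yet.
```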
Authors:
  - GALI PREM SAGAR (https://github.com/galipremsagar)

Approvers:
  - Lawrence Mitchell (https://github.com/wence-)

URL: https://github.com/rapidsai/cudf/pull/14049
---
 python/cudf/cudf/core/multiindex.py       | 6 ++++++
 python/cudf/cudf/tests/test_multiindex.py | 6 ++++++
 2 files changed, 12 insertions(+)

diff --git a/python/cudf/cudf/core/multiindex.py b/python/cudf/cudf/core/multiindex.py
index 12da69740d8..bc6726879c1 100644
--- a/python/cudf/cudf/core/multiindex.py
+++ b/python/cudf/cudf/core/multiindex.py
@@ -219,6 +219,12 @@ def names(self, value):
             )
         self._names = pd.core.indexes.frozen.FrozenList(value)

+    @_cudf_nvtx_annotate
+    def to_series(self, index=None, name=None):
+        raise NotImplementedError(
+            "MultiIndex.to_series isn't implemented yet."
+        )
+
     @_cudf_nvtx_annotate
     def astype(self, dtype, copy: bool = True):
         if not is_object_dtype(dtype):
diff --git a/python/cudf/cudf/tests/test_multiindex.py b/python/cudf/cudf/tests/test_multiindex.py
index 56bd7d709b7..3c843ace0a8 100644
--- a/python/cudf/cudf/tests/test_multiindex.py
+++ b/python/cudf/cudf/tests/test_multiindex.py
@@ -1920,3 +1920,9 @@ def test_multiindex_sort_index_partial(levels):
     expect = df.sort_index(level=levels, sort_remaining=True)
     got = cdf.sort_index(level=levels, sort_remaining=True)
     assert_eq(expect, got)
+
+
+def test_multiindex_to_series_error():
+    midx = cudf.MultiIndex.from_tuples([("a", "b")])
+    with pytest.raises(NotImplementedError):
+        midx.to_series()

From dc5f5006b1e7c9d5ca3649188833e0a3b44cc841 Mon Sep 17 00:00:00 2001
From: GALI PREM SAGAR
Date: Thu, 7 Sep 2023 10:01:37 -0500
Subject: [PATCH 046/150] Fix `IntervalIndex.union` to preserve type-metadata
 (#14051)

Fixes: #14041

This PR fixes `fillna` so that it preserves the type-metadata for
`IntervalColumn`.
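In effect (illustrative sketch; `union` reaches `fillna` internally, and the inputs match the test case added below):

```python
import cudf
import pandas as pd

idx1 = cudf.from_pandas(pd.IntervalIndex.from_tuples([(0, 2), (0, 2), (2, 4)]))
idx2 = cudf.from_pandas(pd.IntervalIndex.from_tuples([(0, 2), (2, 4)]))

# With the fix, the result keeps its interval type-metadata instead of
# decaying to a plain struct-backed column.
print(idx1.union(idx2))
```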
Authors:
  - GALI PREM SAGAR (https://github.com/galipremsagar)

Approvers:
  - Bradley Dice (https://github.com/bdice)

URL: https://github.com/rapidsai/cudf/pull/14051
---
 python/cudf/cudf/core/column/categorical.py | 13 +------------
 python/cudf/cudf/core/column/column.py      |  2 +-
 python/cudf/cudf/core/column/decimal.py     |  5 +----
 python/cudf/cudf/tests/test_index.py        |  4 ++++
 4 files changed, 7 insertions(+), 17 deletions(-)

diff --git a/python/cudf/cudf/core/column/categorical.py b/python/cudf/cudf/core/column/categorical.py
index eaffc18db70..5be609c81bc 100644
--- a/python/cudf/cudf/core/column/categorical.py
+++ b/python/cudf/cudf/core/column/categorical.py
@@ -1272,18 +1272,7 @@ def fillna(
                 self.codes.dtype
             )

-        result = super().fillna(value=fill_value, method=method)
-
-        result = column.build_categorical_column(
-            categories=self.dtype.categories._values,
-            codes=column.build_column(result.base_data, dtype=result.dtype),
-            offset=result.offset,
-            size=result.size,
-            mask=result.base_mask,
-            ordered=self.dtype.ordered,
-        )
-
-        return result
+        return super().fillna(value=fill_value, method=method)

     def indices_of(
         self, value: ScalarLike
diff --git a/python/cudf/cudf/core/column/column.py b/python/cudf/cudf/core/column/column.py
index 9dde17a1045..a8735a1dd8d 100644
--- a/python/cudf/cudf/core/column/column.py
+++ b/python/cudf/cudf/core/column/column.py
@@ -715,7 +715,7 @@ def fillna(
         """
         return libcudf.replace.replace_nulls(
             input_col=self, replacement=value, method=method, dtype=dtype
-        )
+        )._with_type_metadata(self.dtype)

     def isnull(self) -> ColumnBase:
         """Identify missing values in a Column."""
diff --git a/python/cudf/cudf/core/column/decimal.py b/python/cudf/cudf/core/column/decimal.py
index a8a707ec805..5a823c5f7c3 100644
--- a/python/cudf/cudf/core/column/decimal.py
+++ b/python/cudf/cudf/core/column/decimal.py
@@ -147,10 +147,7 @@ def fillna(
                 "integer values"
             )

-        result = libcudf.replace.replace_nulls(
-            input_col=self, replacement=value, method=method, dtype=dtype
-        )
-        return result._with_type_metadata(self.dtype)
+        return super().fillna(value=value, method=method)

     def normalize_binop_value(self, other):
         if isinstance(other, ColumnBase):
diff --git a/python/cudf/cudf/tests/test_index.py b/python/cudf/cudf/tests/test_index.py
index 359b3c519de..5730ecc4ae7 100644
--- a/python/cudf/cudf/tests/test_index.py
+++ b/python/cudf/cudf/tests/test_index.py
@@ -2050,6 +2050,10 @@ def test_range_index_concat(objs):
         (pd.Index([0, 1, 2, 30], name="a"), [90, 100]),
         (pd.Index([0, 1, 2, 30]), pd.Index([0, 10, 1.0, 11])),
         (pd.Index(["a", "b", "c", "d", "c"]), pd.Index(["a", "c", "z"])),
+        (
+            pd.IntervalIndex.from_tuples([(0, 2), (0, 2), (2, 4)]),
+            pd.IntervalIndex.from_tuples([(0, 2), (2, 4)]),
+        ),
     ],
 )
 @pytest.mark.parametrize("sort", [None, False])

From 6945c4f8b9a0f8497b1f9f662a2015bdc4992048 Mon Sep 17 00:00:00 2001
From: GALI PREM SAGAR
Date: Thu, 7 Sep 2023 12:04:23 -0500
Subject: [PATCH 047/150] Raise `MixedTypeError` when a column of mixed-dtype
 is being constructed (#14050)

Fixes #14038

This PR introduces changes that raise an error when a column of
`object` dtype is being constructed from data that is neither strings
nor booleans.
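A sketch of the new behavior (illustrative; the input matches the case added to the tests below):

```python
import cudf
import pandas as pd

# An object-dtype input whose values are neither strings nor booleans
# is now rejected instead of silently producing a mixed column.
try:
    cudf.Index(pd.Index([1, 2, 3, 4], dtype="object"))
except TypeError as err:
    print(err)  # Cannot create column with mixed types
```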
Authors:
  - GALI PREM SAGAR (https://github.com/galipremsagar)

Approvers:
  - Matthew Roeschke (https://github.com/mroeschke)

URL: https://github.com/rapidsai/cudf/pull/14050
---
 python/cudf/cudf/core/column/column.py | 19 ++++++++++++++-----
 python/cudf/cudf/tests/test_index.py   |  3 ++-
 python/cudf/cudf/tests/test_parquet.py |  4 ++--
 python/cudf/cudf/tests/test_series.py  |  6 +++++-
 4 files changed, 23 insertions(+), 9 deletions(-)

diff --git a/python/cudf/cudf/core/column/column.py b/python/cudf/cudf/core/column/column.py
index a8735a1dd8d..b4ad6765207 100644
--- a/python/cudf/cudf/core/column/column.py
+++ b/python/cudf/cudf/core/column/column.py
@@ -2062,10 +2062,15 @@ def as_column(
             )
         else:
             pyarrow_array = pa.array(arbitrary, from_pandas=nan_as_null)
-            if arbitrary.dtype == cudf.dtype("object") and isinstance(
-                pyarrow_array, (pa.DurationArray, pa.TimestampArray)
+            if (
+                arbitrary.dtype == cudf.dtype("object")
+                and cudf.dtype(pyarrow_array.type.to_pandas_dtype())
+                != cudf.dtype(arbitrary.dtype)
+                and not is_bool_dtype(
+                    cudf.dtype(pyarrow_array.type.to_pandas_dtype())
+                )
             ):
-                raise TypeError("Cannot create column with mixed types")
+                raise MixedTypeError("Cannot create column with mixed types")
             if isinstance(pyarrow_array.type, pa.Decimal128Type):
                 pyarrow_type = cudf.Decimal128Dtype.from_arrow(
                     pyarrow_array.type
@@ -2436,8 +2441,12 @@ def as_column(
         if (
             isinstance(arbitrary, pd.Index)
             and arbitrary.dtype == cudf.dtype("object")
-            and isinstance(
-                pyarrow_array, (pa.DurationArray, pa.TimestampArray)
+            and (
+                cudf.dtype(pyarrow_array.type.to_pandas_dtype())
+                != cudf.dtype(arbitrary.dtype)
+                and not is_bool_dtype(
+                    cudf.dtype(pyarrow_array.type.to_pandas_dtype())
+                )
             )
         ):
             raise MixedTypeError(
diff --git a/python/cudf/cudf/tests/test_index.py b/python/cudf/cudf/tests/test_index.py
index 5730ecc4ae7..819527ac312 100644
--- a/python/cudf/cudf/tests/test_index.py
+++ b/python/cudf/cudf/tests/test_index.py
@@ -2676,10 +2676,11 @@ def test_scalar_getitem(self, index_values, i):
             12,
             20,
         ],
+        [1, 2, 3, 4],
     ],
 )
 def test_index_mixed_dtype_error(data):
-    pi = pd.Index(data)
+    pi = pd.Index(data, dtype="object")
     with pytest.raises(TypeError):
         cudf.Index(pi)
diff --git a/python/cudf/cudf/tests/test_parquet.py b/python/cudf/cudf/tests/test_parquet.py
index 66c4a253423..b892cc62ac4 100644
--- a/python/cudf/cudf/tests/test_parquet.py
+++ b/python/cudf/cudf/tests/test_parquet.py
@@ -2374,11 +2374,11 @@ def test_parquet_writer_list_statistics(tmpdir):
         for i, col in enumerate(pd_slice):
             stats = pq_file.metadata.row_group(rg).column(i).statistics

-            actual_min = cudf.Series(pd_slice[col].explode().explode()).min()
+            actual_min = pd_slice[col].explode().explode().dropna().min()
             stats_min = stats.min
             assert normalized_equals(actual_min, stats_min)

-            actual_max = cudf.Series(pd_slice[col].explode().explode()).max()
+            actual_max = pd_slice[col].explode().explode().dropna().max()
             stats_max = stats.max
             assert normalized_equals(actual_max, stats_max)
diff --git a/python/cudf/cudf/tests/test_series.py b/python/cudf/cudf/tests/test_series.py
index 51c6bb1634d..783d7d31d7f 100644
--- a/python/cudf/cudf/tests/test_series.py
+++ b/python/cudf/cudf/tests/test_series.py
@@ -2187,11 +2187,15 @@ def test_series_init_error():
     )


-@pytest.mark.parametrize("dtype", ["datetime64[ns]", "timedelta64[ns]"])
+@pytest.mark.parametrize(
+    "dtype", ["datetime64[ns]", "timedelta64[ns]", "object", "str"]
+)
 def test_series_mixed_dtype_error(dtype):
     ps = pd.concat([pd.Series([1, 2, 3], dtype=dtype), pd.Series([10, 11])])
     with pytest.raises(TypeError):
         cudf.Series(ps)
+    with pytest.raises(TypeError):
+        cudf.Series(ps.array)


 @pytest.mark.parametrize("data", [[True, False, None], [10, 200, 300]])

From c9d88219ce6e920b8fad977ade437bf87d1d5099 Mon Sep 17 00:00:00 2001
From: GALI PREM SAGAR
Date: Thu, 7 Sep 2023 12:34:51 -0500
Subject: [PATCH 048/150] Fix empty string column construction (#14052)

Fixes #14046

This PR fixes empty string column construction that arises due to a
corner-case in the way pyarrow constructs arrays.
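The corner case, roughly (illustrative sketch; the exact round-trip is covered by the new test below):

```python
import cudf
import pandas as pd

# pyarrow maps an empty object array to a NullArray; with this fix the
# resulting cudf column round-trips as an (empty) string column instead.
gidx = cudf.from_pandas(pd.Index([], dtype="object"))
print(gidx.dtype)  # object, backed by a string column rather than nulls
```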
Authors:
  - GALI PREM SAGAR (https://github.com/galipremsagar)

Approvers:
  - Bradley Dice (https://github.com/bdice)

URL: https://github.com/rapidsai/cudf/pull/14052
---
 python/cudf/cudf/core/column/column.py   | 15 +++++++++++++++
 python/cudf/cudf/tests/test_dataframe.py |  5 +----
 python/cudf/cudf/tests/test_index.py     | 24 ++++++++++++++++++++++++
 3 files changed, 40 insertions(+), 4 deletions(-)

diff --git a/python/cudf/cudf/core/column/column.py b/python/cudf/cudf/core/column/column.py
index b4ad6765207..59ab3569814 100644
--- a/python/cudf/cudf/core/column/column.py
+++ b/python/cudf/cudf/core/column/column.py
@@ -2438,6 +2438,21 @@ def as_column(
                 from_pandas=True if nan_as_null is None else nan_as_null,
             )

+            if (
+                isinstance(pyarrow_array, pa.NullArray)
+                and pa_type is None
+                and dtype is None
+                and getattr(arbitrary, "dtype", None)
+                == cudf.dtype("object")
+            ):
+                # pa.array constructor returns a NullArray
+                # for empty arrays, instead of a StringArray.
+                # This issue is only specific to this dtype,
+                # all other dtypes, result in their corresponding
+                # arrow array creation.
+                dtype = cudf.dtype("str")
+                pyarrow_array = pyarrow_array.cast(np_to_pa_dtype(dtype))
+
         if (
             isinstance(arbitrary, pd.Index)
             and arbitrary.dtype == cudf.dtype("object")
diff --git a/python/cudf/cudf/tests/test_dataframe.py b/python/cudf/cudf/tests/test_dataframe.py
index 3c84cfe48c4..44d0b9249d0 100644
--- a/python/cudf/cudf/tests/test_dataframe.py
+++ b/python/cudf/cudf/tests/test_dataframe.py
@@ -7256,10 +7256,7 @@ def test_dataframe_keys(df):
 def test_series_keys(ps):
     gds = cudf.from_pandas(ps)

-    if len(ps) == 0 and not isinstance(ps.index, pd.RangeIndex):
-        assert_eq(ps.keys().astype("float64"), gds.keys())
-    else:
-        assert_eq(ps.keys(), gds.keys())
+    assert_eq(ps.keys(), gds.keys())


 @pytest_unmark_spilling
diff --git a/python/cudf/cudf/tests/test_index.py b/python/cudf/cudf/tests/test_index.py
index 819527ac312..506edd5b3f3 100644
--- a/python/cudf/cudf/tests/test_index.py
+++ b/python/cudf/cudf/tests/test_index.py
@@ -20,6 +20,7 @@
     as_index,
 )
 from cudf.testing._utils import (
+    ALL_TYPES,
     FLOAT_TYPES,
     NUMERIC_TYPES,
     OTHER_TYPES,
@@ -2703,3 +2704,26 @@ def test_index_getitem_time_duration(dtype):
             assert gidx[i] is pidx[i]
         else:
             assert_eq(gidx[i], pidx[i])
+
+
+@pytest.mark.parametrize("dtype", ALL_TYPES)
+def test_index_empty_from_pandas(request, dtype):
+    request.node.add_marker(
+        pytest.mark.xfail(
+            condition=not PANDAS_GE_200
+            and dtype
+            in {
+                "datetime64[ms]",
+                "datetime64[s]",
+                "datetime64[us]",
+                "timedelta64[ms]",
+                "timedelta64[s]",
+                "timedelta64[us]",
+            },
+            reason="Fixed in pandas-2.0",
+        )
+    )
+    pidx = pd.Index([], dtype=dtype)
+    gidx = cudf.from_pandas(pidx)
+
+    assert_eq(pidx, gidx)

From b4da39cfbe569e290ae42ca9cf8ff868d5788757 Mon Sep 17 00:00:00 2001
From: Vyas Ramasubramani
Date: Thu, 7 Sep 2023 14:11:08 -0700
Subject: [PATCH 049/150] Use thread_index_type to avoid out of bounds
 accesses in conditional joins (#13971)

See #10368 (and more recently #13771).

Authors:
  - Vyas Ramasubramani (https://github.com/vyasr)

Approvers:
  - Bradley Dice (https://github.com/bdice)
  - Yunsong Wang (https://github.com/PointKernel)
  - David Wendt (https://github.com/davidwendt)

URL: https://github.com/rapidsai/cudf/pull/13971
---
 cpp/src/join/conditional_join_kernels.cuh | 41 ++++++++++++-----------
 1 file changed, 22 insertions(+), 19 deletions(-)

diff --git a/cpp/src/join/conditional_join_kernels.cuh b/cpp/src/join/conditional_join_kernels.cuh
index dc455ad9cef..f665aba698f 100644
--- a/cpp/src/join/conditional_join_kernels.cuh
+++ b/cpp/src/join/conditional_join_kernels.cuh
@@ -67,23 +67,25 @@ __global__ void compute_conditional_join_output_size(
     &intermediate_storage[threadIdx.x * device_expression_data.num_intermediates];

   std::size_t thread_counter{0};
-  cudf::size_type const start_idx      = threadIdx.x + blockIdx.x * block_size;
-  cudf::size_type const stride         = block_size * gridDim.x;
-  cudf::size_type const left_num_rows  = left_table.num_rows();
-  cudf::size_type const right_num_rows = right_table.num_rows();
-  auto const outer_num_rows            = (swap_tables ? right_num_rows : left_num_rows);
-  auto const inner_num_rows            = (swap_tables ? left_num_rows : right_num_rows);
+  auto const start_idx = cudf::detail::grid_1d::global_thread_id();
+  auto const stride    = cudf::detail::grid_1d::grid_stride();
+
+  cudf::thread_index_type const left_num_rows  = left_table.num_rows();
+  cudf::thread_index_type const right_num_rows = right_table.num_rows();
+  auto const outer_num_rows = (swap_tables ? right_num_rows : left_num_rows);
+  auto const inner_num_rows = (swap_tables ? left_num_rows : right_num_rows);

   auto evaluator = cudf::ast::detail::expression_evaluator<has_nulls>(
     left_table, right_table, device_expression_data);

-  for (cudf::size_type outer_row_index = start_idx; outer_row_index < outer_num_rows;
+  for (cudf::thread_index_type outer_row_index = start_idx; outer_row_index < outer_num_rows;
        outer_row_index += stride) {
     bool found_match = false;
-    for (cudf::size_type inner_row_index = 0; inner_row_index < inner_num_rows; inner_row_index++) {
-      auto output_dest = cudf::ast::detail::value_expression_result<bool, has_nulls>();
-      auto const left_row_index  = swap_tables ? inner_row_index : outer_row_index;
-      auto const right_row_index = swap_tables ? outer_row_index : inner_row_index;
+    for (cudf::thread_index_type inner_row_index = 0; inner_row_index < inner_num_rows;
+         ++inner_row_index) {
+      auto output_dest = cudf::ast::detail::value_expression_result<bool, has_nulls>();
+      cudf::size_type const left_row_index  = swap_tables ? inner_row_index : outer_row_index;
+      cudf::size_type const right_row_index = swap_tables ? outer_row_index : inner_row_index;
       evaluator.evaluate(
         output_dest, left_row_index, right_row_index, 0, thread_intermediate_storage);
       if (output_dest.is_valid() && output_dest.value()) {
@@ -161,18 +163,18 @@ __global__ void conditional_join(table_device_view left_table,
   auto thread_intermediate_storage =
     &intermediate_storage[threadIdx.x * device_expression_data.num_intermediates];

-  int const warp_id                    = threadIdx.x / detail::warp_size;
-  int const lane_id                    = threadIdx.x % detail::warp_size;
-  cudf::size_type const left_num_rows  = left_table.num_rows();
-  cudf::size_type const right_num_rows = right_table.num_rows();
-  auto const outer_num_rows            = (swap_tables ? right_num_rows : left_num_rows);
-  auto const inner_num_rows            = (swap_tables ? left_num_rows : right_num_rows);
+  int const warp_id                            = threadIdx.x / detail::warp_size;
+  int const lane_id                            = threadIdx.x % detail::warp_size;
+  cudf::thread_index_type const left_num_rows  = left_table.num_rows();
+  cudf::thread_index_type const right_num_rows = right_table.num_rows();
+  cudf::thread_index_type const outer_num_rows = (swap_tables ? right_num_rows : left_num_rows);
+  cudf::thread_index_type const inner_num_rows = (swap_tables ? left_num_rows : right_num_rows);

   if (0 == lane_id) { current_idx_shared[warp_id] = 0; }

   __syncwarp();

-  cudf::size_type outer_row_index = threadIdx.x + blockIdx.x * block_size;
+  auto outer_row_index = cudf::detail::grid_1d::global_thread_id();

   unsigned int const activemask = __ballot_sync(0xffff'ffffu, outer_row_index < outer_num_rows);

@@ -181,7 +183,8 @@ __global__ void conditional_join(table_device_view left_table,
   if (outer_row_index < outer_num_rows) {
     bool found_match = false;
-    for (size_type inner_row_index(0); inner_row_index < inner_num_rows; ++inner_row_index) {
+    for (thread_index_type inner_row_index(0); inner_row_index < inner_num_rows;
+         ++inner_row_index) {
       auto output_dest           = cudf::ast::detail::value_expression_result<bool, has_nulls>();
       auto const left_row_index  = swap_tables ? inner_row_index : outer_row_index;
       auto const right_row_index = swap_tables ? outer_row_index : inner_row_index;

From b2ab2566c155b4b753b14e5b5c013653b701148d Mon Sep 17 00:00:00 2001
From: Vyas Ramasubramani
Date: Fri, 8 Sep 2023 02:22:00 -0700
Subject: [PATCH 050/150] Update doxygen to 1.9.1 (#14059)

I selected this version as it is what ships with Ubuntu 22.04. I also
ran `doxygen -u` to update the Doxyfile.
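For reference, the version gate in `ci/checks/doxygen.sh` (see the diff below) packs each dotted component into a fixed-width integer so that versions compare numerically; a rough Python equivalent of that awk one-liner, for illustration only:

```python
def version(v: str) -> int:
    """Mimic awk's printf("%d%03d%03d%03d", $1, $2, $3, $4)."""
    parts = (v.split(".") + ["0"] * 4)[:4]
    result = int(parts[0])
    for part in parts[1:]:
        result = result * 1000 + int(part)
    return result

# 1.9.1 -> 1009001000, so only an exact 1.9.1 passes the equality check.
assert version("1.8.20") < version("1.9.1") < version("1.10.0")
```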
Authors: - Vyas Ramasubramani (https://github.com/vyasr) Approvers: - David Wendt (https://github.com/davidwendt) - Divye Gala (https://github.com/divyegala) - Ray Douglass (https://github.com/raydouglass) URL: https://github.com/rapidsai/cudf/pull/14059 --- ci/checks/doxygen.sh | 8 +- .../all_cuda-118_arch-x86_64.yaml | 2 +- .../all_cuda-120_arch-x86_64.yaml | 2 +- cpp/doxygen/Doxyfile | 164 ++++++++++++++---- dependencies.yaml | 4 +- 5 files changed, 134 insertions(+), 46 deletions(-) diff --git a/ci/checks/doxygen.sh b/ci/checks/doxygen.sh index f260fbcd1a4..d932fa097e9 100755 --- a/ci/checks/doxygen.sh +++ b/ci/checks/doxygen.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright (c) 2022, NVIDIA CORPORATION. +# Copyright (c) 2022-2023, NVIDIA CORPORATION. ############################### # cuDF doxygen warnings check # ############################### @@ -13,11 +13,11 @@ fi # Utility to return version as number for comparison function version { echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'; } -# doxygen supported version 1.8.20 to 1.9.1 +# doxygen supported version 1.9.1 DOXYGEN_VERSION=`doxygen --version` -if [ $(version "$DOXYGEN_VERSION") -lt $(version "1.8.20") ] || [ $(version $DOXYGEN_VERSION) -gt $(version "1.9.1") ]; then +if [ ! $(version "$DOXYGEN_VERSION") -eq $(version "1.9.1") ] ; then echo -e "warning: Unsupported doxygen version $DOXYGEN_VERSION" - echo -e "Expecting doxygen version from 1.8.20 to 1.9.1" + echo -e "Expecting doxygen version 1.9.1" exit 0 fi diff --git a/conda/environments/all_cuda-118_arch-x86_64.yaml b/conda/environments/all_cuda-118_arch-x86_64.yaml index 8965a43b8ac..692ba78f317 100644 --- a/conda/environments/all_cuda-118_arch-x86_64.yaml +++ b/conda/environments/all_cuda-118_arch-x86_64.yaml @@ -29,7 +29,7 @@ dependencies: - dask>=2023.7.1 - distributed>=2023.7.1 - dlpack>=0.5,<0.6.0a0 -- doxygen=1.8.20 +- doxygen=1.9.1 - fastavro>=0.22.9 - fmt>=9.1.0,<10 - fsspec>=0.6.0 diff --git a/conda/environments/all_cuda-120_arch-x86_64.yaml b/conda/environments/all_cuda-120_arch-x86_64.yaml index 4542eb79267..cf1bf4b8733 100644 --- a/conda/environments/all_cuda-120_arch-x86_64.yaml +++ b/conda/environments/all_cuda-120_arch-x86_64.yaml @@ -30,7 +30,7 @@ dependencies: - dask>=2023.7.1 - distributed>=2023.7.1 - dlpack>=0.5,<0.6.0a0 -- doxygen=1.8.20 +- doxygen=1.9.1 - fastavro>=0.22.9 - fmt>=9.1.0,<10 - fsspec>=0.6.0 diff --git a/cpp/doxygen/Doxyfile b/cpp/doxygen/Doxyfile index 357daed243b..b072d252881 100644 --- a/cpp/doxygen/Doxyfile +++ b/cpp/doxygen/Doxyfile @@ -1,4 +1,4 @@ -# Doxyfile 1.8.20 +# Doxyfile 1.9.1 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. @@ -32,7 +32,7 @@ DOXYFILE_ENCODING = UTF-8 # title of most generated pages and in a few other places. # The default value is: My Project. -PROJECT_NAME = "libcudf" +PROJECT_NAME = libcudf # The PROJECT_NUMBER tag can be used to enter a project or revision number. This # could be handy for archiving the generated documentation or if some version @@ -93,6 +93,14 @@ ALLOW_UNICODE_NAMES = NO OUTPUT_LANGUAGE = English +# The OUTPUT_TEXT_DIRECTION tag is used to specify the direction in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all generated output in the proper direction. +# Possible values are: None, LTR, RTL and Context. +# The default value is: None. 
+ +OUTPUT_TEXT_DIRECTION = None + # If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member # descriptions after the members that are listed in the file and class # documentation (similar to Javadoc). Set to NO to disable this. @@ -305,7 +313,10 @@ OPTIMIZE_OUTPUT_SLICE = NO # Note: For files without extension you can use no_extension as a placeholder. # # Note that for custom extensions you also need to set FILE_PATTERNS otherwise -# the files are not read by doxygen. +# the files are not read by doxygen. When specifying no_extension you should add +# * to the FILE_PATTERNS. +# +# Note see also the list of default file extension mappings. EXTENSION_MAPPING = cu=C++ \ cuh=C++ @@ -516,6 +527,13 @@ EXTRACT_LOCAL_METHODS = NO EXTRACT_ANON_NSPACES = NO +# If this flag is set to YES, the name of an unnamed parameter in a declaration +# will be determined by the corresponding definition. By default unnamed +# parameters remain unnamed in the output. +# The default value is: YES. + +RESOLVE_UNNAMED_PARAMS = YES + # If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all # undocumented members inside documented classes or files. If set to NO these # members will be included in the various overviews, but no documentation @@ -553,11 +571,18 @@ HIDE_IN_BODY_DOCS = NO INTERNAL_DOCS = NO -# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file -# names in lower-case letters. If set to YES, upper-case letters are also -# allowed. This is useful if you have classes or files whose names only differ -# in case and if your file system supports case sensitive file names. Windows -# (including Cygwin) and Mac users are advised to set this option to NO. +# With the correct setting of option CASE_SENSE_NAMES doxygen will better be +# able to match the capabilities of the underlying filesystem. In case the +# filesystem is case sensitive (i.e. it supports files in the same directory +# whose names only differ in casing), the option must be set to YES to properly +# deal with such files in case they appear in the input. For filesystems that +# are not case sensitive the option should be be set to NO to properly deal with +# output files written for symbols that only differ in casing, such as for two +# classes, one named CLASS and the other named Class, and to also support +# references to files without having to specify the exact matching casing. On +# Windows (including Cygwin) and MacOS, users should typically set this option +# to NO, whereas on Linux or other Unix flavors it should typically be set to +# YES. # The default value is: system dependent. CASE_SENSE_NAMES = YES @@ -796,7 +821,10 @@ WARN_IF_DOC_ERROR = YES WARN_NO_PARAMDOC = YES # If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when -# a warning is encountered. +# a warning is encountered. If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS +# then doxygen will continue running as if WARN_AS_ERROR tag is set to NO, but +# at the end of the doxygen process doxygen will return with a non-zero status. +# Possible values are: NO, YES and FAIL_ON_WARNINGS. # The default value is: NO. WARN_AS_ERROR = NO @@ -846,8 +874,8 @@ INPUT = main_page.md \ # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses # libiconv (or the iconv built into libc) for the transcoding. 
See the libiconv -# documentation (see: https://www.gnu.org/software/libiconv/) for the list of -# possible encodings. +# documentation (see: +# https://www.gnu.org/software/libiconv/) for the list of possible encodings. # The default value is: UTF-8. INPUT_ENCODING = UTF-8 @@ -860,13 +888,15 @@ INPUT_ENCODING = UTF-8 # need to set EXTENSION_MAPPING for the extension otherwise the files are not # read by doxygen. # +# Note the list of default checked file patterns might differ from the list of +# default file extension mappings. +# # If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, # *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, # *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, # *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C comment), -# *.doc (to be provided as doxygen C comment), *.txt (to be provided as doxygen -# C comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd, -# *.vhdl, *.ucf, *.qsf and *.ice. +# *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd, *.vhdl, +# *.ucf, *.qsf and *.ice. FILE_PATTERNS = *.cpp \ *.hpp \ @@ -1270,10 +1300,11 @@ HTML_INDEX_NUM_ENTRIES = 100 # If the GENERATE_DOCSET tag is set to YES, additional index files will be # generated that can be used as input for Apple's Xcode 3 integrated development -# environment (see: https://developer.apple.com/xcode/), introduced with OSX -# 10.5 (Leopard). To create a documentation set, doxygen will generate a -# Makefile in the HTML output directory. Running make will produce the docset in -# that directory and running make install will install the docset in +# environment (see: +# https://developer.apple.com/xcode/), introduced with OSX 10.5 (Leopard). To +# create a documentation set, doxygen will generate a Makefile in the HTML +# output directory. Running make will produce the docset in that directory and +# running make install will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at # startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy # genXcode/_index.html for more information. @@ -1315,8 +1346,8 @@ DOCSET_PUBLISHER_NAME = Publisher # If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three # additional HTML index files: index.hhp, index.hhc, and index.hhk. The # index.hhp is a project file that can be read by Microsoft's HTML Help Workshop -# (see: https://www.microsoft.com/en-us/download/details.aspx?id=21138) on -# Windows. +# (see: +# https://www.microsoft.com/en-us/download/details.aspx?id=21138) on Windows. # # The HTML Help Workshop contains a compiler that can convert all HTML output # generated by doxygen into a single compiled HTML file (.chm). Compiled HTML @@ -1391,7 +1422,8 @@ QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help # Project output. For more information please see Qt Help Project / Namespace -# (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace). +# (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace). # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_QHP is set to YES. @@ -1399,8 +1431,8 @@ QHP_NAMESPACE = org.doxygen.Project # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt # Help Project output. 
For more information please see Qt Help Project / Virtual -# Folders (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual- -# folders). +# Folders (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-folders). # The default value is: doc. # This tag requires that the tag GENERATE_QHP is set to YES. @@ -1408,16 +1440,16 @@ QHP_VIRTUAL_FOLDER = doc # If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom # filter to add. For more information please see Qt Help Project / Custom -# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom- -# filters). +# Filters (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_NAME = # The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the # custom filter to add. For more information please see Qt Help Project / Custom -# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom- -# filters). +# Filters (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_ATTRS = @@ -1429,9 +1461,9 @@ QHP_CUST_FILTER_ATTRS = QHP_SECT_FILTER_ATTRS = -# The QHG_LOCATION tag can be used to specify the location of Qt's -# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the -# generated .qhp file. +# The QHG_LOCATION tag can be used to specify the location (absolute path +# including file name) of Qt's qhelpgenerator. If non-empty doxygen will try to +# run qhelpgenerator on the generated .qhp file. # This tag requires that the tag GENERATE_QHP is set to YES. QHG_LOCATION = @@ -1558,7 +1590,7 @@ USE_MATHJAX = NO # When MathJax is enabled you can set the default output format to be used for # the MathJax output. See the MathJax site (see: -# http://docs.mathjax.org/en/latest/output.html) for more details. +# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details. # Possible values are: HTML-CSS (which is slower, but has the best # compatibility), NativeMML (i.e. MathML) and SVG. # The default value is: HTML-CSS. @@ -1588,7 +1620,8 @@ MATHJAX_EXTENSIONS = # The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces # of code that will be used on startup of the MathJax code. See the MathJax site -# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an +# (see: +# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details. For an # example see the documentation. # This tag requires that the tag USE_MATHJAX is set to YES. @@ -1635,7 +1668,8 @@ SERVER_BASED_SEARCH = NO # # Doxygen ships with an example indexer (doxyindexer) and search engine # (doxysearch.cgi) which are based on the open source search engine library -# Xapian (see: https://xapian.org/). +# Xapian (see: +# https://xapian.org/). # # See the section "External Indexing and Searching" for details. # The default value is: NO. @@ -1648,8 +1682,9 @@ EXTERNAL_SEARCH = NO # # Doxygen ships with an example indexer (doxyindexer) and search engine # (doxysearch.cgi) which are based on the open source search engine library -# Xapian (see: https://xapian.org/). See the section "External Indexing and -# Searching" for details. +# Xapian (see: +# https://xapian.org/). See the section "External Indexing and Searching" for +# details. # This tag requires that the tag SEARCHENGINE is set to YES. 
SEARCHENGINE_URL = @@ -1839,6 +1874,16 @@ LATEX_BATCHMODE = NO LATEX_HIDE_INDICES = NO +# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source +# code with syntax highlighting in the LaTeX output. +# +# Note that which sources are shown also depends on other settings such as +# SOURCE_BROWSER. +# The default value is: NO. +# This tag requires that the tag GENERATE_LATEX is set to YES. + +LATEX_SOURCE_CODE = NO + # The LATEX_BIB_STYLE tag can be used to specify the style to use for the # bibliography, e.g. plainnat, or ieeetr. See # https://en.wikipedia.org/wiki/BibTeX and \cite for more info. @@ -1919,6 +1964,16 @@ RTF_STYLESHEET_FILE = RTF_EXTENSIONS_FILE = +# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code +# with syntax highlighting in the RTF output. +# +# Note that which sources are shown also depends on other settings such as +# SOURCE_BROWSER. +# The default value is: NO. +# This tag requires that the tag GENERATE_RTF is set to YES. + +RTF_SOURCE_CODE = NO + #--------------------------------------------------------------------------- # Configuration options related to the man page output #--------------------------------------------------------------------------- @@ -2015,6 +2070,15 @@ GENERATE_DOCBOOK = NO DOCBOOK_OUTPUT = docbook +# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the +# program listings (including syntax highlighting and cross-referencing +# information) to the DOCBOOK output. Note that enabling this will significantly +# increase the size of the DOCBOOK output. +# The default value is: NO. +# This tag requires that the tag GENERATE_DOCBOOK is set to YES. + +DOCBOOK_PROGRAMLISTING = NO + #--------------------------------------------------------------------------- # Configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- @@ -2301,10 +2365,32 @@ UML_LOOK = NO # but if the number exceeds 15, the total amount of fields shown is limited to # 10. # Minimum value: 0, maximum value: 100, default value: 10. -# This tag requires that the tag HAVE_DOT is set to YES. +# This tag requires that the tag UML_LOOK is set to YES. UML_LIMIT_NUM_FIELDS = 10 +# If the DOT_UML_DETAILS tag is set to NO, doxygen will show attributes and +# methods without types and arguments in the UML graphs. If the DOT_UML_DETAILS +# tag is set to YES, doxygen will add type and arguments for attributes and +# methods in the UML graphs. If the DOT_UML_DETAILS tag is set to NONE, doxygen +# will not generate fields with class member information in the UML graphs. The +# class diagrams will look similar to the default class diagrams but using UML +# notation for the relationships. +# Possible values are: NO, YES and NONE. +# The default value is: NO. +# This tag requires that the tag UML_LOOK is set to YES. + +DOT_UML_DETAILS = NO + +# The DOT_WRAP_THRESHOLD tag can be used to set the maximum number of characters +# to display on a single line. If the actual line length exceeds this threshold +# significantly it will wrapped across multiple lines. Some heuristics are apply +# to avoid ugly line breaks. +# Minimum value: 0, maximum value: 1000, default value: 17. +# This tag requires that the tag HAVE_DOT is set to YES. + +DOT_WRAP_THRESHOLD = 17 + # If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and # collaboration graphs will show the relations between templates and their # instances. 
@@ -2494,9 +2580,11 @@ DOT_MULTI_TARGETS = NO

 GENERATE_LEGEND = YES

-# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate dot
+# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate
 # files that are used to generate the various graphs.
+#
+# Note: This setting is not only used for dot files but also for msc and
+# plantuml temporary files.
 # The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.

 DOT_CLEANUP = YES
diff --git a/dependencies.yaml b/dependencies.yaml
index 97f86c6b864..f99b7404854 100644
--- a/dependencies.yaml
+++ b/dependencies.yaml
@@ -375,13 +375,13 @@ dependencies:
           - identify>=2.5.20
       - output_types: conda
         packages:
-          - doxygen=1.8.20 # pre-commit hook needs a specific version.
+          - &doxygen doxygen=1.9.1 # pre-commit hook needs a specific version.
   docs:
     common:
       - output_types: [conda]
         packages:
           - dask-cuda==23.10.*
-          - doxygen=1.8.20
+          - *doxygen
           - make
           - myst-nb
           - nbsphinx

From e43809ea9f9ba2015ebab3eb4d2b9ca7dfa72849 Mon Sep 17 00:00:00 2001
From: Lawrence Mitchell
Date: Fri, 8 Sep 2023 17:47:41 +0100
Subject: [PATCH 051/150] Use `conda mambabuild` rather than `mamba
 mambabuild` (#14067)

Since Conda 23.7.3, the plugin mechanism changed, and mambabuild broke.
Since `conda mambabuild` uses the `libmamba` solver when `boa` is
installed, switch to that.

The general handling of subcommands with `mamba` was partially fixed in
mamba-org/mamba#2732, but `mamba build` does not currently work due to
mamba-org/mamba#2821.

- Closes #14068

Authors:
  - Lawrence Mitchell (https://github.com/wence-)

Approvers:
  - Ray Douglass (https://github.com/raydouglass)

URL: https://github.com/rapidsai/cudf/pull/14067
---
 ci/build_cpp.sh    | 3 ++-
 ci/build_python.sh | 9 +++++----
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/ci/build_cpp.sh b/ci/build_cpp.sh
index 3bd18a88139..8b757fecf5a 100755
--- a/ci/build_cpp.sh
+++ b/ci/build_cpp.sh
@@ -11,7 +11,8 @@ rapids-print-env

 rapids-logger "Begin cpp build"

-rapids-mamba-retry mambabuild \
+# With boa installed conda build forward to boa
+rapids-conda-retry mambabuild \
   conda/recipes/libcudf

 rapids-upload-conda-to-s3 cpp
diff --git a/ci/build_python.sh b/ci/build_python.sh
index ec34d63b282..61f160b25f5 100755
--- a/ci/build_python.sh
+++ b/ci/build_python.sh
@@ -15,24 +15,25 @@ CPP_CHANNEL=$(rapids-download-conda-from-s3 cpp)

 # TODO: Remove `--no-test` flag once importing on a CPU
 # node works correctly
-rapids-mamba-retry mambabuild \
+# With boa installed conda build forwards to the boa builder
+rapids-conda-retry mambabuild \
   --no-test \
   --channel "${CPP_CHANNEL}" \
   conda/recipes/cudf

-rapids-mamba-retry mambabuild \
+rapids-conda-retry mambabuild \
   --no-test \
   --channel "${CPP_CHANNEL}" \
   --channel "${RAPIDS_CONDA_BLD_OUTPUT_DIR}" \
   conda/recipes/dask-cudf

-rapids-mamba-retry mambabuild \
+rapids-conda-retry mambabuild \
   --no-test \
   --channel "${CPP_CHANNEL}" \
   --channel "${RAPIDS_CONDA_BLD_OUTPUT_DIR}" \
   conda/recipes/cudf_kafka

-rapids-mamba-retry mambabuild \
+rapids-conda-retry mambabuild \
   --no-test \
   --channel "${CPP_CHANNEL}" \
   --channel "${RAPIDS_CONDA_BLD_OUTPUT_DIR}" \

From 01730c46a4f403fd5cf9245512c941176eef2428 Mon Sep 17 00:00:00 2001
From: GALI PREM SAGAR
Date: Fri, 8 Sep 2023 13:36:48 -0500
Subject: [PATCH 052/150] Fix `Index.difference` to match with pandas (#14053)

This PR fixes `Index.difference` in the following ways:

- [x] Fixes `name` preservation by correctly evaluating the name of two
  input objects, closes #14019
- [x] Fixes `is_mixed_with_object_dtype` handling that will resolve
  incorrect results for `CategoricalIndex`, closes #14022
- [x] Raises errors for invalid input types, the error messages are an
  exact match to pandas error messages for parity.
- [x] Introduces `RangeIndex._try_reconstruct_range_index`, which tries
  to re-construct a `RangeIndex` out of an `Int..Index` to save memory -
  this is on par with pandas. Closes #14013
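Taken together, the new behavior looks roughly like this (illustrative sketch, not part of the test suite):

```python
import cudf

idx1 = cudf.Index([0, 1, 2, 3], name="a")
idx2 = cudf.Index([2, 3], name="a")

# Matching names on the inputs are now carried over to the result.
print(idx1.difference(idx2).name)  # a

# For RangeIndex inputs, an evenly spaced result is reconstructed as a
# RangeIndex instead of being materialized.
print(cudf.RangeIndex(10).difference(cudf.RangeIndex(5, 10)))

# Invalid inputs raise the same error as pandas.
try:
    idx1.difference(1)
except TypeError as err:
    print(err)  # Input must be Index or array-like
```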
Authors:
  - GALI PREM SAGAR (https://github.com/galipremsagar)

Approvers:
  - Lawrence Mitchell (https://github.com/wence-)

URL: https://github.com/rapidsai/cudf/pull/14053
---
 python/cudf/cudf/core/_base_index.py | 12 ++++++++++--
 python/cudf/cudf/core/index.py       | 22 ++++++++++++++++++++++
 python/cudf/cudf/tests/test_index.py | 21 +++++++++++++++++++++
 python/cudf/cudf/utils/dtypes.py     |  5 +++++
 4 files changed, 58 insertions(+), 2 deletions(-)

diff --git a/python/cudf/cudf/core/_base_index.py b/python/cudf/cudf/core/_base_index.py
index 829ca33d8a5..8091f3f7dd2 100644
--- a/python/cudf/cudf/core/_base_index.py
+++ b/python/cudf/cudf/core/_base_index.py
@@ -30,7 +30,7 @@
 from cudf.core.column import ColumnBase, column
 from cudf.core.column_accessor import ColumnAccessor
 from cudf.utils import ioutils
-from cudf.utils.dtypes import is_mixed_with_object_dtype
+from cudf.utils.dtypes import can_convert_to_column, is_mixed_with_object_dtype
 from cudf.utils.utils import _is_same_name


@@ -935,13 +935,21 @@ def difference(self, other, sort=None):
         >>> idx1.difference(idx2, sort=False)
         Int64Index([2, 1], dtype='int64')
         """
+        if not can_convert_to_column(other):
+            raise TypeError("Input must be Index or array-like")
+
         if sort not in {None, False}:
             raise ValueError(
                 f"The 'sort' keyword only takes the values "
                 f"of None or False; {sort} was passed."
             )

-        other = cudf.Index(other)
+        other = cudf.Index(other, name=getattr(other, "name", self.name))
+
+        if not len(other):
+            return self._get_reconciled_name_object(other)
+        elif self.equals(other):
+            return self[:0]._get_reconciled_name_object(other)

         res_name = _get_result_name(self.name, other.name)

diff --git a/python/cudf/cudf/core/index.py b/python/cudf/cudf/core/index.py
index c7e25cdc430..4bb5428838f 100644
--- a/python/cudf/cudf/core/index.py
+++ b/python/cudf/cudf/core/index.py
@@ -724,6 +724,28 @@ def _intersection(self, other, sort=False):

         return new_index

+    @_cudf_nvtx_annotate
+    def difference(self, other, sort=None):
+        if isinstance(other, RangeIndex) and self.equals(other):
+            return self[:0]._get_reconciled_name_object(other)
+
+        return self._try_reconstruct_range_index(
+            super().difference(other, sort=sort)
+        )
+
+    def _try_reconstruct_range_index(self, index):
+        if isinstance(index, RangeIndex) or index.dtype.kind == "f":
+            return index
+        # Evenly spaced values can return a
+        # RangeIndex instead of a materialized Index.
+ if not index._column.has_nulls(): + uniques = cupy.unique(cupy.diff(index.values)) + if len(uniques) == 1 and uniques[0].get() != 0: + diff = uniques[0].get() + new_range = range(index[0], index[-1] + diff, diff) + return type(self)(new_range, name=index.name) + return index + def sort_values( self, return_indexer=False, diff --git a/python/cudf/cudf/tests/test_index.py b/python/cudf/cudf/tests/test_index.py index 506edd5b3f3..58dbc48e31e 100644 --- a/python/cudf/cudf/tests/test_index.py +++ b/python/cudf/cudf/tests/test_index.py @@ -789,6 +789,10 @@ def test_index_to_series(data): ["5", "6", "2", "a", "b", "c"], [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [1.0, 5.0, 6.0, 0.0, 1.3], + ["ab", "cd", "ef"], + pd.Series(["1", "2", "a", "3", None], dtype="category"), + range(0, 10), + [], ], ) @pytest.mark.parametrize( @@ -799,8 +803,11 @@ def test_index_to_series(data): [10, 20, 30, 40, 50, 60], ["1", "2", "3", "4", "5", "6"], ["5", "6", "2", "a", "b", "c"], + ["ab", "ef", None], [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [1.0, 5.0, 6.0, 0.0, 1.3], + range(2, 4), + pd.Series(["1", "a", "3", None], dtype="category"), [], ], ) @@ -818,9 +825,23 @@ def test_index_difference(data, other, sort, name_data, name_other): expected = pd_data.difference(pd_other, sort=sort) actual = gd_data.difference(gd_other, sort=sort) + assert_eq(expected, actual) +@pytest.mark.parametrize("other", ["a", 1, None]) +def test_index_difference_invalid_inputs(other): + pdi = pd.Index([1, 2, 3]) + gdi = cudf.Index([1, 2, 3]) + + assert_exceptions_equal( + pdi.difference, + gdi.difference, + ([other], {}), + ([other], {}), + ) + + def test_index_difference_sort_error(): pdi = pd.Index([1, 2, 3]) gdi = cudf.Index([1, 2, 3]) diff --git a/python/cudf/cudf/utils/dtypes.py b/python/cudf/cudf/utils/dtypes.py index ea96a0859ce..e50457b8e7b 100644 --- a/python/cudf/cudf/utils/dtypes.py +++ b/python/cudf/cudf/utils/dtypes.py @@ -426,6 +426,11 @@ def get_min_float_dtype(col): def is_mixed_with_object_dtype(lhs, rhs): + if cudf.api.types.is_categorical_dtype(lhs.dtype): + return is_mixed_with_object_dtype(lhs.dtype.categories, rhs) + elif cudf.api.types.is_categorical_dtype(rhs.dtype): + return is_mixed_with_object_dtype(lhs, rhs.dtype.categories) + return (lhs.dtype == "object" and rhs.dtype != "object") or ( rhs.dtype == "object" and lhs.dtype != "object" ) From 36ee11a719645feec6d5bcf089ac3a3ac20cb621 Mon Sep 17 00:00:00 2001 From: AJ Schmidt Date: Fri, 8 Sep 2023 17:37:13 -0400 Subject: [PATCH 053/150] Remove header tests (#14072) From some internal Slack discussions, it was determined that the `headers_test.sh` file is no longer necessary. This PR removes it and its associated checks. 
Authors: - AJ Schmidt (https://github.com/ajschmidt8) Approvers: - Ray Douglass (https://github.com/raydouglass) URL: https://github.com/rapidsai/cudf/pull/14072 --- .pre-commit-config.yaml | 12 -- ci/checks/headers_test.sh | 25 ---- conda/recipes/libcudf/meta.yaml | 242 -------------------------------- 3 files changed, 279 deletions(-) delete mode 100755 ci/checks/headers_test.sh diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b5165cf026f..238e5b44030 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -128,18 +128,6 @@ repos: language: system pass_filenames: false verbose: true - - id: headers-recipe-check - name: headers-recipe-check - entry: ./ci/checks/headers_test.sh - files: | - (?x)^( - ^cpp/include/| - ^conda/.*/meta.yaml - ) - types_or: [file] - language: system - pass_filenames: false - verbose: false - repo: https://github.com/codespell-project/codespell rev: v2.2.2 hooks: diff --git a/ci/checks/headers_test.sh b/ci/checks/headers_test.sh deleted file mode 100755 index b859009a8c5..00000000000 --- a/ci/checks/headers_test.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash -# Copyright (c) 2020-2022, NVIDIA CORPORATION. -##################################### -# conda existence test for headers # -##################################### - -RETVAL=0 -LIBNAME=cudf -DIRNAMES="cudf cudf_test" - -# existence tests for lib${LIBNAME} -for DIRNAME in ${DIRNAMES[@]}; do - HEADERS=`cd cpp && find include/${DIRNAME} -type f \( -iname "*.h" -o -iname "*.hpp" \) -print | sed 's|^| - test -f $PREFIX/|' | sort` - META_TESTS=`grep -E "test -f .*/include/${DIRNAME}/.*\.h(pp)?" conda/recipes/lib${LIBNAME}/meta.yaml | sort` - HEADER_DIFF=`diff <(echo "$HEADERS") <(echo "$META_TESTS")` - LIB_RETVAL=$? - - if [ "$LIB_RETVAL" != "0" ]; then - echo -e ">>>> FAILED: lib${LIBNAME} has different headers in include/${DIRNAME}/ and conda/recipes/lib${LIBNAME}/meta.yaml. 
The diff is shown below:" - echo -e "$HEADER_DIFF" - RETVAL=1 - fi -done - -exit $RETVAL diff --git a/conda/recipes/libcudf/meta.yaml b/conda/recipes/libcudf/meta.yaml index c844131ad31..627065817ba 100644 --- a/conda/recipes/libcudf/meta.yaml +++ b/conda/recipes/libcudf/meta.yaml @@ -110,249 +110,7 @@ outputs: test: commands: - test -f $PREFIX/lib/libcudf.so - - test -f $PREFIX/lib/libcudftestutil.a - - test -f $PREFIX/lib/libcudf_identify_stream_usage_mode_cudf.so - - test -f $PREFIX/lib/libcudf_identify_stream_usage_mode_testing.so - - test -f $PREFIX/include/cudf/aggregation.hpp - - test -f $PREFIX/include/cudf/ast/detail/expression_parser.hpp - - test -f $PREFIX/include/cudf/ast/detail/expression_transformer.hpp - - test -f $PREFIX/include/cudf/ast/detail/operators.hpp - - test -f $PREFIX/include/cudf/ast/expressions.hpp - - test -f $PREFIX/include/cudf/binaryop.hpp - test -f $PREFIX/include/cudf/column/column.hpp - - test -f $PREFIX/include/cudf/column/column_factories.hpp - - test -f $PREFIX/include/cudf/column/column_view.hpp - - test -f $PREFIX/include/cudf/concatenate.hpp - - test -f $PREFIX/include/cudf/contiguous_split.hpp - - test -f $PREFIX/include/cudf/copying.hpp - - test -f $PREFIX/include/cudf/datetime.hpp - - test -f $PREFIX/include/cudf/timezone.hpp - - test -f $PREFIX/include/cudf/detail/aggregation/aggregation.hpp - - test -f $PREFIX/include/cudf/detail/aggregation/result_cache.hpp - - test -f $PREFIX/include/cudf/detail/binaryop.hpp - - test -f $PREFIX/include/cudf/detail/calendrical_month_sequence.cuh - - test -f $PREFIX/include/cudf/detail/concatenate.hpp - - test -f $PREFIX/include/cudf/detail/concatenate_masks.hpp - - test -f $PREFIX/include/cudf/detail/contiguous_split.hpp - - test -f $PREFIX/include/cudf/detail/copy.hpp - - test -f $PREFIX/include/cudf/detail/datetime.hpp - - test -f $PREFIX/include/cudf/detail/fill.hpp - - test -f $PREFIX/include/cudf/detail/gather.hpp - - test -f $PREFIX/include/cudf/detail/groupby.hpp - - test -f $PREFIX/include/cudf/detail/groupby/group_replace_nulls.hpp - - test -f $PREFIX/include/cudf/detail/groupby/sort_helper.hpp - - test -f $PREFIX/include/cudf/detail/interop.hpp - - test -f $PREFIX/include/cudf/detail/is_element_valid.hpp - - test -f $PREFIX/include/cudf/detail/join.hpp - - test -f $PREFIX/include/cudf/detail/label_bins.hpp - - test -f $PREFIX/include/cudf/detail/null_mask.hpp - - test -f $PREFIX/include/cudf/detail/nvtx/nvtx3.hpp - - test -f $PREFIX/include/cudf/detail/nvtx/ranges.hpp - - test -f $PREFIX/include/cudf/detail/quantiles.hpp - - test -f $PREFIX/include/cudf/detail/repeat.hpp - - test -f $PREFIX/include/cudf/detail/replace.hpp - - test -f $PREFIX/include/cudf/detail/reshape.hpp - - test -f $PREFIX/include/cudf/detail/rolling.hpp - - test -f $PREFIX/include/cudf/detail/round.hpp - - test -f $PREFIX/include/cudf/detail/scan.hpp - - test -f $PREFIX/include/cudf/detail/scatter.hpp - - test -f $PREFIX/include/cudf/detail/search.hpp - - test -f $PREFIX/include/cudf/detail/sequence.hpp - - test -f $PREFIX/include/cudf/detail/sorting.hpp - - test -f $PREFIX/include/cudf/detail/stream_compaction.hpp - - test -f $PREFIX/include/cudf/detail/structs/utilities.hpp - - test -f $PREFIX/include/cudf/detail/tdigest/tdigest.hpp - - test -f $PREFIX/include/cudf/detail/timezone.cuh - - test -f $PREFIX/include/cudf/detail/timezone.hpp - - test -f $PREFIX/include/cudf/detail/transform.hpp - - test -f $PREFIX/include/cudf/detail/transpose.hpp - - test -f $PREFIX/include/cudf/detail/unary.hpp - - test -f 
$PREFIX/include/cudf/detail/utilities/alignment.hpp - - test -f $PREFIX/include/cudf/detail/utilities/default_stream.hpp - - test -f $PREFIX/include/cudf/detail/utilities/int_fastdiv.h - - test -f $PREFIX/include/cudf/detail/utilities/integer_utils.hpp - - test -f $PREFIX/include/cudf/detail/utilities/linked_column.hpp - - test -f $PREFIX/include/cudf/detail/utilities/logger.hpp - - test -f $PREFIX/include/cudf/detail/utilities/pinned_host_vector.hpp - - test -f $PREFIX/include/cudf/detail/utilities/stacktrace.hpp - - test -f $PREFIX/include/cudf/detail/utilities/vector_factories.hpp - - test -f $PREFIX/include/cudf/detail/utilities/visitor_overload.hpp - - test -f $PREFIX/include/cudf/dictionary/detail/concatenate.hpp - - test -f $PREFIX/include/cudf/dictionary/detail/encode.hpp - - test -f $PREFIX/include/cudf/dictionary/detail/merge.hpp - - test -f $PREFIX/include/cudf/dictionary/detail/replace.hpp - - test -f $PREFIX/include/cudf/dictionary/detail/search.hpp - - test -f $PREFIX/include/cudf/dictionary/detail/update_keys.hpp - - test -f $PREFIX/include/cudf/dictionary/dictionary_column_view.hpp - - test -f $PREFIX/include/cudf/dictionary/dictionary_factories.hpp - - test -f $PREFIX/include/cudf/dictionary/encode.hpp - - test -f $PREFIX/include/cudf/dictionary/search.hpp - - test -f $PREFIX/include/cudf/dictionary/update_keys.hpp - - test -f $PREFIX/include/cudf/filling.hpp - - test -f $PREFIX/include/cudf/fixed_point/fixed_point.hpp - - test -f $PREFIX/include/cudf/fixed_point/temporary.hpp - - test -f $PREFIX/include/cudf/groupby.hpp - - test -f $PREFIX/include/cudf/hashing.hpp - - test -f $PREFIX/include/cudf/hashing/detail/hashing.hpp - - test -f $PREFIX/include/cudf/interop.hpp - - test -f $PREFIX/include/cudf/io/arrow_io_source.hpp - - test -f $PREFIX/include/cudf/io/avro.hpp - - test -f $PREFIX/include/cudf/io/csv.hpp - - test -f $PREFIX/include/cudf/io/data_sink.hpp - - test -f $PREFIX/include/cudf/io/datasource.hpp - - test -f $PREFIX/include/cudf/io/detail/avro.hpp - - test -f $PREFIX/include/cudf/io/detail/csv.hpp - - test -f $PREFIX/include/cudf/io/detail/json.hpp - - test -f $PREFIX/include/cudf/io/detail/tokenize_json.hpp - - test -f $PREFIX/include/cudf/io/detail/orc.hpp - - test -f $PREFIX/include/cudf/io/detail/parquet.hpp - - test -f $PREFIX/include/cudf/io/detail/utils.hpp - - test -f $PREFIX/include/cudf/io/json.hpp - - test -f $PREFIX/include/cudf/io/orc.hpp - - test -f $PREFIX/include/cudf/io/orc_metadata.hpp - - test -f $PREFIX/include/cudf/io/orc_types.hpp - - test -f $PREFIX/include/cudf/io/parquet.hpp - - test -f $PREFIX/include/cudf/io/parquet_metadata.hpp - - test -f $PREFIX/include/cudf/io/text/byte_range_info.hpp - - test -f $PREFIX/include/cudf/io/text/data_chunk_source.hpp - - test -f $PREFIX/include/cudf/io/text/data_chunk_source_factories.hpp - - test -f $PREFIX/include/cudf/io/text/detail/bgzip_utils.hpp - - test -f $PREFIX/include/cudf/io/text/detail/multistate.hpp - - test -f $PREFIX/include/cudf/io/text/detail/tile_state.hpp - - test -f $PREFIX/include/cudf/io/text/detail/trie.hpp - - test -f $PREFIX/include/cudf/io/text/multibyte_split.hpp - - test -f $PREFIX/include/cudf/io/types.hpp - - test -f $PREFIX/include/cudf/join.hpp - - test -f $PREFIX/include/cudf/labeling/label_bins.hpp - - test -f $PREFIX/include/cudf/lists/combine.hpp - - test -f $PREFIX/include/cudf/lists/contains.hpp - - test -f $PREFIX/include/cudf/lists/count_elements.hpp - - test -f $PREFIX/include/cudf/lists/detail/combine.hpp - - test -f 
$PREFIX/include/cudf/lists/detail/concatenate.hpp - - test -f $PREFIX/include/cudf/lists/detail/contains.hpp - - test -f $PREFIX/include/cudf/lists/detail/copying.hpp - - test -f $PREFIX/include/cudf/lists/detail/dremel.hpp - - test -f $PREFIX/include/cudf/lists/detail/extract.hpp - - test -f $PREFIX/include/cudf/lists/detail/interleave_columns.hpp - - test -f $PREFIX/include/cudf/lists/detail/lists_column_factories.hpp - - test -f $PREFIX/include/cudf/lists/detail/reverse.hpp - - test -f $PREFIX/include/cudf/lists/detail/scatter_helper.cuh - - test -f $PREFIX/include/cudf/lists/detail/set_operations.hpp - - test -f $PREFIX/include/cudf/lists/detail/sorting.hpp - - test -f $PREFIX/include/cudf/lists/detail/stream_compaction.hpp - - test -f $PREFIX/include/cudf/lists/explode.hpp - - test -f $PREFIX/include/cudf/lists/extract.hpp - - test -f $PREFIX/include/cudf/lists/filling.hpp - - test -f $PREFIX/include/cudf/lists/gather.hpp - - test -f $PREFIX/include/cudf/lists/list_view.hpp - - test -f $PREFIX/include/cudf/lists/lists_column_view.hpp - - test -f $PREFIX/include/cudf/lists/reverse.hpp - - test -f $PREFIX/include/cudf/lists/set_operations.hpp - - test -f $PREFIX/include/cudf/lists/sorting.hpp - - test -f $PREFIX/include/cudf/lists/stream_compaction.hpp - - test -f $PREFIX/include/cudf/merge.hpp - - test -f $PREFIX/include/cudf/null_mask.hpp - - test -f $PREFIX/include/cudf/partitioning.hpp - - test -f $PREFIX/include/cudf/quantiles.hpp - - test -f $PREFIX/include/cudf/reduction.hpp - - test -f $PREFIX/include/cudf/reduction/detail/reduction.hpp - - test -f $PREFIX/include/cudf/reduction/detail/reduction_functions.hpp - - test -f $PREFIX/include/cudf/reduction/detail/segmented_reduction_functions.hpp - - test -f $PREFIX/include/cudf/replace.hpp - - test -f $PREFIX/include/cudf/reshape.hpp - - test -f $PREFIX/include/cudf/rolling.hpp - - test -f $PREFIX/include/cudf/rolling/range_window_bounds.hpp - - test -f $PREFIX/include/cudf/round.hpp - - test -f $PREFIX/include/cudf/scalar/scalar.hpp - - test -f $PREFIX/include/cudf/scalar/scalar_factories.hpp - - test -f $PREFIX/include/cudf/search.hpp - - test -f $PREFIX/include/cudf/sorting.hpp - - test -f $PREFIX/include/cudf/stream_compaction.hpp - - test -f $PREFIX/include/cudf/strings/attributes.hpp - - test -f $PREFIX/include/cudf/strings/capitalize.hpp - - test -f $PREFIX/include/cudf/strings/case.hpp - - test -f $PREFIX/include/cudf/strings/char_types/char_cases.hpp - - test -f $PREFIX/include/cudf/strings/char_types/char_types.hpp - - test -f $PREFIX/include/cudf/strings/char_types/char_types_enum.hpp - - test -f $PREFIX/include/cudf/strings/combine.hpp - - test -f $PREFIX/include/cudf/strings/contains.hpp - - test -f $PREFIX/include/cudf/strings/convert/convert_booleans.hpp - - test -f $PREFIX/include/cudf/strings/convert/convert_datetime.hpp - - test -f $PREFIX/include/cudf/strings/convert/convert_durations.hpp - - test -f $PREFIX/include/cudf/strings/convert/convert_fixed_point.hpp - - test -f $PREFIX/include/cudf/strings/convert/convert_floats.hpp - - test -f $PREFIX/include/cudf/strings/convert/convert_integers.hpp - - test -f $PREFIX/include/cudf/strings/convert/convert_ipv4.hpp - - test -f $PREFIX/include/cudf/strings/convert/convert_lists.hpp - - test -f $PREFIX/include/cudf/strings/convert/convert_urls.hpp - - test -f $PREFIX/include/cudf/strings/detail/char_tables.hpp - - test -f $PREFIX/include/cudf/strings/detail/combine.hpp - - test -f $PREFIX/include/cudf/strings/detail/concatenate.hpp - - test -f 
$PREFIX/include/cudf/strings/detail/converters.hpp - - test -f $PREFIX/include/cudf/strings/detail/copying.hpp - - test -f $PREFIX/include/cudf/strings/detail/fill.hpp - - test -f $PREFIX/include/cudf/strings/detail/json.hpp - - test -f $PREFIX/include/cudf/strings/detail/replace.hpp - - test -f $PREFIX/include/cudf/strings/detail/utf8.hpp - - test -f $PREFIX/include/cudf/strings/detail/utilities.hpp - - test -f $PREFIX/include/cudf/strings/extract.hpp - - test -f $PREFIX/include/cudf/strings/find.hpp - - test -f $PREFIX/include/cudf/strings/find_multiple.hpp - - test -f $PREFIX/include/cudf/strings/findall.hpp - - test -f $PREFIX/include/cudf/strings/json.hpp - - test -f $PREFIX/include/cudf/strings/padding.hpp - - test -f $PREFIX/include/cudf/strings/regex/flags.hpp - - test -f $PREFIX/include/cudf/strings/regex/regex_program.hpp - - test -f $PREFIX/include/cudf/strings/repeat_strings.hpp - - test -f $PREFIX/include/cudf/strings/replace.hpp - - test -f $PREFIX/include/cudf/strings/replace_re.hpp - - test -f $PREFIX/include/cudf/strings/reverse.hpp - - test -f $PREFIX/include/cudf/strings/side_type.hpp - - test -f $PREFIX/include/cudf/strings/slice.hpp - - test -f $PREFIX/include/cudf/strings/split/partition.hpp - - test -f $PREFIX/include/cudf/strings/split/split.hpp - - test -f $PREFIX/include/cudf/strings/split/split_re.hpp - - test -f $PREFIX/include/cudf/strings/string_view.hpp - - test -f $PREFIX/include/cudf/strings/strings_column_view.hpp - - test -f $PREFIX/include/cudf/strings/strip.hpp - - test -f $PREFIX/include/cudf/strings/translate.hpp - - test -f $PREFIX/include/cudf/strings/wrap.hpp - - test -f $PREFIX/include/cudf/structs/detail/concatenate.hpp - - test -f $PREFIX/include/cudf/structs/struct_view.hpp - - test -f $PREFIX/include/cudf/structs/structs_column_view.hpp - - test -f $PREFIX/include/cudf/table/table.hpp - - test -f $PREFIX/include/cudf/table/table_view.hpp - - test -f $PREFIX/include/cudf/tdigest/tdigest_column_view.hpp - - test -f $PREFIX/include/cudf/transform.hpp - - test -f $PREFIX/include/cudf/transpose.hpp - - test -f $PREFIX/include/cudf/types.hpp - - test -f $PREFIX/include/cudf/unary.hpp - - test -f $PREFIX/include/cudf/utilities/bit.hpp - - test -f $PREFIX/include/cudf/utilities/default_stream.hpp - - test -f $PREFIX/include/cudf/utilities/error.hpp - - test -f $PREFIX/include/cudf/utilities/logger.hpp - - test -f $PREFIX/include/cudf/utilities/span.hpp - - test -f $PREFIX/include/cudf/utilities/traits.hpp - - test -f $PREFIX/include/cudf/utilities/type_checks.hpp - - test -f $PREFIX/include/cudf/utilities/type_dispatcher.hpp - - test -f $PREFIX/include/cudf/wrappers/dictionary.hpp - - test -f $PREFIX/include/cudf/wrappers/durations.hpp - - test -f $PREFIX/include/cudf/wrappers/timestamps.hpp - - test -f $PREFIX/include/cudf_test/base_fixture.hpp - - test -f $PREFIX/include/cudf_test/column_utilities.hpp - - test -f $PREFIX/include/cudf_test/column_wrapper.hpp - - test -f $PREFIX/include/cudf_test/cudf_gtest.hpp - - test -f $PREFIX/include/cudf_test/cxxopts.hpp - - test -f $PREFIX/include/cudf_test/default_stream.hpp - - test -f $PREFIX/include/cudf_test/detail/column_utilities.hpp - - test -f $PREFIX/include/cudf_test/file_utilities.hpp - - test -f $PREFIX/include/cudf_test/io_metadata_utilities.hpp - - test -f $PREFIX/include/cudf_test/iterator_utilities.hpp - - test -f $PREFIX/include/cudf_test/stream_checking_resource_adaptor.hpp - - test -f $PREFIX/include/cudf_test/table_utilities.hpp - - test -f $PREFIX/include/cudf_test/timestamp_utilities.cuh 
- - test -f $PREFIX/include/cudf_test/type_list_utilities.hpp - - test -f $PREFIX/include/cudf_test/type_lists.hpp about: home: https://rapids.ai/ license: Apache-2.0 From 886e189e4c3cbad258563f4ec5b0f41fc6e15b5e Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Fri, 8 Sep 2023 18:27:18 -0700 Subject: [PATCH 054/150] Remove the mr from the base fixture (#14057) This mr is just an alias for the current memory resource, so we don't really need it. This came up in https://github.com/rapidsai/cudf/pull/14010#discussion_r1312405952. This PR removes all uses of it, but does not actually remove the mr yet. That will be done in a follow-up (see https://github.com/rapidsai/cudf/pull/14057#issuecomment-1712340714). Authors: - Vyas Ramasubramani (https://github.com/vyasr) Approvers: - David Wendt (https://github.com/davidwendt) - Nghia Truong (https://github.com/ttnghia) URL: https://github.com/rapidsai/cudf/pull/14057 --- cpp/tests/column/factories_test.cpp | 137 +++++++------------------- cpp/tests/copying/split_tests.cpp | 17 ++-- cpp/tests/scalar/factories_test.cpp | 37 +++---- cpp/tests/wrappers/timestamps_test.cu | 13 +-- 4 files changed, 64 insertions(+), 140 deletions(-) diff --git a/cpp/tests/column/factories_test.cpp b/cpp/tests/column/factories_test.cpp index 95706ad9e37..b06d097647d 100644 --- a/cpp/tests/column/factories_test.cpp +++ b/cpp/tests/column/factories_test.cpp @@ -37,7 +37,6 @@ class ColumnFactoryTest : public cudf::test::BaseFixture { public: cudf::size_type size() { return _size; } - rmm::cuda_stream_view stream() { return cudf::get_default_stream(); } }; template @@ -47,11 +46,8 @@ TYPED_TEST_SUITE(NumericFactoryTest, cudf::test::NumericTypes); TYPED_TEST(NumericFactoryTest, EmptyNoMask) { - auto column = cudf::make_numeric_column(cudf::data_type{cudf::type_to_id()}, - 0, - cudf::mask_state::UNALLOCATED, - this->stream(), - this->mr()); + auto column = cudf::make_numeric_column( + cudf::data_type{cudf::type_to_id()}, 0, cudf::mask_state::UNALLOCATED); EXPECT_EQ(column->type(), cudf::data_type{cudf::type_to_id()}); EXPECT_EQ(column->size(), 0); EXPECT_EQ(0, column->null_count()); @@ -62,11 +58,8 @@ TYPED_TEST(NumericFactoryTest, EmptyNoMask) TYPED_TEST(NumericFactoryTest, EmptyAllValidMask) { - auto column = cudf::make_numeric_column(cudf::data_type{cudf::type_to_id()}, - 0, - cudf::mask_state::ALL_VALID, - this->stream(), - this->mr()); + auto column = cudf::make_numeric_column( + cudf::data_type{cudf::type_to_id()}, 0, cudf::mask_state::ALL_VALID); EXPECT_EQ(column->type(), cudf::data_type{cudf::type_to_id()}); EXPECT_EQ(column->size(), 0); EXPECT_EQ(0, column->null_count()); @@ -77,11 +70,8 @@ TYPED_TEST(NumericFactoryTest, EmptyAllValidMask) TYPED_TEST(NumericFactoryTest, EmptyAllNullMask) { - auto column = cudf::make_numeric_column(cudf::data_type{cudf::type_to_id()}, - 0, - cudf::mask_state::ALL_NULL, - this->stream(), - this->mr()); + auto column = cudf::make_numeric_column( + cudf::data_type{cudf::type_to_id()}, 0, cudf::mask_state::ALL_NULL); EXPECT_EQ(column->type(), cudf::data_type{cudf::type_to_id()}); EXPECT_EQ(column->size(), 0); EXPECT_EQ(0, column->null_count()); @@ -92,11 +82,8 @@ TYPED_TEST(NumericFactoryTest, EmptyAllNullMask) TYPED_TEST(NumericFactoryTest, NoMask) { - auto column = cudf::make_numeric_column(cudf::data_type{cudf::type_to_id()}, - this->size(), - cudf::mask_state::UNALLOCATED, - this->stream(), - this->mr()); + auto column = cudf::make_numeric_column( + 
cudf::data_type{cudf::type_to_id()}, this->size(), cudf::mask_state::UNALLOCATED); EXPECT_EQ(column->type(), cudf::data_type{cudf::type_to_id()}); EXPECT_EQ(column->size(), this->size()); EXPECT_EQ(0, column->null_count()); @@ -107,11 +94,8 @@ TYPED_TEST(NumericFactoryTest, NoMask) TYPED_TEST(NumericFactoryTest, UnitializedMask) { - auto column = cudf::make_numeric_column(cudf::data_type{cudf::type_to_id()}, - this->size(), - cudf::mask_state::UNINITIALIZED, - this->stream(), - this->mr()); + auto column = cudf::make_numeric_column( + cudf::data_type{cudf::type_to_id()}, this->size(), cudf::mask_state::UNINITIALIZED); EXPECT_EQ(column->type(), cudf::data_type{cudf::type_to_id()}); EXPECT_EQ(column->size(), this->size()); EXPECT_TRUE(column->nullable()); @@ -120,11 +104,8 @@ TYPED_TEST(NumericFactoryTest, UnitializedMask) TYPED_TEST(NumericFactoryTest, AllValidMask) { - auto column = cudf::make_numeric_column(cudf::data_type{cudf::type_to_id()}, - this->size(), - cudf::mask_state::ALL_VALID, - this->stream(), - this->mr()); + auto column = cudf::make_numeric_column( + cudf::data_type{cudf::type_to_id()}, this->size(), cudf::mask_state::ALL_VALID); EXPECT_EQ(column->type(), cudf::data_type{cudf::type_to_id()}); EXPECT_EQ(column->size(), this->size()); EXPECT_EQ(0, column->null_count()); @@ -135,11 +116,8 @@ TYPED_TEST(NumericFactoryTest, AllValidMask) TYPED_TEST(NumericFactoryTest, AllNullMask) { - auto column = cudf::make_numeric_column(cudf::data_type{cudf::type_to_id()}, - this->size(), - cudf::mask_state::ALL_NULL, - this->stream(), - this->mr()); + auto column = cudf::make_numeric_column( + cudf::data_type{cudf::type_to_id()}, this->size(), cudf::mask_state::ALL_NULL); EXPECT_EQ(column->type(), cudf::data_type{cudf::type_to_id()}); EXPECT_EQ(column->size(), this->size()); EXPECT_EQ(this->size(), column->null_count()); @@ -154,9 +132,7 @@ TYPED_TEST(NumericFactoryTest, NullMaskAsParm) auto column = cudf::make_numeric_column(cudf::data_type{cudf::type_to_id()}, this->size(), std::move(null_mask), - this->size(), - this->stream(), - this->mr()); + this->size()); EXPECT_EQ(column->type(), cudf::data_type{cudf::type_to_id()}); EXPECT_EQ(column->size(), this->size()); EXPECT_EQ(this->size(), column->null_count()); @@ -167,12 +143,8 @@ TYPED_TEST(NumericFactoryTest, NullMaskAsParm) TYPED_TEST(NumericFactoryTest, NullMaskAsEmptyParm) { - auto column = cudf::make_numeric_column(cudf::data_type{cudf::type_to_id()}, - this->size(), - rmm::device_buffer{}, - 0, - this->stream(), - this->mr()); + auto column = cudf::make_numeric_column( + cudf::data_type{cudf::type_to_id()}, this->size(), rmm::device_buffer{}, 0); EXPECT_EQ(column->type(), cudf::data_type{cudf::type_to_id()}); EXPECT_EQ(column->size(), this->size()); EXPECT_EQ(0, column->null_count()); @@ -188,11 +160,8 @@ class NonNumericFactoryTest : public ColumnFactoryTest, TEST_P(NonNumericFactoryTest, NonNumericThrow) { auto construct = [this]() { - auto column = cudf::make_numeric_column(cudf::data_type{GetParam()}, - this->size(), - cudf::mask_state::UNALLOCATED, - this->stream(), - this->mr()); + auto column = cudf::make_numeric_column( + cudf::data_type{GetParam()}, this->size(), cudf::mask_state::UNALLOCATED); }; EXPECT_THROW(construct(), cudf::logic_error); } @@ -208,11 +177,8 @@ TYPED_TEST_SUITE(FixedWidthFactoryTest, cudf::test::FixedWidthTypes); TYPED_TEST(FixedWidthFactoryTest, EmptyNoMask) { - auto column = cudf::make_fixed_width_column(cudf::data_type{cudf::type_to_id()}, - 0, - cudf::mask_state::UNALLOCATED, - this->stream(), - 
this->mr()); + auto column = cudf::make_fixed_width_column( + cudf::data_type{cudf::type_to_id()}, 0, cudf::mask_state::UNALLOCATED); EXPECT_EQ(column->type(), cudf::data_type{cudf::type_to_id()}); } @@ -235,11 +201,8 @@ TYPED_TEST(EmptyFactoryTest, Empty) TYPED_TEST(FixedWidthFactoryTest, EmptyAllValidMask) { - auto column = cudf::make_fixed_width_column(cudf::data_type{cudf::type_to_id()}, - 0, - cudf::mask_state::ALL_VALID, - this->stream(), - this->mr()); + auto column = cudf::make_fixed_width_column( + cudf::data_type{cudf::type_to_id()}, 0, cudf::mask_state::ALL_VALID); EXPECT_EQ(column->type(), cudf::data_type{cudf::type_to_id()}); EXPECT_EQ(column->size(), 0); EXPECT_EQ(0, column->null_count()); @@ -250,11 +213,8 @@ TYPED_TEST(FixedWidthFactoryTest, EmptyAllValidMask) TYPED_TEST(FixedWidthFactoryTest, EmptyAllNullMask) { - auto column = cudf::make_fixed_width_column(cudf::data_type{cudf::type_to_id()}, - 0, - cudf::mask_state::ALL_NULL, - this->stream(), - this->mr()); + auto column = cudf::make_fixed_width_column( + cudf::data_type{cudf::type_to_id()}, 0, cudf::mask_state::ALL_NULL); EXPECT_EQ(column->type(), cudf::data_type{cudf::type_to_id()}); EXPECT_EQ(column->size(), 0); EXPECT_EQ(0, column->null_count()); @@ -265,11 +225,8 @@ TYPED_TEST(FixedWidthFactoryTest, EmptyAllNullMask) TYPED_TEST(FixedWidthFactoryTest, NoMask) { - auto column = cudf::make_fixed_width_column(cudf::data_type{cudf::type_to_id()}, - this->size(), - cudf::mask_state::UNALLOCATED, - this->stream(), - this->mr()); + auto column = cudf::make_fixed_width_column( + cudf::data_type{cudf::type_to_id()}, this->size(), cudf::mask_state::UNALLOCATED); EXPECT_EQ(column->type(), cudf::data_type{cudf::type_to_id()}); EXPECT_EQ(column->size(), this->size()); EXPECT_EQ(0, column->null_count()); @@ -280,11 +237,8 @@ TYPED_TEST(FixedWidthFactoryTest, NoMask) TYPED_TEST(FixedWidthFactoryTest, UnitializedMask) { - auto column = cudf::make_fixed_width_column(cudf::data_type{cudf::type_to_id()}, - this->size(), - cudf::mask_state::UNINITIALIZED, - this->stream(), - this->mr()); + auto column = cudf::make_fixed_width_column( + cudf::data_type{cudf::type_to_id()}, this->size(), cudf::mask_state::UNINITIALIZED); EXPECT_EQ(column->type(), cudf::data_type{cudf::type_to_id()}); EXPECT_EQ(column->size(), this->size()); EXPECT_TRUE(column->nullable()); @@ -293,11 +247,8 @@ TYPED_TEST(FixedWidthFactoryTest, UnitializedMask) TYPED_TEST(FixedWidthFactoryTest, AllValidMask) { - auto column = cudf::make_fixed_width_column(cudf::data_type{cudf::type_to_id()}, - this->size(), - cudf::mask_state::ALL_VALID, - this->stream(), - this->mr()); + auto column = cudf::make_fixed_width_column( + cudf::data_type{cudf::type_to_id()}, this->size(), cudf::mask_state::ALL_VALID); EXPECT_EQ(column->type(), cudf::data_type{cudf::type_to_id()}); EXPECT_EQ(column->size(), this->size()); EXPECT_EQ(0, column->null_count()); @@ -308,11 +259,8 @@ TYPED_TEST(FixedWidthFactoryTest, AllValidMask) TYPED_TEST(FixedWidthFactoryTest, AllNullMask) { - auto column = cudf::make_fixed_width_column(cudf::data_type{cudf::type_to_id()}, - this->size(), - cudf::mask_state::ALL_NULL, - this->stream(), - this->mr()); + auto column = cudf::make_fixed_width_column( + cudf::data_type{cudf::type_to_id()}, this->size(), cudf::mask_state::ALL_NULL); EXPECT_EQ(column->type(), cudf::data_type{cudf::type_to_id()}); EXPECT_EQ(column->size(), this->size()); EXPECT_EQ(this->size(), column->null_count()); @@ -327,9 +275,7 @@ TYPED_TEST(FixedWidthFactoryTest, NullMaskAsParm) auto column = 
cudf::make_fixed_width_column(cudf::data_type{cudf::type_to_id()}, this->size(), std::move(null_mask), - this->size(), - this->stream(), - this->mr()); + this->size()); EXPECT_EQ(column->type(), cudf::data_type{cudf::type_to_id()}); EXPECT_EQ(column->size(), this->size()); EXPECT_EQ(this->size(), column->null_count()); @@ -340,12 +286,8 @@ TYPED_TEST(FixedWidthFactoryTest, NullMaskAsParm) TYPED_TEST(FixedWidthFactoryTest, NullMaskAsEmptyParm) { - auto column = cudf::make_fixed_width_column(cudf::data_type{cudf::type_to_id()}, - this->size(), - rmm::device_buffer{}, - 0, - this->stream(), - this->mr()); + auto column = cudf::make_fixed_width_column( + cudf::data_type{cudf::type_to_id()}, this->size(), rmm::device_buffer{}, 0); EXPECT_EQ(column->type(), cudf::data_type{cudf::type_to_id()}); EXPECT_EQ(column->size(), this->size()); EXPECT_EQ(0, column->null_count()); @@ -361,11 +303,8 @@ class NonFixedWidthFactoryTest : public ColumnFactoryTest, TEST_P(NonFixedWidthFactoryTest, NonFixedWidthThrow) { auto construct = [this]() { - auto column = cudf::make_fixed_width_column(cudf::data_type{GetParam()}, - this->size(), - cudf::mask_state::UNALLOCATED, - this->stream(), - this->mr()); + auto column = cudf::make_fixed_width_column( + cudf::data_type{GetParam()}, this->size(), cudf::mask_state::UNALLOCATED); }; EXPECT_THROW(construct(), cudf::logic_error); } diff --git a/cpp/tests/copying/split_tests.cpp b/cpp/tests/copying/split_tests.cpp index 7a5c738dc12..842ba801df0 100644 --- a/cpp/tests/copying/split_tests.cpp +++ b/cpp/tests/copying/split_tests.cpp @@ -2304,13 +2304,14 @@ TEST_F(ContiguousSplitTableCornerCases, OutBufferToSmall) { // internally, contiguous split chunks GPU work in 1MB contiguous copies // so the output buffer must be 1MB or larger. - EXPECT_THROW(cudf::chunked_pack::create({}, 1 * 1024, mr()), cudf::logic_error); + EXPECT_THROW(cudf::chunked_pack::create({}, 1 * 1024), cudf::logic_error); } TEST_F(ContiguousSplitTableCornerCases, ChunkSpanTooSmall) { - auto chunked_pack = cudf::chunked_pack::create({}, 1 * 1024 * 1024, mr()); - rmm::device_buffer buff(1 * 1024, cudf::get_default_stream(), mr()); + auto chunked_pack = cudf::chunked_pack::create({}, 1 * 1024 * 1024); + rmm::device_buffer buff( + 1 * 1024, cudf::test::get_default_stream(), rmm::mr::get_current_device_resource()); cudf::device_span too_small(static_cast(buff.data()), buff.size()); std::size_t copied = 0; // throws because we created chunked_contig_split with 1MB, but we are giving @@ -2321,8 +2322,9 @@ TEST_F(ContiguousSplitTableCornerCases, ChunkSpanTooSmall) TEST_F(ContiguousSplitTableCornerCases, EmptyTableHasNextFalse) { - auto chunked_pack = cudf::chunked_pack::create({}, 1 * 1024 * 1024, mr()); - rmm::device_buffer buff(1 * 1024 * 1024, cudf::get_default_stream(), mr()); + auto chunked_pack = cudf::chunked_pack::create({}, 1 * 1024 * 1024); + rmm::device_buffer buff( + 1 * 1024 * 1024, cudf::test::get_default_stream(), rmm::mr::get_current_device_resource()); cudf::device_span bounce_buff(static_cast(buff.data()), buff.size()); EXPECT_EQ(chunked_pack->has_next(), false); // empty input table std::size_t copied = 0; @@ -2334,9 +2336,10 @@ TEST_F(ContiguousSplitTableCornerCases, ExhaustedHasNextFalse) { cudf::test::strings_column_wrapper a{"abc", "def", "ghi", "jkl", "mno", "", "st", "uvwx"}; cudf::table_view t({a}); - rmm::device_buffer buff(1 * 1024 * 1024, cudf::get_default_stream(), mr()); + rmm::device_buffer buff( + 1 * 1024 * 1024, cudf::test::get_default_stream(), 
rmm::mr::get_current_device_resource()); cudf::device_span bounce_buff(static_cast(buff.data()), buff.size()); - auto chunked_pack = cudf::chunked_pack::create(t, buff.size(), mr()); + auto chunked_pack = cudf::chunked_pack::create(t, buff.size()); EXPECT_EQ(chunked_pack->has_next(), true); std::size_t copied = chunked_pack->next(bounce_buff); EXPECT_EQ(copied, chunked_pack->get_total_contiguous_size()); diff --git a/cpp/tests/scalar/factories_test.cpp b/cpp/tests/scalar/factories_test.cpp index febae11832d..7da5c408a48 100644 --- a/cpp/tests/scalar/factories_test.cpp +++ b/cpp/tests/scalar/factories_test.cpp @@ -26,22 +26,17 @@ #include -class ScalarFactoryTest : public cudf::test::BaseFixture { - public: - rmm::cuda_stream_view stream() { return cudf::get_default_stream(); } -}; +class ScalarFactoryTest : public cudf::test::BaseFixture {}; template -struct NumericScalarFactory : public ScalarFactoryTest { - static constexpr auto factory = cudf::make_numeric_scalar; -}; +struct NumericScalarFactory : public ScalarFactoryTest {}; TYPED_TEST_SUITE(NumericScalarFactory, cudf::test::NumericTypes); TYPED_TEST(NumericScalarFactory, FactoryDefault) { std::unique_ptr s = - this->factory(cudf::data_type{cudf::type_to_id()}, this->stream(), this->mr()); + cudf::make_numeric_scalar(cudf::data_type{cudf::type_to_id()}); EXPECT_EQ(s->type(), cudf::data_type{cudf::type_to_id()}); EXPECT_FALSE(s->is_valid()); @@ -50,7 +45,7 @@ TYPED_TEST(NumericScalarFactory, FactoryDefault) TYPED_TEST(NumericScalarFactory, TypeCast) { std::unique_ptr s = - this->factory(cudf::data_type{cudf::type_to_id()}, this->stream(), this->mr()); + cudf::make_numeric_scalar(cudf::data_type{cudf::type_to_id()}); auto numeric_s = static_cast*>(s.get()); @@ -62,16 +57,14 @@ TYPED_TEST(NumericScalarFactory, TypeCast) } template -struct TimestampScalarFactory : public ScalarFactoryTest { - static constexpr auto factory = cudf::make_timestamp_scalar; -}; +struct TimestampScalarFactory : public ScalarFactoryTest {}; TYPED_TEST_SUITE(TimestampScalarFactory, cudf::test::TimestampTypes); TYPED_TEST(TimestampScalarFactory, FactoryDefault) { std::unique_ptr s = - this->factory(cudf::data_type{cudf::type_to_id()}, this->stream(), this->mr()); + cudf::make_timestamp_scalar(cudf::data_type{cudf::type_to_id()}); EXPECT_EQ(s->type(), cudf::data_type{cudf::type_to_id()}); EXPECT_FALSE(s->is_valid()); @@ -80,7 +73,7 @@ TYPED_TEST(TimestampScalarFactory, FactoryDefault) TYPED_TEST(TimestampScalarFactory, TypeCast) { std::unique_ptr s = - this->factory(cudf::data_type{cudf::type_to_id()}, this->stream(), this->mr()); + cudf::make_timestamp_scalar(cudf::data_type{cudf::type_to_id()}); auto numeric_s = static_cast*>(s.get()); @@ -92,9 +85,7 @@ TYPED_TEST(TimestampScalarFactory, TypeCast) } template -struct DefaultScalarFactory : public ScalarFactoryTest { - static constexpr auto factory = cudf::make_default_constructed_scalar; -}; +struct DefaultScalarFactory : public ScalarFactoryTest {}; using MixedTypes = cudf::test::Concat; TYPED_TEST_SUITE(DefaultScalarFactory, MixedTypes); @@ -102,7 +93,7 @@ TYPED_TEST_SUITE(DefaultScalarFactory, MixedTypes); TYPED_TEST(DefaultScalarFactory, FactoryDefault) { std::unique_ptr s = - this->factory(cudf::data_type{cudf::type_to_id()}, this->stream(), this->mr()); + cudf::make_default_constructed_scalar(cudf::data_type{cudf::type_to_id()}); EXPECT_EQ(s->type(), cudf::data_type{cudf::type_to_id()}); EXPECT_FALSE(s->is_valid()); @@ -111,7 +102,7 @@ TYPED_TEST(DefaultScalarFactory, FactoryDefault) 
TYPED_TEST(DefaultScalarFactory, TypeCast) { std::unique_ptr s = - this->factory(cudf::data_type{cudf::type_to_id()}, this->stream(), this->mr()); + cudf::make_default_constructed_scalar(cudf::data_type{cudf::type_to_id()}); auto numeric_s = static_cast*>(s.get()); @@ -129,8 +120,7 @@ TYPED_TEST(FixedWidthScalarFactory, ValueProvided) { TypeParam value = cudf::test::make_type_param_scalar(54); - std::unique_ptr s = - cudf::make_fixed_width_scalar(value, this->stream(), this->mr()); + std::unique_ptr s = cudf::make_fixed_width_scalar(value); auto numeric_s = static_cast*>(s.get()); @@ -150,9 +140,8 @@ TYPED_TEST(FixedPointScalarFactory, ValueProvided) using namespace numeric; using decimalXX = TypeParam; - auto const rep_value = static_cast(123); - auto const s = - cudf::make_fixed_point_scalar(123, scale_type{-2}, this->stream(), this->mr()); + auto const rep_value = static_cast(123); + auto const s = cudf::make_fixed_point_scalar(123, scale_type{-2}); auto const fp_s = static_cast*>(s.get()); auto const expected_dtype = cudf::data_type{cudf::type_to_id(), -2}; diff --git a/cpp/tests/wrappers/timestamps_test.cu b/cpp/tests/wrappers/timestamps_test.cu index e6c65b4e0e4..f7d3df18ffd 100644 --- a/cpp/tests/wrappers/timestamps_test.cu +++ b/cpp/tests/wrappers/timestamps_test.cu @@ -38,7 +38,6 @@ template struct ChronoColumnTest : public cudf::test::BaseFixture { - rmm::cuda_stream_view stream() { return cudf::get_default_stream(); } cudf::size_type size() { return cudf::size_type(100); } cudf::data_type type() { return cudf::data_type{cudf::type_to_id()}; } }; @@ -188,9 +187,7 @@ TYPED_TEST(ChronoColumnTest, ChronoFactoryNullMaskAsParm) auto column = make_fixed_width_column(cudf::data_type{cudf::type_to_id()}, this->size(), std::move(null_mask), - this->size(), - this->stream(), - this->mr()); + this->size()); EXPECT_EQ(column->type(), cudf::data_type{cudf::type_to_id()}); EXPECT_EQ(column->size(), this->size()); EXPECT_EQ(this->size(), column->null_count()); @@ -202,12 +199,8 @@ TYPED_TEST(ChronoColumnTest, ChronoFactoryNullMaskAsParm) TYPED_TEST(ChronoColumnTest, ChronoFactoryNullMaskAsEmptyParm) { rmm::device_buffer null_mask{}; - auto column = make_fixed_width_column(cudf::data_type{cudf::type_to_id()}, - this->size(), - std::move(null_mask), - 0, - this->stream(), - this->mr()); + auto column = make_fixed_width_column( + cudf::data_type{cudf::type_to_id()}, this->size(), std::move(null_mask), 0); EXPECT_EQ(column->type(), cudf::data_type{cudf::type_to_id()}); EXPECT_EQ(column->size(), this->size()); From 0bcad6cfeb93a895285bcaf19ca694d2d8229347 Mon Sep 17 00:00:00 2001 From: Vukasin Milovanovic Date: Mon, 11 Sep 2023 12:01:58 -0700 Subject: [PATCH 055/150] Remove debug print in a Parquet test (#14063) Removed a debug print. That's it. 
Authors: - Vukasin Milovanovic (https://github.com/vuule) Approvers: - Karthikeyan (https://github.com/karthikeyann) - David Wendt (https://github.com/davidwendt) - Nghia Truong (https://github.com/ttnghia) URL: https://github.com/rapidsai/cudf/pull/14063 --- cpp/tests/io/parquet_test.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/cpp/tests/io/parquet_test.cpp b/cpp/tests/io/parquet_test.cpp index 3cd5c9f5593..64aca091686 100644 --- a/cpp/tests/io/parquet_test.cpp +++ b/cpp/tests/io/parquet_test.cpp @@ -6534,7 +6534,6 @@ TEST_F(ParquetReaderTest, FilterFloatNAN) auto col0 = cudf::test::fixed_width_column_wrapper(elements, elements + num_rows); auto col1 = cudf::test::fixed_width_column_wrapper(elements, elements + num_rows); - cudf::test::print(col0); auto const written_table = table_view{{col0, col1}}; auto const filepath = temp_env->get_temp_filepath("FilterFloatNAN.parquet"); { From bc304a29d244ad502fbdc6a304c5de0e99aeb57c Mon Sep 17 00:00:00 2001 From: Christopher Harris Date: Mon, 11 Sep 2023 14:28:37 -0500 Subject: [PATCH 056/150] Produce a fatal error if cudf is unable to find pyarrow include directory (#13976) Produce a fatal error if cudf python is unable to find pyarrow include directory. Previously the failure only presented itself while trying to compile cython files which failed to include headers from pyarrow. _Previously:_ ``` FAILED: cudf/_lib/CMakeFiles/avro.dir/avro.cxx.o /usr/bin/sccache /usr/bin/g++ -DFMT_HEADER_ONLY=1 -DSPDLOG_ACTIVE_LEVEL=SPDLOG_LEVEL_INFO -DSPDLOG_FMT_EXTERNAL -DTHRUST_DEVICE_SYSTEM=THRUST_DEVICE_SYSTEM_CUDA -DTHRUST_HOST_SYSTEM=THRUST_HOST_SYSTEM_CPP -Davro_EXPORTS -I/usr/include/python3.10 -I/home/coder/.local/share/venvs/rapids/lib/python3.10/site-packages/numpy/core/include -I/home/coder/cudf/python/cudf/cudf/_lib -I/home/coder/cudf/cpp/build/release/_deps/libcudacxx-src/lib/cmake/libcudacxx/../../../include -I/home/coder/cudf/cpp/build/release/_deps/thrust-src -I/home/coder/cudf/cpp/build/release/_deps/thrust-src/dependencies/cub -isystem /home/coder/cudf/cpp/build/release/_deps/dlpack-src/include -isystem /home/coder/cudf/cpp/build/release/_deps/jitify-src -isystem /home/coder/cudf/cpp/include -isystem /home/coder/cudf/cpp/build/release/include -isystem /home/coder/rmm/include -isystem /usr/local/cuda/include -isystem /home/coder/fmt/include -isystem /home/coder/rmm/build/release/_deps/spdlog-src/include -O3 -DNDEBUG -fPIC -MD -MT cudf/_lib/CMakeFiles/avro.dir/avro.cxx.o -MF cudf/_lib/CMakeFiles/avro.dir/avro.cxx.o.d -o cudf/_lib/CMakeFiles/avro.dir/avro.cxx.o -c /home/coder/cudf/python/cudf/_skbuild/linux-x86_64-3.10/cmake-build/cudf/_lib/avro.cxx /home/coder/cudf/python/cudf/_skbuild/linux-x86_64-3.10/cmake-build/cudf/_lib/avro.cxx:1291:10: fatal error: arrow/python/platform.h: No such file or directory 1291 | #include "arrow/python/platform.h" | ^~~~~~~~~~~~~~~~~~~~~~~~~ compilation terminated. 
[6/23] Building CXX object cudf/_lib/CMakeFiles/csv.dir/csv.cxx.o FAILED: cudf/_lib/CMakeFiles/csv.dir/csv.cxx.o /usr/bin/sccache /usr/bin/g++ -DFMT_HEADER_ONLY=1 -DSPDLOG_ACTIVE_LEVEL=SPDLOG_LEVEL_INFO -DSPDLOG_FMT_EXTERNAL -DTHRUST_DEVICE_SYSTEM=THRUST_DEVICE_SYSTEM_CUDA -DTHRUST_HOST_SYSTEM=THRUST_HOST_SYSTEM_CPP -Dcsv_EXPORTS -I/usr/include/python3.10 -I/home/coder/.local/share/venvs/rapids/lib/python3.10/site-packages/numpy/core/include -I/home/coder/cudf/python/cudf/cudf/_lib -I/home/coder/cudf/cpp/build/release/_deps/libcudacxx-src/lib/cmake/libcudacxx/../../../include -I/home/coder/cudf/cpp/build/release/_deps/thrust-src -I/home/coder/cudf/cpp/build/release/_deps/thrust-src/dependencies/cub -isystem /home/coder/cudf/cpp/build/release/_deps/dlpack-src/include -isystem /home/coder/cudf/cpp/build/release/_deps/jitify-src -isystem /home/coder/cudf/cpp/include -isystem /home/coder/cudf/cpp/build/release/include -isystem /home/coder/rmm/include -isystem /usr/local/cuda/include -isystem /home/coder/fmt/include -isystem /home/coder/rmm/build/release/_deps/spdlog-src/include -O3 -DNDEBUG -fPIC -MD -MT cudf/_lib/CMakeFiles/csv.dir/csv.cxx.o -MF cudf/_lib/CMakeFiles/csv.dir/csv.cxx.o.d -o cudf/_lib/CMakeFiles/csv.dir/csv.cxx.o -c /home/coder/cudf/python/cudf/_skbuild/linux-x86_64-3.10/cmake-build/cudf/_lib/csv.cxx /home/coder/cudf/python/cudf/_skbuild/linux-x86_64-3.10/cmake-build/cudf/_lib/csv.cxx:1292:10: fatal error: arrow/python/platform.h: No such file or directory 1292 | #include "arrow/python/platform.h" | ^~~~~~~~~~~~~~~~~~~~~~~~~ compilation terminated. ... ``` _With these changes:_ ``` CMake Error at cudf/_lib/CMakeLists.txt:107 (message): Error while trying to obtain pyarrow include dir: Traceback (most recent call last): File "", line 1, in File "/home/coder/.local/share/venvs/rapids/lib/python3.10/site-packages/pyarrow/__init__.py", line 65, in import pyarrow.lib as _lib ImportError: libarrow.so.1200: cannot open shared object file: No such file or directory ``` Authors: - Christopher Harris (https://github.com/cwharris) - Vyas Ramasubramani (https://github.com/vyasr) Approvers: - Vyas Ramasubramani (https://github.com/vyasr) URL: https://github.com/rapidsai/cudf/pull/13976 --- python/cudf/cudf/_lib/CMakeLists.txt | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/python/cudf/cudf/_lib/CMakeLists.txt b/python/cudf/cudf/_lib/CMakeLists.txt index 06de6cc825f..947659c290a 100644 --- a/python/cudf/cudf/_lib/CMakeLists.txt +++ b/python/cudf/cudf/_lib/CMakeLists.txt @@ -98,9 +98,15 @@ find_package(Python 3.9 REQUIRED COMPONENTS Interpreter) execute_process( COMMAND "${Python_EXECUTABLE}" -c "import pyarrow; print(pyarrow.get_include())" OUTPUT_VARIABLE PYARROW_INCLUDE_DIR + ERROR_VARIABLE PYARROW_ERROR + RESULT_VARIABLE PYARROW_RESULT OUTPUT_STRIP_TRAILING_WHITESPACE ) +if(${PYARROW_RESULT}) + message(FATAL_ERROR "Error while trying to obtain pyarrow include directory:\n${PYARROW_ERROR}") +endif() + set(targets_using_arrow_headers interop avro csv orc json parquet) foreach(target IN LISTS targets_using_arrow_headers) target_include_directories(${target} PRIVATE "${PYARROW_INCLUDE_DIR}") From c3bf70595210d684fd747a927e59abc739aea8cf Mon Sep 17 00:00:00 2001 From: GALI PREM SAGAR Date: Mon, 11 Sep 2023 20:19:17 -0500 Subject: [PATCH 057/150] Fix renaming `Series` and `Index` (#14080) This PR resolves renaming `Series` and `Index` by assigning `no_default` to internal API default parameters. 
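As a minimal, self-contained sketch of the sentinel pattern (toy code, not cudf internals; all names here are illustrative): a `None` default cannot distinguish "no name passed" from an explicit rename to `None`/`NA`/`NaN`, while a unique sentinel object keeps the two cases apart.

```python
no_default = object()  # hypothetical stand-in for cudf.api.extensions.no_default

class Named:
    """Toy model of an object with a mutable ``name``, like Series/Index."""

    def __init__(self, name=None):
        self.name = name

    def with_name(self, name=no_default):
        out = Named(self.name)
        # `if name is not None:` would silently drop an explicit rename to
        # None/NA/NaN; comparing against the sentinel honors both cases.
        if name is not no_default:
            out.name = name
        return out

s = Named("abc")
assert s.with_name().name == "abc"     # name not supplied: keep existing name
assert s.with_name(None).name is None  # explicit None is now honored
```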
Authors: - GALI PREM SAGAR (https://github.com/galipremsagar) Approvers: - Bradley Dice (https://github.com/bdice) - Matthew Roeschke (https://github.com/mroeschke) URL: https://github.com/rapidsai/cudf/pull/14080 --- python/cudf/cudf/core/index.py | 10 +++++---- python/cudf/cudf/core/series.py | 4 ++-- python/cudf/cudf/testing/_utils.py | 27 ++++++++++++++++++++++++ python/cudf/cudf/tests/test_binops.py | 30 ++------------------------- python/cudf/cudf/tests/test_index.py | 13 ++++++++---- python/cudf/cudf/tests/test_series.py | 15 ++++++++++++++ 6 files changed, 61 insertions(+), 38 deletions(-) diff --git a/python/cudf/cudf/core/index.py b/python/cudf/cudf/core/index.py index 4bb5428838f..57c481db0d8 100644 --- a/python/cudf/cudf/core/index.py +++ b/python/cudf/cudf/core/index.py @@ -28,6 +28,7 @@ from cudf._lib.filling import sequence from cudf._lib.search import search_sorted from cudf._lib.types import size_type_dtype +from cudf.api.extensions import no_default from cudf.api.types import ( _is_non_decimal_numeric_dtype, is_categorical_dtype, @@ -95,7 +96,7 @@ def _lexsorted_equal_range( return lower_bound, upper_bound, sort_inds -def _index_from_data(data: MutableMapping, name: Any = None): +def _index_from_data(data: MutableMapping, name: Any = no_default): """Construct an index of the appropriate type from some data.""" if len(data) == 0: @@ -131,7 +132,7 @@ def _index_from_data(data: MutableMapping, name: Any = None): def _index_from_columns( - columns: List[cudf.core.column.ColumnBase], name: Any = None + columns: List[cudf.core.column.ColumnBase], name: Any = no_default ): """Construct an index from ``columns``, with levels named 0, 1, 2...""" return _index_from_data(dict(zip(range(len(columns)), columns)), name=name) @@ -1032,10 +1033,10 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): @classmethod @_cudf_nvtx_annotate def _from_data( - cls, data: MutableMapping, name: Any = None + cls, data: MutableMapping, name: Any = no_default ) -> GenericIndex: out = super()._from_data(data=data) - if name is not None: + if name is not no_default: out.name = name return out @@ -3334,6 +3335,7 @@ def as_index(arbitrary, nan_as_null=None, **kwargs) -> BaseIndex: - DatetimeIndex for Datetime input. - GenericIndex for all other inputs. 
""" + kwargs = _setdefault_name(arbitrary, **kwargs) if isinstance(arbitrary, cudf.MultiIndex): return arbitrary diff --git a/python/cudf/cudf/core/series.py b/python/cudf/cudf/core/series.py index 78be3085754..f44a3123dd3 100644 --- a/python/cudf/cudf/core/series.py +++ b/python/cudf/cudf/core/series.py @@ -605,10 +605,10 @@ def _from_data( cls, data: MutableMapping, index: Optional[BaseIndex] = None, - name: Any = None, + name: Any = no_default, ) -> Series: out = super()._from_data(data=data, index=index) - if name is not None: + if name is not no_default: out.name = name return out diff --git a/python/cudf/cudf/testing/_utils.py b/python/cudf/cudf/testing/_utils.py index 0489329d801..e949f7d78e7 100644 --- a/python/cudf/cudf/testing/_utils.py +++ b/python/cudf/cudf/testing/_utils.py @@ -48,6 +48,33 @@ OTHER_TYPES = sorted(list(dtypeutils.OTHER_TYPES)) ALL_TYPES = sorted(list(dtypeutils.ALL_TYPES)) +SERIES_OR_INDEX_NAMES = [ + None, + pd.NA, + cudf.NA, + np.nan, + float("NaN"), + "abc", + 1, + pd.NaT, + np.datetime64("nat"), + np.timedelta64("NaT"), + np.timedelta64(10, "D"), + np.timedelta64(5, "D"), + np.datetime64("1970-01-01 00:00:00.000000001"), + np.datetime64("1970-01-01 00:00:00.000000002"), + pd.Timestamp(1), + pd.Timestamp(2), + pd.Timedelta(1), + pd.Timedelta(2), + Decimal("NaN"), + Decimal("1.2"), + np.int64(1), + np.int32(1), + np.float32(1), + pd.Timestamp(1), +] + def set_random_null_mask_inplace(series, null_probability=0.5, seed=None): """Randomly nullify elements in series with the provided probability.""" diff --git a/python/cudf/cudf/tests/test_binops.py b/python/cudf/cudf/tests/test_binops.py index 549cd8da78e..87d510927ae 100644 --- a/python/cudf/cudf/tests/test_binops.py +++ b/python/cudf/cudf/tests/test_binops.py @@ -150,32 +150,6 @@ lambda x: cudf.Scalar(0) / x, ] -_series_or_index_names = [ - None, - pd.NA, - cudf.NA, - np.nan, - float("NaN"), - "abc", - 1, - pd.NaT, - np.datetime64("nat"), - np.timedelta64("NaT"), - np.timedelta64(10, "D"), - np.timedelta64(5, "D"), - np.datetime64("1970-01-01 00:00:00.000000001"), - np.datetime64("1970-01-01 00:00:00.000000002"), - pd.Timestamp(1), - pd.Timestamp(2), - pd.Timedelta(1), - pd.Timedelta(2), - decimal.Decimal("NaN"), - decimal.Decimal("1.2"), - np.int64(1), - np.int32(1), - np.float32(1), - pd.Timestamp(1), -] pytest_xfail = pytest.mark.xfail pytestmark = pytest.mark.spilling @@ -3315,8 +3289,8 @@ def test_binop_index_series(op): utils.assert_eq(expected, actual) -@pytest.mark.parametrize("name1", _series_or_index_names) -@pytest.mark.parametrize("name2", _series_or_index_names) +@pytest.mark.parametrize("name1", utils.SERIES_OR_INDEX_NAMES) +@pytest.mark.parametrize("name2", utils.SERIES_OR_INDEX_NAMES) def test_binop_index_dt_td_series_with_names(name1, name2): gi = cudf.Index([1, 2, 3], dtype="datetime64[ns]", name=name1) gs = cudf.Series([10, 11, 12], dtype="timedelta64[ns]", name=name2) diff --git a/python/cudf/cudf/tests/test_index.py b/python/cudf/cudf/tests/test_index.py index 58dbc48e31e..f7f6e1f9114 100644 --- a/python/cudf/cudf/tests/test_index.py +++ b/python/cudf/cudf/tests/test_index.py @@ -24,6 +24,7 @@ FLOAT_TYPES, NUMERIC_TYPES, OTHER_TYPES, + SERIES_OR_INDEX_NAMES, SIGNED_INTEGER_TYPES, SIGNED_TYPES, UNSIGNED_TYPES, @@ -227,12 +228,16 @@ def test_pandas_as_index(): ) -def test_index_rename(): - pds = pd.Index([1, 2, 3], name="asdf") +@pytest.mark.parametrize("initial_name", SERIES_OR_INDEX_NAMES) +@pytest.mark.parametrize("name", SERIES_OR_INDEX_NAMES) +def test_index_rename(initial_name, name): 
+ pds = pd.Index([1, 2, 3], name=initial_name) gds = as_index(pds) - expect = pds.rename("new_name") - got = gds.rename("new_name") + assert_eq(pds, gds) + + expect = pds.rename(name) + got = gds.rename(name) assert_eq(expect, got) """ diff --git a/python/cudf/cudf/tests/test_series.py b/python/cudf/cudf/tests/test_series.py index 783d7d31d7f..8a652caa6e2 100644 --- a/python/cudf/cudf/tests/test_series.py +++ b/python/cudf/cudf/tests/test_series.py @@ -16,6 +16,7 @@ from cudf.core._compat import PANDAS_LT_140 from cudf.testing._utils import ( NUMERIC_TYPES, + SERIES_OR_INDEX_NAMES, TIMEDELTA_TYPES, _create_pandas_series, assert_eq, @@ -2267,3 +2268,17 @@ def test_series_unique_pandas_compatibility(): actual = gs.unique() expected = ps.unique() assert_eq(actual, expected) + + +@pytest.mark.parametrize("initial_name", SERIES_OR_INDEX_NAMES) +@pytest.mark.parametrize("name", SERIES_OR_INDEX_NAMES) +def test_series_rename(initial_name, name): + gsr = cudf.Series([1, 2, 3], name=initial_name) + psr = pd.Series([1, 2, 3], name=initial_name) + + assert_eq(gsr, psr) + + actual = gsr.rename(name) + expected = psr.rename(name) + + assert_eq(actual, expected) From 1911d33231ac9caeaf5310173bf6a47ffca35fe8 Mon Sep 17 00:00:00 2001 From: GALI PREM SAGAR Date: Mon, 11 Sep 2023 22:55:32 -0500 Subject: [PATCH 058/150] Fix various issues in `Index.intersection` (#14054) This PR fixes multiple issues with `Index.intersection`: - [x] Fixes issues with handling empty inputs, closes #14020 - [x] Adds validation for inputs. - [x] Properly handles various types in `intersection` implementation and fix `RangeIndex.intersection` by having a separate implementation for it. Authors: - GALI PREM SAGAR (https://github.com/galipremsagar) Approvers: - Bradley Dice (https://github.com/bdice) URL: https://github.com/rapidsai/cudf/pull/14054 --- python/cudf/cudf/core/_base_index.py | 23 +++++++++--- python/cudf/cudf/core/index.py | 6 ++-- python/cudf/cudf/core/join/_join_helpers.py | 1 + python/cudf/cudf/tests/test_index.py | 40 ++++++++++++++++----- python/cudf/cudf/utils/dtypes.py | 10 ++++++ 5 files changed, 65 insertions(+), 15 deletions(-) diff --git a/python/cudf/cudf/core/_base_index.py b/python/cudf/cudf/core/_base_index.py index 8091f3f7dd2..2f6e864b51c 100644 --- a/python/cudf/cudf/core/_base_index.py +++ b/python/cudf/cudf/core/_base_index.py @@ -608,8 +608,14 @@ def intersection(self, other, sort=False): (1, 'Blue')], ) """ + if not can_convert_to_column(other): + raise TypeError("Input must be Index or array-like") + if not isinstance(other, BaseIndex): - other = cudf.Index(other, name=self.name) + other = cudf.Index( + other, + name=getattr(other, "name", self.name), + ) if sort not in {None, False}: raise ValueError( @@ -617,10 +623,17 @@ def intersection(self, other, sort=False): f"None or False; {sort} was passed." 
) - if self.equals(other): - if self.has_duplicates: - return self.unique()._get_reconciled_name_object(other) - return self._get_reconciled_name_object(other) + if not len(self) or not len(other) or self.equals(other): + common_dtype = cudf.utils.dtypes._dtype_pandas_compatible( + cudf.utils.dtypes.find_common_type([self.dtype, other.dtype]) + ) + + lhs = self.unique() if self.has_duplicates else self + rhs = other + if not len(other): + lhs, rhs = rhs, lhs + + return lhs._get_reconciled_name_object(rhs).astype(common_dtype) res_name = _get_result_name(self.name, other.name) diff --git a/python/cudf/cudf/core/index.py b/python/cudf/cudf/core/index.py index 57c481db0d8..56ec9ce0359 100644 --- a/python/cudf/cudf/core/index.py +++ b/python/cudf/cudf/core/index.py @@ -682,7 +682,9 @@ def _union(self, other, sort=None): @_cudf_nvtx_annotate def _intersection(self, other, sort=False): if not isinstance(other, RangeIndex): - return super()._intersection(other, sort=sort) + return self._try_reconstruct_range_index( + super()._intersection(other, sort=sort) + ) if not len(self) or not len(other): return RangeIndex(0) @@ -723,7 +725,7 @@ def _intersection(self, other, sort=False): if sort is None: new_index = new_index.sort_values() - return new_index + return self._try_reconstruct_range_index(new_index) @_cudf_nvtx_annotate def difference(self, other, sort=None): diff --git a/python/cudf/cudf/core/join/_join_helpers.py b/python/cudf/cudf/core/join/_join_helpers.py index 7d799fa1573..1071261044f 100644 --- a/python/cudf/cudf/core/join/_join_helpers.py +++ b/python/cudf/cudf/core/join/_join_helpers.py @@ -74,6 +74,7 @@ def _match_join_keys( common_type = ltype.categories.dtype else: common_type = rtype.categories.dtype + common_type = cudf.utils.dtypes._dtype_pandas_compatible(common_type) return lcol.astype(common_type), rcol.astype(common_type) if is_dtype_equal(ltype, rtype): diff --git a/python/cudf/cudf/tests/test_index.py b/python/cudf/cudf/tests/test_index.py index f7f6e1f9114..6fb615c22e0 100644 --- a/python/cudf/cudf/tests/test_index.py +++ b/python/cudf/cudf/tests/test_index.py @@ -11,6 +11,7 @@ import pytest import cudf +from cudf.api.types import is_bool_dtype from cudf.core._compat import PANDAS_GE_133, PANDAS_GE_200 from cudf.core.index import ( CategoricalIndex, @@ -2104,25 +2105,48 @@ def test_union_index(idx1, idx2, sort): (pd.Index([0, 1, 2, 30], name=pd.NA), pd.Index([30, 0, 90, 100])), (pd.Index([0, 1, 2, 30], name="a"), [90, 100]), (pd.Index([0, 1, 2, 30]), pd.Index([0, 10, 1.0, 11])), - (pd.Index(["a", "b", "c", "d", "c"]), pd.Index(["a", "c", "z"])), + ( + pd.Index(["a", "b", "c", "d", "c"]), + pd.Index(["a", "c", "z"], name="abc"), + ), ( pd.Index(["a", "b", "c", "d", "c"]), pd.Index(["a", "b", "c", "d", "c"]), ), (pd.Index([True, False, True, True]), pd.Index([10, 11, 12, 0, 1, 2])), (pd.Index([True, False, True, True]), pd.Index([True, True])), + (pd.RangeIndex(0, 10, name="a"), pd.Index([5, 6, 7], name="b")), + (pd.Index(["a", "b", "c"], dtype="category"), pd.Index(["a", "b"])), + (pd.Index(["a", "b", "c"], dtype="category"), pd.Index([1, 2, 3])), + (pd.Index([0, 1, 2], dtype="category"), pd.RangeIndex(0, 10)), + (pd.Index(["a", "b", "c"], name="abc"), []), + (pd.Index([], name="abc"), pd.RangeIndex(0, 4)), + (pd.Index([1, 2, 3]), pd.Index([1, 2], dtype="category")), + (pd.Index([]), pd.Index([1, 2], dtype="category")), ], ) @pytest.mark.parametrize("sort", [None, False]) -def test_intersection_index(idx1, idx2, sort): +@pytest.mark.parametrize("pandas_compatible", [True, 
False]) +def test_intersection_index(idx1, idx2, sort, pandas_compatible): expected = idx1.intersection(idx2, sort=sort) - idx1 = cudf.from_pandas(idx1) if isinstance(idx1, pd.Index) else idx1 - idx2 = cudf.from_pandas(idx2) if isinstance(idx2, pd.Index) else idx2 - - actual = idx1.intersection(idx2, sort=sort) - - assert_eq(expected, actual, exact=False) + with cudf.option_context("mode.pandas_compatible", pandas_compatible): + idx1 = cudf.from_pandas(idx1) if isinstance(idx1, pd.Index) else idx1 + idx2 = cudf.from_pandas(idx2) if isinstance(idx2, pd.Index) else idx2 + + actual = idx1.intersection(idx2, sort=sort) + + # TODO: Resolve the bool vs ints mixed issue + # once pandas has a direction on this issue + # https://github.com/pandas-dev/pandas/issues/44000 + assert_eq( + expected, + actual, + exact=False + if (is_bool_dtype(idx1.dtype) and not is_bool_dtype(idx2.dtype)) + or (not is_bool_dtype(idx1.dtype) or is_bool_dtype(idx2.dtype)) + else True, + ) @pytest.mark.parametrize( diff --git a/python/cudf/cudf/utils/dtypes.py b/python/cudf/cudf/utils/dtypes.py index e50457b8e7b..1b94db75340 100644 --- a/python/cudf/cudf/utils/dtypes.py +++ b/python/cudf/cudf/utils/dtypes.py @@ -634,6 +634,16 @@ def find_common_type(dtypes): return cudf.dtype(common_dtype) +def _dtype_pandas_compatible(dtype): + """ + A utility function, that returns `str` instead of `object` + dtype when pandas comptibility mode is enabled. + """ + if cudf.get_option("mode.pandas_compatible") and dtype == cudf.dtype("O"): + return "str" + return dtype + + def _can_cast(from_dtype, to_dtype): """ Utility function to determine if we can cast From 72c958380f42dac5bd04492043cfd569fdcd5f0a Mon Sep 17 00:00:00 2001 From: Bradley Dice Date: Tue, 12 Sep 2023 09:39:15 -0500 Subject: [PATCH 059/150] Add fallback matrix for nvcomp. (#14082) Some platforms (such as aarch64 + CUDA 12) don't have a matching matrix entry for nvcomp. This PR adds a fallback matrix entry so it is possible to attempt local development on aarch64 with CUDA 12. Authors: - Bradley Dice (https://github.com/bdice) Approvers: - AJ Schmidt (https://github.com/ajschmidt8) URL: https://github.com/rapidsai/cudf/pull/14082 --- dependencies.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/dependencies.yaml b/dependencies.yaml index f99b7404854..398ae193fe6 100644 --- a/dependencies.yaml +++ b/dependencies.yaml @@ -241,6 +241,10 @@ dependencies: cuda: "11.8" packages: - *nvcomp + # TODO: Fallback matrix for aarch64 CUDA 12. After migrating to nvcomp 3, + # all CUDA/arch combinations should be supported by existing packages. + - matrix: + packages: build_wheels: common: - output_types: pyproject From 258e0fef942b734af24adf612b7998cb5da523c5 Mon Sep 17 00:00:00 2001 From: Andy Grove Date: Tue, 12 Sep 2023 15:09:22 -0600 Subject: [PATCH 060/150] [Java] Add recoverWithNull to JSONOptions and pass to Table.readJSON (#14078) This PR exposes the recently added `json_reader_options_builder::recovery_mode` option in the JNI layer. 
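A minimal usage sketch, adapted from the new test added below (the file path is illustrative; `recoverWithNull` defaults to false):

```java
// Assumes the usual ai.rapids.cudf and java.io.File imports; mirrors
// TableTest#testReadJSONFileWithInvalidLines below.
Schema schema = Schema.builder()
    .column(DType.STRING, "name")
    .column(DType.INT32, "age")
    .build();
JSONOptions opts = JSONOptions.builder()
    .withLines(true)
    .withRecoverWithNull(true) // malformed lines yield nulls instead of throwing
    .build();
try (Table table = Table.readJSON(schema, opts, new File("people_with_invalid_lines.json"))) {
  // each invalid input line becomes a row of nulls; with
  // withRecoverWithNull(false) the same read throws CudfException
}
```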
closes #14073 Authors: - Andy Grove (https://github.com/andygrove) - Nghia Truong (https://github.com/ttnghia) Approvers: - Gera Shegalov (https://github.com/gerashegalov) - Robert (Bobby) Evans (https://github.com/revans2) - Raza Jafri (https://github.com/razajafri) - Nghia Truong (https://github.com/ttnghia) URL: https://github.com/rapidsai/cudf/pull/14078 --- .../main/java/ai/rapids/cudf/JSONOptions.java | 25 +++++++++++++- java/src/main/java/ai/rapids/cudf/Table.java | 12 ++++--- java/src/main/native/src/TableJni.cpp | 18 +++++++--- .../test/java/ai/rapids/cudf/TableTest.java | 34 +++++++++++++++++++ .../resources/people_with_invalid_lines.json | 4 +++ 5 files changed, 83 insertions(+), 10 deletions(-) create mode 100644 java/src/test/resources/people_with_invalid_lines.json diff --git a/java/src/main/java/ai/rapids/cudf/JSONOptions.java b/java/src/main/java/ai/rapids/cudf/JSONOptions.java index 85a9eb7beb3..f98687df5fa 100644 --- a/java/src/main/java/ai/rapids/cudf/JSONOptions.java +++ b/java/src/main/java/ai/rapids/cudf/JSONOptions.java @@ -1,6 +1,6 @@ /* * - * Copyright (c) 2019-2022, NVIDIA CORPORATION. + * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -29,11 +29,13 @@ public final class JSONOptions extends ColumnFilterOptions { private final boolean dayFirst; private final boolean lines; + private final boolean recoverWithNull; private JSONOptions(Builder builder) { super(builder); dayFirst = builder.dayFirst; lines = builder.lines; + recoverWithNull = builder.recoverWithNull; } public boolean isDayFirst() { @@ -44,6 +46,11 @@ public boolean isLines() { return lines; } + /** Return the value of the recoverWithNull option */ + public boolean isRecoverWithNull() { + return recoverWithNull; + } + @Override String[] getIncludeColumnNames() { throw new UnsupportedOperationException("JSON reader didn't support column prune"); @@ -57,6 +64,8 @@ public static final class Builder extends ColumnFilterOptions.Builder= 0 && offset < buffer.length; return new TableWithMeta(readAndInferJSON(buffer.getAddress() + offset, len, - opts.isDayFirst(), opts.isLines())); + opts.isDayFirst(), opts.isLines(), opts.isRecoverWithNull())); } /** @@ -1121,7 +1122,8 @@ public static Table readJSON(Schema schema, JSONOptions opts, HostMemoryBuffer b assert offset >= 0 && offset < buffer.length; try (TableWithMeta twm = new TableWithMeta(readJSON(schema.getColumnNames(), schema.getTypeIds(), schema.getTypeScales(), null, - buffer.getAddress() + offset, len, opts.isDayFirst(), opts.isLines()))) { + buffer.getAddress() + offset, len, opts.isDayFirst(), opts.isLines(), + opts.isRecoverWithNull()))) { return gatherJSONColumns(schema, twm); } } diff --git a/java/src/main/native/src/TableJni.cpp b/java/src/main/native/src/TableJni.cpp index b05fc9b7bc4..b208ef8f381 100644 --- a/java/src/main/native/src/TableJni.cpp +++ b/java/src/main/native/src/TableJni.cpp @@ -1331,7 +1331,8 @@ JNIEXPORT void JNICALL Java_ai_rapids_cudf_Table_endWriteCSVToBuffer(JNIEnv *env } JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_readAndInferJSON( - JNIEnv *env, jclass, jlong buffer, jlong buffer_length, jboolean day_first, jboolean lines) { + JNIEnv *env, jclass, jlong buffer, jlong buffer_length, jboolean day_first, jboolean lines, + jboolean recover_with_null) { JNI_NULL_CHECK(env, buffer, "buffer cannot be null", 0); if (buffer_length <= 0) { 
@@ -1344,9 +1345,13 @@ JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_readAndInferJSON( auto source = cudf::io::source_info{reinterpret_cast(buffer), static_cast(buffer_length)}; + auto const recovery_mode = recover_with_null ? + cudf::io::json_recovery_mode_t::RECOVER_WITH_NULL : + cudf::io::json_recovery_mode_t::FAIL; cudf::io::json_reader_options_builder opts = cudf::io::json_reader_options::builder(source) .dayfirst(static_cast(day_first)) - .lines(static_cast(lines)); + .lines(static_cast(lines)) + .recovery_mode(recovery_mode); auto result = std::make_unique(cudf::io::read_json(opts.build())); @@ -1404,7 +1409,8 @@ JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_TableWithMeta_releaseTable(JNIE JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_readJSON( JNIEnv *env, jclass, jobjectArray col_names, jintArray j_types, jintArray j_scales, - jstring inputfilepath, jlong buffer, jlong buffer_length, jboolean day_first, jboolean lines) { + jstring inputfilepath, jlong buffer, jlong buffer_length, jboolean day_first, jboolean lines, + jboolean recover_with_null) { bool read_buffer = true; if (buffer == 0) { @@ -1448,9 +1454,13 @@ JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_readJSON( static_cast(buffer_length)} : cudf::io::source_info{filename.get()}; + cudf::io::json_recovery_mode_t recovery_mode = + recover_with_null ? cudf::io::json_recovery_mode_t::RECOVER_WITH_NULL : + cudf::io::json_recovery_mode_t::FAIL; cudf::io::json_reader_options_builder opts = cudf::io::json_reader_options::builder(source) .dayfirst(static_cast(day_first)) - .lines(static_cast(lines)); + .lines(static_cast(lines)) + .recovery_mode(recovery_mode); if (!n_col_names.is_null() && data_types.size() > 0) { if (n_col_names.size() != n_types.size()) { diff --git a/java/src/test/java/ai/rapids/cudf/TableTest.java b/java/src/test/java/ai/rapids/cudf/TableTest.java index 3740328615a..59f0d180c6e 100644 --- a/java/src/test/java/ai/rapids/cudf/TableTest.java +++ b/java/src/test/java/ai/rapids/cudf/TableTest.java @@ -86,6 +86,7 @@ public class TableTest extends CudfTestBase { private static final File TEST_ALL_TYPES_PLAIN_AVRO_FILE = TestUtils.getResourceAsFile("alltypes_plain.avro"); private static final File TEST_SIMPLE_CSV_FILE = TestUtils.getResourceAsFile("simple.csv"); private static final File TEST_SIMPLE_JSON_FILE = TestUtils.getResourceAsFile("people.json"); + private static final File TEST_JSON_ERROR_FILE = TestUtils.getResourceAsFile("people_with_invalid_lines.json"); private static final Schema CSV_DATA_BUFFER_SCHEMA = Schema.builder() .column(DType.INT32, "A") @@ -326,6 +327,39 @@ void testReadJSONFile() { } } + @Test + void testReadJSONFileWithInvalidLines() { + Schema schema = Schema.builder() + .column(DType.STRING, "name") + .column(DType.INT32, "age") + .build(); + + // test with recoverWithNulls=true + { + JSONOptions opts = JSONOptions.builder() + .withLines(true) + .withRecoverWithNull(true) + .build(); + try (Table expected = new Table.TestBuilder() + .column("Michael", "Andy", null, "Justin") + .column(null, 30, null, 19) + .build(); + Table table = Table.readJSON(schema, opts, TEST_JSON_ERROR_FILE)) { + assertTablesAreEqual(expected, table); + } + } + + // test with recoverWithNulls=false + { + JSONOptions opts = JSONOptions.builder() + .withLines(true) + .withRecoverWithNull(false) + .build(); + assertThrows(CudfException.class, () -> + Table.readJSON(schema, opts, TEST_JSON_ERROR_FILE)); + } + } + @Test void testReadJSONFileWithDifferentColumnOrder() { Schema schema = Schema.builder() diff 
--git a/java/src/test/resources/people_with_invalid_lines.json b/java/src/test/resources/people_with_invalid_lines.json new file mode 100644 index 00000000000..a99592e3eca --- /dev/null +++ b/java/src/test/resources/people_with_invalid_lines.json @@ -0,0 +1,4 @@ +{"name":"Michael"} +{"name":"Andy", "age":30} +this_line_is_not_valid +{"name":"Justin", "age":19} From 3be772fc5560127ff0ba6ad99d1cf618176e57fd Mon Sep 17 00:00:00 2001 From: Ed Seidl Date: Tue, 12 Sep 2023 17:26:49 -0700 Subject: [PATCH 061/150] Global stream pool (#13922) #13637 added a static stream pool object for use by the Parquet reader. This PR expands upon that by: - Moving the stream pool to the `cudf::detail` namespace. - Adding a debugging implementation that always returns the default stream. - Hiding implementation details behind a more streamlined interface. - Using cuda events for synchronization. Authors: - Ed Seidl (https://github.com/etseidl) - Vukasin Milovanovic (https://github.com/vuule) - Mark Harris (https://github.com/harrism) Approvers: - Bradley Dice (https://github.com/bdice) - Vukasin Milovanovic (https://github.com/vuule) - Mark Harris (https://github.com/harrism) URL: https://github.com/rapidsai/cudf/pull/13922 --- cpp/CMakeLists.txt | 1 + .../cudf/detail/utilities/stream_pool.hpp | 64 +++++ cpp/src/io/parquet/reader_impl.cpp | 43 +-- cpp/src/io/text/multibyte_split.cu | 48 +--- cpp/src/utilities/stream_pool.cpp | 256 ++++++++++++++++++ 5 files changed, 341 insertions(+), 71 deletions(-) create mode 100644 cpp/include/cudf/detail/utilities/stream_pool.hpp create mode 100644 cpp/src/utilities/stream_pool.cpp diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index 516865e5782..c37d05a21c7 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -633,6 +633,7 @@ add_library( src/utilities/linked_column.cpp src/utilities/logger.cpp src/utilities/stacktrace.cpp + src/utilities/stream_pool.cpp src/utilities/traits.cpp src/utilities/type_checks.cpp src/utilities/type_dispatcher.cpp diff --git a/cpp/include/cudf/detail/utilities/stream_pool.hpp b/cpp/include/cudf/detail/utilities/stream_pool.hpp new file mode 100644 index 00000000000..95384a9d73e --- /dev/null +++ b/cpp/include/cudf/detail/utilities/stream_pool.hpp @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include + +#include + +#include +#include + +namespace cudf::detail { + +/** + * @brief Acquire a set of `cuda_stream_view` objects and synchronize them to an event on another + * stream. + * + * By default an underlying `rmm::cuda_stream_pool` is used to obtain the streams. The only other + * implementation at present is a debugging version that always returns the stream returned by + * `cudf::get_default_stream()`. To use this debugging version, set the environment variable + * `LIBCUDF_USE_DEBUG_STREAM_POOL`. 
+ * + * Example usage: + * @code{.cpp} + * auto stream = cudf::get_default_stream(); + * auto const num_streams = 2; + * // do work on stream + * // allocate streams and wait for an event on stream before executing on any of streams + * auto streams = cudf::detail::fork_streams(stream, num_streams); + * // do work on streams[0] and streams[1] + * // wait for event on streams before continuing to do work on stream + * cudf::detail::join_streams(streams, stream); + * @endcode + * + * @param stream Stream that the returned streams will wait on. + * @param count The number of `cuda_stream_view` objects to return. + * @return Vector containing `count` stream views. + */ +[[nodiscard]] std::vector fork_streams(rmm::cuda_stream_view stream, + std::size_t count); + +/** + * @brief Synchronize a stream to an event on a set of streams. + * + * @param streams Streams to wait on. + * @param stream Joined stream that synchronizes with the waited-on streams. + */ +void join_streams(host_span streams, rmm::cuda_stream_view stream); + +} // namespace cudf::detail diff --git a/cpp/src/io/parquet/reader_impl.cpp b/cpp/src/io/parquet/reader_impl.cpp index 8a73c43be3e..8b0a0bd4eb0 100644 --- a/cpp/src/io/parquet/reader_impl.cpp +++ b/cpp/src/io/parquet/reader_impl.cpp @@ -18,31 +18,15 @@ #include #include +#include #include #include +#include #include namespace cudf::io::detail::parquet { -namespace { - -int constexpr NUM_DECODERS = 3; // how many decode kernels are there to run -int constexpr APPROX_NUM_THREADS = 4; // guestimate from DaveB -int constexpr STREAM_POOL_SIZE = NUM_DECODERS * APPROX_NUM_THREADS; - -auto& get_stream_pool() -{ - // TODO: creating this on the heap because there were issues with trying to call the - // stream pool destructor during cuda shutdown that lead to a segmentation fault in - // nvbench. this allocation is being deliberately leaked to avoid the above, but still - // results in non-fatal warnings when running nvbench in cuda-gdb.
- static auto pool = new rmm::cuda_stream_pool{STREAM_POOL_SIZE}; - return *pool; -} - -} // namespace - void reader::impl::decode_page_data(size_t skip_rows, size_t num_rows) { auto& chunks = _file_itm_data.chunks; @@ -178,34 +162,33 @@ void reader::impl::decode_page_data(size_t skip_rows, size_t num_rows) chunks.host_to_device_async(_stream); chunk_nested_valids.host_to_device_async(_stream); chunk_nested_data.host_to_device_async(_stream); - _stream.synchronize(); - auto const level_type_size = _file_itm_data.level_type_size; + // get the number of streams we need from the pool and tell them to wait on the H2D copies + int const nkernels = std::bitset<32>(kernel_mask).count(); + auto streams = cudf::detail::fork_streams(_stream, nkernels); - // vector of launched streams - std::vector streams; + auto const level_type_size = _file_itm_data.level_type_size; // launch string decoder + int s_idx = 0; if (has_strings) { - streams.push_back(get_stream_pool().get_stream()); - chunk_nested_str_data.host_to_device_async(streams.back()); - gpu::DecodeStringPageData(pages, chunks, num_rows, skip_rows, level_type_size, streams.back()); + auto& stream = streams[s_idx++]; + chunk_nested_str_data.host_to_device_async(stream); + gpu::DecodeStringPageData(pages, chunks, num_rows, skip_rows, level_type_size, stream); } // launch delta binary decoder if ((kernel_mask & gpu::KERNEL_MASK_DELTA_BINARY) != 0) { - streams.push_back(get_stream_pool().get_stream()); - gpu::DecodeDeltaBinary(pages, chunks, num_rows, skip_rows, level_type_size, streams.back()); + gpu::DecodeDeltaBinary(pages, chunks, num_rows, skip_rows, level_type_size, streams[s_idx++]); } // launch the catch-all page decoder if ((kernel_mask & gpu::KERNEL_MASK_GENERAL) != 0) { - streams.push_back(get_stream_pool().get_stream()); - gpu::DecodePageData(pages, chunks, num_rows, skip_rows, level_type_size, streams.back()); + gpu::DecodePageData(pages, chunks, num_rows, skip_rows, level_type_size, streams[s_idx++]); } // synchronize the streams - std::for_each(streams.begin(), streams.end(), [](auto& stream) { stream.synchronize(); }); + cudf::detail::join_streams(streams, _stream); pages.device_to_host_async(_stream); page_nesting.device_to_host_async(_stream); diff --git a/cpp/src/io/text/multibyte_split.cu b/cpp/src/io/text/multibyte_split.cu index 818bbc0a18a..772bcad8ada 100644 --- a/cpp/src/io/text/multibyte_split.cu +++ b/cpp/src/io/text/multibyte_split.cu @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -32,7 +33,6 @@ #include #include -#include #include #include #include @@ -301,44 +301,12 @@ namespace io { namespace text { namespace detail { -void fork_stream(std::vector streams, rmm::cuda_stream_view stream) -{ - cudaEvent_t event; - CUDF_CUDA_TRY(cudaEventCreate(&event)); - CUDF_CUDA_TRY(cudaEventRecord(event, stream)); - for (uint32_t i = 0; i < streams.size(); i++) { - CUDF_CUDA_TRY(cudaStreamWaitEvent(streams[i], event, 0)); - } - CUDF_CUDA_TRY(cudaEventDestroy(event)); -} - -void join_stream(std::vector streams, rmm::cuda_stream_view stream) -{ - cudaEvent_t event; - CUDF_CUDA_TRY(cudaEventCreate(&event)); - for (uint32_t i = 0; i < streams.size(); i++) { - CUDF_CUDA_TRY(cudaEventRecord(event, streams[i])); - CUDF_CUDA_TRY(cudaStreamWaitEvent(stream, event, 0)); - } - CUDF_CUDA_TRY(cudaEventDestroy(event)); -} - -std::vector get_streams(int32_t count, rmm::cuda_stream_pool& stream_pool) -{ - auto streams = std::vector(); - for (int32_t i = 0; i < count; i++) { - 
streams.emplace_back(stream_pool.get_stream()); - } - return streams; -} - std::unique_ptr multibyte_split(cudf::io::text::data_chunk_source const& source, std::string const& delimiter, byte_range_info byte_range, bool strip_delimiters, rmm::cuda_stream_view stream, - rmm::mr::device_memory_resource* mr, - rmm::cuda_stream_pool& stream_pool) + rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); @@ -365,8 +333,7 @@ std::unique_ptr multibyte_split(cudf::io::text::data_chunk_source CUDF_EXPECTS(delimiter.size() < multistate::max_segment_value, "delimiter contains too many total tokens to produce a deterministic result."); - auto concurrency = 2; - auto streams = get_streams(concurrency, stream_pool); + auto const concurrency = 2; // must be at least 32 when using warp-reduce on partials // must be at least 1 more than max possible concurrent tiles @@ -411,7 +378,7 @@ std::unique_ptr multibyte_split(cudf::io::text::data_chunk_source output_builder row_offset_storage(ITEMS_PER_CHUNK, max_growth, stream); output_builder char_storage(ITEMS_PER_CHUNK, max_growth, stream); - fork_stream(streams, stream); + auto streams = cudf::detail::fork_streams(stream, concurrency); cudaEvent_t last_launch_event; CUDF_CUDA_TRY(cudaEventCreate(&last_launch_event)); @@ -532,7 +499,7 @@ std::unique_ptr multibyte_split(cudf::io::text::data_chunk_source CUDF_CUDA_TRY(cudaEventDestroy(last_launch_event)); - join_stream(streams, stream); + cudf::detail::join_streams(streams, stream); // if the input was empty, we didn't find a delimiter at all, // or the first delimiter was also the last: empty output @@ -602,11 +569,10 @@ std::unique_ptr multibyte_split(cudf::io::text::data_chunk_source parse_options options, rmm::mr::device_memory_resource* mr) { - auto stream = cudf::get_default_stream(); - auto stream_pool = rmm::cuda_stream_pool(2); + auto stream = cudf::get_default_stream(); auto result = detail::multibyte_split( - source, delimiter, options.byte_range, options.strip_delimiters, stream, mr, stream_pool); + source, delimiter, options.byte_range, options.strip_delimiters, stream, mr); return result; } diff --git a/cpp/src/utilities/stream_pool.cpp b/cpp/src/utilities/stream_pool.cpp new file mode 100644 index 00000000000..b3b20889ef8 --- /dev/null +++ b/cpp/src/utilities/stream_pool.cpp @@ -0,0 +1,256 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +namespace cudf::detail { + +namespace { + +// TODO: what is a good number here. what's the penalty for making it larger? +// Dave Baranec rule of thumb was max_streams_needed * num_concurrent_threads, +// where num_concurrent_threads was estimated to be 4. so using 32 will allow +// for 8 streams per thread, which should be plenty (decoding will be up to 4 +// kernels when delta_byte_array decoding is added). rmm::cuda_stream_pool +// defaults to 16. 
+std::size_t constexpr STREAM_POOL_SIZE = 32; + +// FIXME: "borrowed" from rmm...remove when this stream pool is moved there +#ifdef NDEBUG +#define CUDF_ASSERT_CUDA_SUCCESS(_call) \ + do { \ + (_call); \ + } while (0); +#else +#define CUDF_ASSERT_CUDA_SUCCESS(_call) \ + do { \ + cudaError_t const status__ = (_call); \ + if (status__ != cudaSuccess) { \ + std::cerr << "CUDA Error detected. " << cudaGetErrorName(status__) << " " \ + << cudaGetErrorString(status__) << std::endl; \ + } \ + /* NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) */ \ + assert(status__ == cudaSuccess); \ + } while (0) +#endif + +class cuda_stream_pool { + public: + // matching type used in rmm::cuda_stream_pool::get_stream(stream_id) + using stream_id_type = std::size_t; + + virtual ~cuda_stream_pool() = default; + + /** + * @brief Get a `cuda_stream_view` of a stream in the pool. + * + * This function is thread safe with respect to other calls to the same function. + * + * @return Stream view. + */ + virtual rmm::cuda_stream_view get_stream() = 0; + + /** + * @brief Get a `cuda_stream_view` of the stream associated with `stream_id`. + * + * Equivalent values of `stream_id` return a `cuda_stream_view` to the same underlying stream. + * This function is thread safe with respect to other calls to the same function. + * + * @param stream_id Unique identifier for the desired stream + * @return Requested stream view. + */ + virtual rmm::cuda_stream_view get_stream(stream_id_type stream_id) = 0; + + /** + * @brief Get a set of `cuda_stream_view` objects from the pool. + * + * An attempt is made to ensure that the returned vector does not contain duplicate + * streams, but this cannot be guaranteed if `count` is greater than the value returned by + * `get_stream_pool_size()`. + * + * This function is thread safe with respect to other calls to the same function. + * + * @param count The number of stream views to return. + * @return Vector containing `count` stream views. + */ + virtual std::vector get_streams(std::size_t count) = 0; + + /** + * @brief Get the number of stream objects in the pool. + * + * This function is thread safe with respect to other calls to the same function. + * + * @return the number of stream objects in the pool + */ + virtual std::size_t get_stream_pool_size() const = 0; +}; + +/** + * @brief Implementation of `cuda_stream_pool` that wraps an `rmm::cuda_stream_pool`.
+ */ +class rmm_cuda_stream_pool : public cuda_stream_pool { + rmm::cuda_stream_pool _pool; + + public: + rmm_cuda_stream_pool() : _pool{STREAM_POOL_SIZE} {} + rmm::cuda_stream_view get_stream() override { return _pool.get_stream(); } + rmm::cuda_stream_view get_stream(stream_id_type stream_id) override + { + return _pool.get_stream(stream_id); + } + + std::vector get_streams(std::size_t count) override + { + if (count > STREAM_POOL_SIZE) { + CUDF_LOG_WARN("get_streams called with count ({}) > pool size ({})", count, STREAM_POOL_SIZE); + } + auto streams = std::vector(); + for (uint32_t i = 0; i < count; i++) { + streams.emplace_back(_pool.get_stream()); + } + return streams; + } + + std::size_t get_stream_pool_size() const override { return STREAM_POOL_SIZE; } +}; + +/** + * @brief Implementation of `cuda_stream_pool` that always returns `cudf::get_default_stream()` + */ +class debug_cuda_stream_pool : public cuda_stream_pool { + public: + rmm::cuda_stream_view get_stream() override { return cudf::get_default_stream(); } + rmm::cuda_stream_view get_stream(stream_id_type stream_id) override + { + return cudf::get_default_stream(); + } + + std::vector get_streams(std::size_t count) override + { + return std::vector(count, cudf::get_default_stream()); + } + + std::size_t get_stream_pool_size() const override { return 1UL; } +}; + +/** + * @brief Initialize global stream pool. + */ +cuda_stream_pool* create_global_cuda_stream_pool() +{ + if (getenv("LIBCUDF_USE_DEBUG_STREAM_POOL")) return new debug_cuda_stream_pool(); + + return new rmm_cuda_stream_pool(); +} + +// FIXME: these will be available in rmm soon +inline int get_num_cuda_devices() +{ + rmm::cuda_device_id::value_type num_dev{}; + CUDF_CUDA_TRY(cudaGetDeviceCount(&num_dev)); + return num_dev; +} + +rmm::cuda_device_id get_current_cuda_device() +{ + int device_id; + CUDF_CUDA_TRY(cudaGetDevice(&device_id)); + return rmm::cuda_device_id{device_id}; +} + +/** + * @brief RAII struct to wrap a cuda event and ensure its proper destruction. + */ +struct cuda_event { + cuda_event() { CUDF_CUDA_TRY(cudaEventCreateWithFlags(&e_, cudaEventDisableTiming)); } + virtual ~cuda_event() { CUDF_ASSERT_CUDA_SUCCESS(cudaEventDestroy(e_)); } + + operator cudaEvent_t() { return e_; } + + private: + cudaEvent_t e_; +}; + +/** + * @brief Returns a cudaEvent_t for the current thread. + * + * The returned event is valid for the current device. + * + * @return A cudaEvent_t unique to the current thread and valid on the current device. + */ +cudaEvent_t event_for_thread() +{ + thread_local std::vector> thread_events(get_num_cuda_devices()); + auto const device_id = get_current_cuda_device(); + if (not thread_events[device_id.value()]) { + thread_events[device_id.value()] = std::make_unique(); + } + return *thread_events[device_id.value()]; +} + +/** + * @brief Returns a reference to the global stream pool for the current device. + * @return `cuda_stream_pool` valid on the current device. + */ +cuda_stream_pool& global_cuda_stream_pool() +{ + // using bare pointers here to deliberately allow them to leak. otherwise we wind up with + // seg faults trying to destroy stream objects after the context has shut down. 
+ static std::vector pools(get_num_cuda_devices()); + static std::mutex mutex; + auto const device_id = get_current_cuda_device(); + + std::lock_guard lock(mutex); + if (pools[device_id.value()] == nullptr) { + pools[device_id.value()] = create_global_cuda_stream_pool(); + } + return *pools[device_id.value()]; +} + +} // anonymous namespace + +std::vector fork_streams(rmm::cuda_stream_view stream, std::size_t count) +{ + auto const streams = global_cuda_stream_pool().get_streams(count); + auto const event = event_for_thread(); + CUDF_CUDA_TRY(cudaEventRecord(event, stream)); + std::for_each(streams.begin(), streams.end(), [&](auto& strm) { + CUDF_CUDA_TRY(cudaStreamWaitEvent(strm, event, 0)); + }); + return streams; +} + +void join_streams(host_span streams, rmm::cuda_stream_view stream) +{ + auto const event = event_for_thread(); + std::for_each(streams.begin(), streams.end(), [&](auto& strm) { + CUDF_CUDA_TRY(cudaEventRecord(event, strm)); + CUDF_CUDA_TRY(cudaStreamWaitEvent(stream, event, 0)); + }); +} + +} // namespace cudf::detail From c13b78309cc9f07ffde7e4794fdc04cb0a90a1ab Mon Sep 17 00:00:00 2001 From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> Date: Tue, 12 Sep 2023 20:19:59 -1000 Subject: [PATCH 062/150] Validate ignore_index type in drop_duplicates (#14098) Currently allows odd behavior like ```python In [1]: import cudf In [4]: df = cudf.DataFrame({"a": [1, 2, 1, 3]}) In [6]: df.drop_duplicates(ignore_index="True") Out[6]: a 0 1 1 2 2 3 ``` Authors: - Matthew Roeschke (https://github.com/mroeschke) Approvers: - GALI PREM SAGAR (https://github.com/galipremsagar) URL: https://github.com/rapidsai/cudf/pull/14098 --- python/cudf/cudf/core/indexed_frame.py | 5 +++++ python/cudf/cudf/tests/test_duplicates.py | 6 ++++++ 2 files changed, 11 insertions(+) diff --git a/python/cudf/cudf/core/indexed_frame.py b/python/cudf/cudf/core/indexed_frame.py index 69b25c51a66..518262ae926 100644 --- a/python/cudf/cudf/core/indexed_frame.py +++ b/python/cudf/cudf/core/indexed_frame.py @@ -1961,6 +1961,11 @@ def drop_duplicates( ignore_index: bool, default False If True, the resulting axis will be labeled 0, 1, ..., n - 1. """ + if not isinstance(ignore_index, (np.bool_, bool)): + raise ValueError( + f"{ignore_index=} must be bool, " + f"not {type(ignore_index).__name__}" + ) subset = self._preprocess_subset(subset) subset_cols = [name for name in self._column_names if name in subset] if len(subset_cols) == 0: diff --git a/python/cudf/cudf/tests/test_duplicates.py b/python/cudf/cudf/tests/test_duplicates.py index 8a83ec150bc..f77e7b4d775 100644 --- a/python/cudf/cudf/tests/test_duplicates.py +++ b/python/cudf/cudf/tests/test_duplicates.py @@ -623,3 +623,9 @@ def test_drop_duplicates_multi_index(): gdf[col].drop_duplicates().to_pandas(), pdf[col].drop_duplicates(), ) + + +def test_drop_duplicates_ignore_index_wrong_type(): + gdf = cudf.DataFrame([1, 1, 2]) + with pytest.raises(ValueError): + gdf.drop_duplicates(ignore_index="True") From 99c77111a20a2aea849d234ebe4c36171dc885fc Mon Sep 17 00:00:00 2001 From: GALI PREM SAGAR Date: Wed, 13 Sep 2023 01:31:35 -0500 Subject: [PATCH 063/150] Add support for `__round__` in `Series` and `DataFrame` (#14099) Fixes: #14083 This PR makes the builtin `round` function work on `DataFrame` and `Series` by implementing the `__round__` method.
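As a quick illustration (an editor's sketch mirroring the new tests, not part of the original message; the values are made up):

```python
import cudf

ser = cudf.Series([1.2345, 2.3456])
df = cudf.DataFrame({"a": [1.2345, 2.3456]})

# The builtin round() now dispatches to .round(decimals=...) through
# the new __round__ method on Series and DataFrame.
round(ser, 2)  # equivalent to ser.round(2)
round(df, 2)   # equivalent to df.round(2)
```

Per the diff, `__round__` is defined on `IndexedFrame` rather than `BinaryOperand` because pandas `Index` does not implement this method.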
Authors: - GALI PREM SAGAR (https://github.com/galipremsagar) Approvers: - Bradley Dice (https://github.com/bdice) URL: https://github.com/rapidsai/cudf/pull/14099 --- python/cudf/cudf/core/indexed_frame.py | 6 +++++ python/cudf/cudf/tests/test_dataframe.py | 23 +++++++++++++++++++ python/cudf/cudf/tests/test_series.py | 29 ++++++++++++++++++++++++ 3 files changed, 58 insertions(+) diff --git a/python/cudf/cudf/core/indexed_frame.py b/python/cudf/cudf/core/indexed_frame.py index 518262ae926..62e091b29b5 100644 --- a/python/cudf/cudf/core/indexed_frame.py +++ b/python/cudf/cudf/core/indexed_frame.py @@ -358,6 +358,12 @@ def _from_columns_like_self( override_dtypes=override_dtypes, ) + def __round__(self, digits=0): + # Shouldn't be added to BinaryOperand + # because pandas Index doesn't implement + # this method. + return self.round(decimals=digits) + def _mimic_inplace( self, result: Self, inplace: bool = False ) -> Optional[Self]: diff --git a/python/cudf/cudf/tests/test_dataframe.py b/python/cudf/cudf/tests/test_dataframe.py index 44d0b9249d0..61372bab3ad 100644 --- a/python/cudf/cudf/tests/test_dataframe.py +++ b/python/cudf/cudf/tests/test_dataframe.py @@ -10326,3 +10326,26 @@ def test_dataframe_nlargest_nsmallest_str_error(attr): ([], {"n": 1, "columns": ["a", "b"]}), ([], {"n": 1, "columns": ["a", "b"]}), ) + + +@pytest.mark.parametrize("digits", [0, 1, 3, 4, 10]) +def test_dataframe_round_builtin(digits): + pdf = pd.DataFrame( + { + "a": [1.2234242333234, 323432.3243423, np.nan], + "b": ["a", "b", "c"], + "c": pd.Series([34224, 324324, 324342], dtype="datetime64[ns]"), + "d": pd.Series([224.242, None, 2424.234324], dtype="category"), + "e": [ + decimal.Decimal("342.3243234234242"), + decimal.Decimal("89.32432497687622"), + None, + ], + } + ) + gdf = cudf.from_pandas(pdf, nan_as_null=False) + + expected = round(pdf, digits) + actual = round(gdf, digits) + + assert_eq(expected, actual) diff --git a/python/cudf/cudf/tests/test_series.py b/python/cudf/cudf/tests/test_series.py index 8a652caa6e2..798809b0ada 100644 --- a/python/cudf/cudf/tests/test_series.py +++ b/python/cudf/cudf/tests/test_series.py @@ -1,5 +1,6 @@ # Copyright (c) 2020-2023, NVIDIA CORPORATION. 
+import decimal import hashlib import operator import re @@ -2282,3 +2283,31 @@ def test_series_rename(initial_name, name): expected = psr.rename(name) assert_eq(actual, expected) + + +@pytest.mark.parametrize( + "data", + [ + [1.2234242333234, 323432.3243423, np.nan], + pd.Series([34224, 324324, 324342], dtype="datetime64[ns]"), + pd.Series([224.242, None, 2424.234324], dtype="category"), + [ + decimal.Decimal("342.3243234234242"), + decimal.Decimal("89.32432497687622"), + None, + ], + ], +) +@pytest.mark.parametrize("digits", [0, 1, 3, 4, 10]) +def test_series_round_builtin(data, digits): + ps = pd.Series(data) + gs = cudf.from_pandas(ps, nan_as_null=False) + + # TODO: Remove `to_frame` workaround + # after following issue is fixed: + # https://github.com/pandas-dev/pandas/issues/55114 + expected = round(ps.to_frame(), digits)[0] + expected.name = None + actual = round(gs, digits) + + assert_eq(expected, actual) From 1668c2caac27c5c92dfeddb20271b835b36c5615 Mon Sep 17 00:00:00 2001 From: Robert Maynard Date: Wed, 13 Sep 2023 12:09:10 -0400 Subject: [PATCH 064/150] Only use memory resources that haven't been freed (#14103) Fixes #13859 Authors: - Robert Maynard (https://github.com/robertmaynard) Approvers: - Bradley Dice (https://github.com/bdice) - David Wendt (https://github.com/davidwendt) URL: https://github.com/rapidsai/cudf/pull/14103 --- cpp/include/cudf_test/base_fixture.hpp | 1 + 1 file changed, 1 insertion(+) diff --git a/cpp/include/cudf_test/base_fixture.hpp b/cpp/include/cudf_test/base_fixture.hpp index 05319e03003..b622d7c6b78 100644 --- a/cpp/include/cudf_test/base_fixture.hpp +++ b/cpp/include/cudf_test/base_fixture.hpp @@ -392,6 +392,7 @@ inline auto parse_cudf_test_opts(int argc, char** argv) auto adaptor = make_stream_checking_resource_adaptor( \ resource.get(), error_on_invalid_stream, check_default_stream); \ rmm::mr::set_current_device_resource(&adaptor); \ + return RUN_ALL_TESTS(); \ } \ \ return RUN_ALL_TESTS(); \ From 60009a8005a8b9b69c2c870465b5cf46532d3388 Mon Sep 17 00:00:00 2001 From: GALI PREM SAGAR Date: Wed, 13 Sep 2023 17:12:44 -0500 Subject: [PATCH 065/150] Fix naming issues with `Index.to_frame` and `MultiIndex.to_frame` APIs (#14105) This PR: - [x] Introduces `allow_duplicates` for parity with `MultiIndex.to_frame` - however this parameter is non-functional since cudf doesn't support duplicate column names. - [x] Fixed handling of duplicate index names in `MultiIndex.to_frame` - [x] Added proper docs for `Index.to_frame` & `MultiIndex.to_frame` separately due to change in API signature. - [x] Added tests for `Index.to_frame` & `MultiIndex.to_frame` - [x] Introduced deprecations that will go away when pandas-2.0 support is enabled. 
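A short sketch of the resulting naming behaviour (an editor's illustration based on the docstrings and tests added in this change; the example values are arbitrary):

```python
import cudf

idx = cudf.Index(["Ant", "Bear", "Cow"], name="animal")
idx.to_frame(index=False, name="zoo")  # single column labeled "zoo"

mi = cudf.MultiIndex.from_tuples([("a", "c"), ("b", "d")], names=["x", "x"])
# Duplicate level names no longer leak into duplicate column labels:
# mi.to_frame(index=False) now raises ValueError, while explicit,
# unique names still work.
mi.to_frame(index=False, name=["x", "y"])
```

Passing `name=None` explicitly now emits a `FutureWarning`, since `None` will become the column name itself once the pandas-2.0 behaviour is adopted.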
Authors: - GALI PREM SAGAR (https://github.com/galipremsagar) Approvers: - Bradley Dice (https://github.com/bdice) URL: https://github.com/rapidsai/cudf/pull/14105 --- python/cudf/cudf/core/_base_index.py | 57 +++++++++++-- python/cudf/cudf/core/multiindex.py | 99 ++++++++++++++++++++--- python/cudf/cudf/tests/test_index.py | 19 +++++ python/cudf/cudf/tests/test_multiindex.py | 83 +++++++++++++++++++ 4 files changed, 242 insertions(+), 16 deletions(-) diff --git a/python/cudf/cudf/core/_base_index.py b/python/cudf/cudf/core/_base_index.py index 2f6e864b51c..c0bd9ec6eee 100644 --- a/python/cudf/cudf/core/_base_index.py +++ b/python/cudf/cudf/core/_base_index.py @@ -19,6 +19,7 @@ drop_nulls, ) from cudf._lib.types import size_type_dtype +from cudf.api.extensions import no_default from cudf.api.types import ( is_bool_dtype, is_integer, @@ -701,21 +702,65 @@ def fillna(self, value, downcast=None): return super().fillna(value=value) - def to_frame(self, index=True, name=None): + def to_frame(self, index=True, name=no_default): """Create a DataFrame with a column containing this Index Parameters ---------- index : boolean, default True Set the index of the returned DataFrame as the original Index - name : str, default None - Name to be used for the column + name : object, defaults to index.name + The passed name should substitute for the index name (if it has + one). + Returns ------- DataFrame - cudf DataFrame - """ - if name is not None: + DataFrame containing the original Index data. + + See Also + -------- + Index.to_series : Convert an Index to a Series. + Series.to_frame : Convert Series to DataFrame. + + Examples + -------- + >>> import cudf + >>> idx = cudf.Index(['Ant', 'Bear', 'Cow'], name='animal') + >>> idx.to_frame() + animal + animal + Ant Ant + Bear Bear + Cow Cow + + By default, the original Index is reused. To enforce a new Index: + + >>> idx.to_frame(index=False) + animal + 0 Ant + 1 Bear + 2 Cow + + To override the name of the resulting column, specify `name`: + + >>> idx.to_frame(index=False, name='zoo') + zoo + 0 Ant + 1 Bear + 2 Cow + """ + if name is None: + warnings.warn( + "Explicitly passing `name=None` currently preserves " + "the Index's name or uses a default name of 0. 
This " + "behaviour is deprecated, and in the future `None` " + "will be used as the name of the " + "resulting DataFrame column.", + FutureWarning, + ) + name = no_default + if name is not no_default: col_name = name elif self.name is None: col_name = 0 diff --git a/python/cudf/cudf/core/multiindex.py b/python/cudf/cudf/core/multiindex.py index bc6726879c1..21380bb841c 100644 --- a/python/cudf/cudf/core/multiindex.py +++ b/python/cudf/cudf/core/multiindex.py @@ -20,6 +20,7 @@ import cudf from cudf import _lib as libcudf from cudf._typing import DataFrameOrSeries +from cudf.api.extensions import no_default from cudf.api.types import is_integer, is_list_like, is_object_dtype from cudf.core import column from cudf.core._compat import PANDAS_GE_150 @@ -1015,7 +1016,12 @@ def __getitem__(self, index): elif isinstance(index, slice): start, stop, step = index.indices(len(self)) index = column.arange(start, stop, step) - result = MultiIndex.from_frame(self.to_frame(index=False).take(index)) + result = MultiIndex.from_frame( + self.to_frame(index=False, name=range(0, self.nlevels)).take( + index + ), + names=self.names, + ) # we are indexing into a single row of the MultiIndex, # return that row as a tuple: @@ -1026,24 +1032,95 @@ def __getitem__(self, index): result._codes = self._codes.take(index) if self._levels is not None: result._levels = self._levels - result.names = self.names return result @_cudf_nvtx_annotate - def to_frame(self, index=True, name=None): + def to_frame(self, index=True, name=no_default, allow_duplicates=False): + """ + Create a DataFrame with the levels of the MultiIndex as columns. + + Column ordering is determined by the DataFrame constructor with data as + a dict. + + Parameters + ---------- + index : bool, default True + Set the index of the returned DataFrame as the original MultiIndex. + name : list / sequence of str, optional + The passed names should substitute index level names. + allow_duplicates : bool, optional default False + Allow duplicate column labels to be created. Note + that this parameter is non-functional because + duplicates column labels aren't supported in cudf. + + Returns + ------- + DataFrame + + Examples + -------- + >>> import cudf + >>> mi = cudf.MultiIndex.from_tuples([('a', 'c'), ('b', 'd')]) + >>> mi + MultiIndex([('a', 'c'), + ('b', 'd')], + ) + + >>> df = mi.to_frame() + >>> df + 0 1 + a c a c + b d b d + + >>> df = mi.to_frame(index=False) + >>> df + 0 1 + 0 a c + 1 b d + + >>> df = mi.to_frame(name=['x', 'y']) + >>> df + x y + a c a c + b d b d + """ # TODO: Currently this function makes a shallow copy, which is # incorrect. We want to make a deep copy, otherwise further # modifications of the resulting DataFrame will affect the MultiIndex. - df = cudf.DataFrame._from_data(data=self._data) - if index: - df = df.set_index(self) - if name is not None: + if name is None: + warnings.warn( + "Explicitly passing `name=None` currently preserves the " + "Index's name or uses a default name of 0. This behaviour " + "is deprecated, and in the future `None` will be used " + "as the name of the resulting DataFrame column.", + FutureWarning, + ) + name = no_default + + if name is not no_default: if len(name) != len(self.levels): raise ValueError( "'name' should have the same length as " "number of levels on index." 
) - df.columns = name + column_names = name + else: + column_names = self.names + all_none_names = None + if not ( + all_none_names := all(x is None for x in column_names) + ) and len(column_names) != len(set(column_names)): + raise ValueError("Duplicate column names are not allowed") + df = cudf.DataFrame._from_data( + data=self._data, + columns=column_names + if name is not no_default and not all_none_names + else None, + ) + + if index: + df = df.set_index(self) + return df @_cudf_nvtx_annotate @@ -1504,7 +1581,9 @@ def droplevel(self, level=-1): @_cudf_nvtx_annotate def to_pandas(self, nullable=False, **kwargs): - result = self.to_frame(index=False).to_pandas(nullable=nullable) + result = self.to_frame( + index=False, name=list(range(self.nlevels)) + ).to_pandas(nullable=nullable) return pd.MultiIndex.from_frame(result, names=self.names) @classmethod @@ -1623,7 +1702,7 @@ def _clean_nulls_from_index(self): Convert all na values(if any) in MultiIndex object to `` as a preprocessing step to `__repr__` methods. """ - index_df = self.to_frame(index=False) + index_df = self.to_frame(index=False, name=list(range(self.nlevels))) return MultiIndex.from_frame( index_df._clean_nulls_from_dataframe(index_df), names=self.names ) diff --git a/python/cudf/cudf/tests/test_index.py b/python/cudf/cudf/tests/test_index.py index 6fb615c22e0..b3791cddce3 100644 --- a/python/cudf/cudf/tests/test_index.py +++ b/python/cudf/cudf/tests/test_index.py @@ -11,6 +11,7 @@ import pytest import cudf +from cudf.api.extensions import no_default from cudf.api.types import is_bool_dtype from cudf.core._compat import PANDAS_GE_133, PANDAS_GE_200 from cudf.core.index import ( @@ -2777,3 +2778,21 @@ def test_index_empty_from_pandas(request, dtype): gidx = cudf.from_pandas(pidx) assert_eq(pidx, gidx) + + +@pytest.mark.parametrize( + "data", [[1, 2, 3], ["ab", "cd", "e", None], range(0, 10)] +) +@pytest.mark.parametrize("data_name", [None, 1, "abc"]) +@pytest.mark.parametrize("index", [True, False]) +@pytest.mark.parametrize("name", [None, no_default, 1, "abc"]) +def test_index_to_frame(data, data_name, index, name): + pidx = pd.Index(data, name=data_name) + gidx = cudf.from_pandas(pidx) + + with expect_warning_if(name is None): + expected = pidx.to_frame(index=index, name=name) + with expect_warning_if(name is None): + actual = gidx.to_frame(index=index, name=name) + + assert_eq(expected, actual) diff --git a/python/cudf/cudf/tests/test_multiindex.py b/python/cudf/cudf/tests/test_multiindex.py index 3c843ace0a8..fb2b0c07efb 100644 --- a/python/cudf/cudf/tests/test_multiindex.py +++ b/python/cudf/cudf/tests/test_multiindex.py @@ -16,6 +16,7 @@ import pytest import cudf +from cudf.api.extensions import no_default from cudf.core._compat import PANDAS_GE_200 from cudf.core.column import as_column from cudf.core.index import as_index @@ -1926,3 +1927,85 @@ def test_multiindex_to_series_error(): midx = cudf.MultiIndex.from_tuples([("a", "b")]) with pytest.raises(NotImplementedError): midx.to_series() + + +@pytest.mark.parametrize( + "pidx", + [ + pd.MultiIndex.from_arrays( + [[1, 2, 3, 4], [5, 6, 7, 10], [11, 12, 12, 13]], + names=["a", "b", "c"], + ), + pd.MultiIndex.from_arrays( + [[1, 2, 3, 4], [5, 6, 7, 10], [11, 12, 12, 13]], + names=["a", "a", "a"], + ), + pd.MultiIndex.from_arrays( + [[1, 2, 3, 4], [5, 6, 7, 10], [11, 12, 12, 13]], + ), + ], +) +@pytest.mark.parametrize( + "name", [None, no_default, ["x", "y", "z"], ["rapids", "rapids", "rapids"]] +) +@pytest.mark.parametrize("allow_duplicates", [True, False]) 
+@pytest.mark.parametrize("index", [True, False]) +def test_multiindex_to_frame_allow_duplicates( + pidx, name, allow_duplicates, index +): + gidx = cudf.from_pandas(pidx) + + if ( + ( + len(pidx.names) != len(set(pidx.names)) + and not all(x is None for x in pidx.names) + ) + and not allow_duplicates + and (name is None or name is no_default) + ): + assert_exceptions_equal( + pidx.to_frame, + gidx.to_frame, + lfunc_args_and_kwargs=( + [], + { + "index": index, + "name": name, + "allow_duplicates": allow_duplicates, + }, + ), + rfunc_args_and_kwargs=( + [], + { + "index": index, + "name": name, + "allow_duplicates": allow_duplicates, + }, + ), + ) + else: + if ( + len(pidx.names) != len(set(pidx.names)) + and not all(x is None for x in pidx.names) + and not isinstance(name, list) + ) or (isinstance(name, list) and len(name) != len(set(name))): + # cudf doesn't have the ability to construct dataframes + # with duplicate column names + with expect_warning_if(name is None): + with pytest.raises(ValueError): + gidx.to_frame( + index=index, + name=name, + allow_duplicates=allow_duplicates, + ) + else: + with expect_warning_if(name is None): + expected = pidx.to_frame( + index=index, name=name, allow_duplicates=allow_duplicates + ) + with expect_warning_if(name is None): + actual = gidx.to_frame( + index=index, name=name, allow_duplicates=allow_duplicates + ) + + assert_eq(expected, actual) From edfef800d98491ee61b390645548f9223bbfb049 Mon Sep 17 00:00:00 2001 From: Nghia Truong <7416935+ttnghia@users.noreply.github.com> Date: Wed, 13 Sep 2023 16:54:45 -0700 Subject: [PATCH 066/150] Refactor `hash_reduce_by_row` (#14095) This PR extracts `hash_reduce_by_row` function from `distinct_reduce.*` files. Previously, that function was designed specifically to work with `distinct` in stream compaction with `size_type` output. Now, it becomes more generic and can support more generic reduction operations and various output types. No new functionality was added. The changes in this work pave the way for implementing histogram/merge histogram aggregations, which also rely on hash-base reduction. 
Authors: - Nghia Truong (https://github.com/ttnghia) Approvers: - Karthikeyan (https://github.com/karthikeyann) - Yunsong Wang (https://github.com/PointKernel) - Bradley Dice (https://github.com/bdice) URL: https://github.com/rapidsai/cudf/pull/14095 --- cpp/CMakeLists.txt | 2 +- .../cudf/detail/hash_reduce_by_row.cuh | 167 ++++++++++++++++++ cpp/src/stream_compaction/distinct.cu | 28 +-- cpp/src/stream_compaction/distinct_count.cu | 4 +- cpp/src/stream_compaction/distinct_helpers.cu | 109 ++++++++++++ ...stinct_reduce.cuh => distinct_helpers.hpp} | 12 +- cpp/src/stream_compaction/distinct_reduce.cu | 150 ---------------- .../stream_compaction_common.cuh | 22 --- .../stream_compaction_common.hpp | 5 - 9 files changed, 299 insertions(+), 200 deletions(-) create mode 100644 cpp/include/cudf/detail/hash_reduce_by_row.cuh create mode 100644 cpp/src/stream_compaction/distinct_helpers.cu rename cpp/src/stream_compaction/{distinct_reduce.cuh => distinct_helpers.hpp} (92%) delete mode 100644 cpp/src/stream_compaction/distinct_reduce.cu diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index c37d05a21c7..900e9eed98e 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -530,7 +530,7 @@ add_library( src/stream_compaction/apply_boolean_mask.cu src/stream_compaction/distinct.cu src/stream_compaction/distinct_count.cu - src/stream_compaction/distinct_reduce.cu + src/stream_compaction/distinct_helpers.cu src/stream_compaction/drop_nans.cu src/stream_compaction/drop_nulls.cu src/stream_compaction/stable_distinct.cu diff --git a/cpp/include/cudf/detail/hash_reduce_by_row.cuh b/cpp/include/cudf/detail/hash_reduce_by_row.cuh new file mode 100644 index 00000000000..2d2b43f1d4a --- /dev/null +++ b/cpp/include/cudf/detail/hash_reduce_by_row.cuh @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2022-2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include + +namespace cudf::detail { + +using hash_map_type = + cuco::static_map; + +/** + * @brief The base struct for customized reduction functor to perform reduce-by-key with keys are + * rows that compared equal. + * + * TODO: We need to switch to use `static_reduction_map` when it is ready + * (https://github.com/NVIDIA/cuCollections/pull/98). + */ +template +struct reduce_by_row_fn_base { + protected: + MapView const d_map; + KeyHasher const d_hasher; + KeyEqual const d_equal; + OutputType* const d_output; + + reduce_by_row_fn_base(MapView const& d_map, + KeyHasher const& d_hasher, + KeyEqual const& d_equal, + OutputType* const d_output) + : d_map{d_map}, d_hasher{d_hasher}, d_equal{d_equal}, d_output{d_output} + { + } + + /** + * @brief Return a pointer to the output array at the given index. 
+ * + * @param idx The access index + * @return A pointer to the given index in the output array + */ + __device__ OutputType* get_output_ptr(size_type const idx) const + { + auto const iter = d_map.find(idx, d_hasher, d_equal); + + if (iter != d_map.end()) { + // Only one (undetermined) index value of the duplicate rows could be inserted into the map. + // As such, looking up for all indices of duplicate rows always returns the same value. + auto const inserted_idx = iter->second.load(cuda::std::memory_order_relaxed); + + // All duplicate rows will have concurrent access to this same output slot. + return &d_output[inserted_idx]; + } else { + // All input `idx` values have been inserted into the map before. + // Thus, searching for an `idx` key resulting in the `end()` iterator only happens if + // `d_equal(idx, idx) == false`. + // Such situations are due to comparing nulls or NaNs which are considered as always unequal. + // In those cases, all rows containing nulls or NaNs are distinct. Just return their direct + // output slot. + return &d_output[idx]; + } + } +}; + +/** + * @brief Perform a reduction on groups of rows that are compared equal. + * + * This is essentially a reduce-by-key operation with keys are non-contiguous rows and are compared + * equal. A hash table is used to find groups of equal rows. + * + * At the beginning of the operation, the entire output array is filled with a value given by + * the `init` parameter. Then, the reduction result for each row group is written into the output + * array at the index of an unspecified row in the group. + * + * @tparam ReduceFuncBuilder The builder class that must have a `build()` method returning a + * reduction functor derived from `reduce_by_row_fn_base` + * @tparam OutputType Type of the reduction results + * @param map The auxiliary map to perform reduction + * @param preprocessed_input The preprocessed of the input rows for computing row hashing and row + * comparisons + * @param num_rows The number of all input rows + * @param has_nulls Indicate whether the input rows has any nulls at any nested levels + * @param has_nested_columns Indicates whether the input table has any nested columns + * @param nulls_equal Flag to specify whether null elements should be considered as equal + * @param nans_equal Flag to specify whether NaN values in floating point column should be + * considered equal. 
+ * @param init The initial value for reduction of each row group + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned vector + * @return A device_uvector containing the reduction results + */ +template +rmm::device_uvector hash_reduce_by_row( + hash_map_type const& map, + std::shared_ptr const preprocessed_input, + size_type num_rows, + cudf::nullate::DYNAMIC has_nulls, + bool has_nested_columns, + null_equality nulls_equal, + nan_equality nans_equal, + ReduceFuncBuilder func_builder, + OutputType init, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + auto const map_dview = map.get_device_view(); + auto const row_hasher = cudf::experimental::row::hash::row_hasher(preprocessed_input); + auto const key_hasher = row_hasher.device_hasher(has_nulls); + auto const row_comp = cudf::experimental::row::equality::self_comparator(preprocessed_input); + + auto reduction_results = rmm::device_uvector(num_rows, stream, mr); + thrust::uninitialized_fill( + rmm::exec_policy(stream), reduction_results.begin(), reduction_results.end(), init); + + auto const reduce_by_row = [&](auto const value_comp) { + if (has_nested_columns) { + auto const key_equal = row_comp.equal_to(has_nulls, nulls_equal, value_comp); + thrust::for_each( + rmm::exec_policy(stream), + thrust::make_counting_iterator(0), + thrust::make_counting_iterator(num_rows), + func_builder.build(map_dview, key_hasher, key_equal, reduction_results.begin())); + } else { + auto const key_equal = row_comp.equal_to(has_nulls, nulls_equal, value_comp); + thrust::for_each( + rmm::exec_policy(stream), + thrust::make_counting_iterator(0), + thrust::make_counting_iterator(num_rows), + func_builder.build(map_dview, key_hasher, key_equal, reduction_results.begin())); + } + }; + + if (nans_equal == nan_equality::ALL_EQUAL) { + using nan_equal_comparator = + cudf::experimental::row::equality::nan_equal_physical_equality_comparator; + reduce_by_row(nan_equal_comparator{}); + } else { + using nan_unequal_comparator = cudf::experimental::row::equality::physical_equality_comparator; + reduce_by_row(nan_unequal_comparator{}); + } + + return reduction_results; +} + +} // namespace cudf::detail diff --git a/cpp/src/stream_compaction/distinct.cu b/cpp/src/stream_compaction/distinct.cu index cc60b2a12ea..cc1e3423d42 100644 --- a/cpp/src/stream_compaction/distinct.cu +++ b/cpp/src/stream_compaction/distinct.cu @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "distinct_reduce.cuh" +#include "distinct_helpers.hpp" #include #include @@ -50,8 +50,8 @@ rmm::device_uvector get_distinct_indices(table_view const& input, } auto map = hash_map_type{compute_hash_table_size(input.num_rows()), - cuco::empty_key{COMPACTION_EMPTY_KEY_SENTINEL}, - cuco::empty_value{COMPACTION_EMPTY_VALUE_SENTINEL}, + cuco::empty_key{-1}, + cuco::empty_value{std::numeric_limits::min()}, detail::hash_table_allocator_type{default_allocator{}, stream}, stream.value()}; @@ -61,7 +61,7 @@ rmm::device_uvector get_distinct_indices(table_view const& input, auto const has_nested_columns = cudf::detail::has_nested_columns(input); auto const row_hasher = cudf::experimental::row::hash::row_hasher(preprocessed_input); - auto const key_hasher = experimental::compaction_hash(row_hasher.device_hasher(has_nulls)); + auto const key_hasher = row_hasher.device_hasher(has_nulls); auto const row_comp = cudf::experimental::row::equality::self_comparator(preprocessed_input); @@ -96,16 +96,16 @@ rmm::device_uvector get_distinct_indices(table_view const& input, } // For other keep options, reduce by row on rows that compare equal. - auto const reduction_results = hash_reduce_by_row(map, - std::move(preprocessed_input), - input.num_rows(), - has_nulls, - has_nested_columns, - keep, - nulls_equal, - nans_equal, - stream, - rmm::mr::get_current_device_resource()); + auto const reduction_results = reduce_by_row(map, + std::move(preprocessed_input), + input.num_rows(), + has_nulls, + has_nested_columns, + keep, + nulls_equal, + nans_equal, + stream, + rmm::mr::get_current_device_resource()); // Extract the desired output indices from reduction results. auto const map_end = [&] { diff --git a/cpp/src/stream_compaction/distinct_count.cu b/cpp/src/stream_compaction/distinct_count.cu index 4bca0827efe..ac4811ad279 100644 --- a/cpp/src/stream_compaction/distinct_count.cu +++ b/cpp/src/stream_compaction/distinct_count.cu @@ -136,14 +136,14 @@ cudf::size_type distinct_count(table_view const& keys, auto const preprocessed_input = cudf::experimental::row::hash::preprocessed_table::create(keys, stream); auto const row_hasher = cudf::experimental::row::hash::row_hasher(preprocessed_input); - auto const hash_key = experimental::compaction_hash(row_hasher.device_hasher(has_nulls)); + auto const hash_key = row_hasher.device_hasher(has_nulls); auto const row_comp = cudf::experimental::row::equality::self_comparator(preprocessed_input); auto const comparator_helper = [&](auto const row_equal) { using hasher_type = decltype(hash_key); auto key_set = cuco::experimental::static_set{ cuco::experimental::extent{compute_hash_table_size(num_rows)}, - cuco::empty_key{COMPACTION_EMPTY_KEY_SENTINEL}, + cuco::empty_key{-1}, row_equal, cuco::experimental::linear_probing<1, hasher_type>{hash_key}, detail::hash_table_allocator_type{default_allocator{}, stream}, diff --git a/cpp/src/stream_compaction/distinct_helpers.cu b/cpp/src/stream_compaction/distinct_helpers.cu new file mode 100644 index 00000000000..8f36ec98f4a --- /dev/null +++ b/cpp/src/stream_compaction/distinct_helpers.cu @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2022-2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "distinct_helpers.hpp" + +#include + +namespace cudf::detail { + +namespace { +/** + * @brief The functor to find the first/last/all duplicate row for rows that compared equal. + */ +template +struct reduce_fn : reduce_by_row_fn_base { + duplicate_keep_option const keep; + + reduce_fn(MapView const& d_map, + KeyHasher const& d_hasher, + KeyEqual const& d_equal, + duplicate_keep_option const keep, + size_type* const d_output) + : reduce_by_row_fn_base{d_map, + d_hasher, + d_equal, + d_output}, + keep{keep} + { + } + + __device__ void operator()(size_type const idx) const + { + auto const out_ptr = this->get_output_ptr(idx); + + if (keep == duplicate_keep_option::KEEP_FIRST) { + // Store the smallest index of all rows that are equal. + atomicMin(out_ptr, idx); + } else if (keep == duplicate_keep_option::KEEP_LAST) { + // Store the greatest index of all rows that are equal. + atomicMax(out_ptr, idx); + } else { + // Count the number of rows in each group of rows that are compared equal. + atomicAdd(out_ptr, size_type{1}); + } + } +}; + +/** + * @brief The builder to construct an instance of `reduce_fn` functor base on the given + * value of the `duplicate_keep_option` member variable. + */ +struct reduce_func_builder { + duplicate_keep_option const keep; + + template + auto build(MapView const& d_map, + KeyHasher const& d_hasher, + KeyEqual const& d_equal, + size_type* const d_output) + { + return reduce_fn{d_map, d_hasher, d_equal, keep, d_output}; + } +}; + +} // namespace + +// This function is split from `distinct.cu` to improve compile time. +rmm::device_uvector reduce_by_row( + hash_map_type const& map, + std::shared_ptr const preprocessed_input, + size_type num_rows, + cudf::nullate::DYNAMIC has_nulls, + bool has_nested_columns, + duplicate_keep_option keep, + null_equality nulls_equal, + nan_equality nans_equal, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + CUDF_EXPECTS(keep != duplicate_keep_option::KEEP_ANY, + "This function should not be called with KEEP_ANY"); + + return hash_reduce_by_row(map, + preprocessed_input, + num_rows, + has_nulls, + has_nested_columns, + nulls_equal, + nans_equal, + reduce_func_builder{keep}, + reduction_init_value(keep), + stream, + mr); +} + +} // namespace cudf::detail diff --git a/cpp/src/stream_compaction/distinct_reduce.cuh b/cpp/src/stream_compaction/distinct_helpers.hpp similarity index 92% rename from cpp/src/stream_compaction/distinct_reduce.cuh rename to cpp/src/stream_compaction/distinct_helpers.hpp index 8ec1fa18205..b667d0b04f0 100644 --- a/cpp/src/stream_compaction/distinct_reduce.cuh +++ b/cpp/src/stream_compaction/distinct_helpers.hpp @@ -14,18 +14,14 @@ * limitations under the License. */ -#include "stream_compaction_common.cuh" +#include "stream_compaction_common.hpp" -#include #include #include #include #include #include -#include - -#include namespace cudf::detail { @@ -56,6 +52,8 @@ auto constexpr reduction_init_value(duplicate_keep_option keep) * - If `keep == KEEP_LAST`: max of row indices in the group. 
* - If `keep == KEEP_NONE`: count of equivalent rows (group size). * + * Note that this function is not needed when `keep == KEEP_ANY`. * + * At the beginning of the operation, the entire output array is filled with a value given by * the `reduction_init_value()` function. Then, the reduction result for each row group is written * into the output array at the index of an unspecified row in the group. @@ -68,11 +66,13 @@ auto constexpr reduction_init_value(duplicate_keep_option keep) * @param has_nested_columns Indicates whether the input table has any nested columns * @param keep The parameter to determine what type of reduction to perform * @param nulls_equal Flag to specify whether null elements should be considered as equal + * @param nans_equal Flag to specify whether NaN values in floating point column should be + * considered equal. * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned vector * @return A device_uvector containing the reduction results */ -rmm::device_uvector hash_reduce_by_row( +rmm::device_uvector reduce_by_row( hash_map_type const& map, std::shared_ptr const preprocessed_input, size_type num_rows, diff --git a/cpp/src/stream_compaction/distinct_reduce.cu b/cpp/src/stream_compaction/distinct_reduce.cu deleted file mode 100644 index 020e6a495bc..00000000000 --- a/cpp/src/stream_compaction/distinct_reduce.cu +++ /dev/null @@ -1,150 +0,0 @@ -/* - * Copyright (c) 2022-2023, NVIDIA CORPORATION. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "distinct_reduce.cuh" - -#include -#include -#include - -namespace cudf::detail { - -namespace { -/** - * @brief A functor to perform reduce-by-key with keys are rows that compared equal. - * - * TODO: We need to switch to use `static_reduction_map` when it is ready - * (https://github.com/NVIDIA/cuCollections/pull/98). - */ -template -struct reduce_by_row_fn { - MapView const d_map; - KeyHasher const d_hasher; - KeyEqual const d_equal; - duplicate_keep_option const keep; - size_type* const d_output; - - reduce_by_row_fn(MapView const& d_map, - KeyHasher const& d_hasher, - KeyEqual const& d_equal, - duplicate_keep_option const keep, - size_type* const d_output) - : d_map{d_map}, d_hasher{d_hasher}, d_equal{d_equal}, keep{keep}, d_output{d_output} - { - } - - __device__ void operator()(size_type const idx) const - { - auto const out_ptr = get_output_ptr(idx); - - if (keep == duplicate_keep_option::KEEP_FIRST) { - // Store the smallest index of all rows that are equal. - atomicMin(out_ptr, idx); - } else if (keep == duplicate_keep_option::KEEP_LAST) { - // Store the greatest index of all rows that are equal. - atomicMax(out_ptr, idx); - } else { - // Count the number of rows in each group of rows that are compared equal.
- atomicAdd(out_ptr, size_type{1}); - } - } - - private: - __device__ size_type* get_output_ptr(size_type const idx) const - { - auto const iter = d_map.find(idx, d_hasher, d_equal); - - if (iter != d_map.end()) { - // Only one index value of the duplicate rows could be inserted into the map. - // As such, looking up for all indices of duplicate rows always returns the same value. - auto const inserted_idx = iter->second.load(cuda::std::memory_order_relaxed); - - // All duplicate rows will have concurrent access to this same output slot. - return &d_output[inserted_idx]; - } else { - // All input `idx` values have been inserted into the map before. - // Thus, searching for an `idx` key resulting in the `end()` iterator only happens if - // `d_equal(idx, idx) == false`. - // Such situations are due to comparing nulls or NaNs which are considered as always unequal. - // In those cases, all rows containing nulls or NaNs are distinct. Just return their direct - // output slot. - return &d_output[idx]; - } - } -}; - -} // namespace - -rmm::device_uvector hash_reduce_by_row( - hash_map_type const& map, - std::shared_ptr const preprocessed_input, - size_type num_rows, - cudf::nullate::DYNAMIC has_nulls, - bool has_nested_columns, - duplicate_keep_option keep, - null_equality nulls_equal, - nan_equality nans_equal, - rmm::cuda_stream_view stream, - rmm::mr::device_memory_resource* mr) -{ - CUDF_EXPECTS(keep != duplicate_keep_option::KEEP_ANY, - "This function should not be called with KEEP_ANY"); - - auto reduction_results = rmm::device_uvector(num_rows, stream, mr); - - thrust::uninitialized_fill(rmm::exec_policy(stream), - reduction_results.begin(), - reduction_results.end(), - reduction_init_value(keep)); - - auto const row_hasher = cudf::experimental::row::hash::row_hasher(preprocessed_input); - auto const key_hasher = experimental::compaction_hash(row_hasher.device_hasher(has_nulls)); - - auto const row_comp = cudf::experimental::row::equality::self_comparator(preprocessed_input); - - auto const reduce_by_row = [&](auto const value_comp) { - if (has_nested_columns) { - auto const key_equal = row_comp.equal_to(has_nulls, nulls_equal, value_comp); - thrust::for_each( - rmm::exec_policy(stream), - thrust::make_counting_iterator(0), - thrust::make_counting_iterator(num_rows), - reduce_by_row_fn{ - map.get_device_view(), key_hasher, key_equal, keep, reduction_results.begin()}); - } else { - auto const key_equal = row_comp.equal_to(has_nulls, nulls_equal, value_comp); - thrust::for_each( - rmm::exec_policy(stream), - thrust::make_counting_iterator(0), - thrust::make_counting_iterator(num_rows), - reduce_by_row_fn{ - map.get_device_view(), key_hasher, key_equal, keep, reduction_results.begin()}); - } - }; - - if (nans_equal == nan_equality::ALL_EQUAL) { - using nan_equal_comparator = - cudf::experimental::row::equality::nan_equal_physical_equality_comparator; - reduce_by_row(nan_equal_comparator{}); - } else { - using nan_unequal_comparator = cudf::experimental::row::equality::physical_equality_comparator; - reduce_by_row(nan_unequal_comparator{}); - } - - return reduction_results; -} - -} // namespace cudf::detail diff --git a/cpp/src/stream_compaction/stream_compaction_common.cuh b/cpp/src/stream_compaction/stream_compaction_common.cuh index 4779cd990fd..839672d6a56 100644 --- a/cpp/src/stream_compaction/stream_compaction_common.cuh +++ b/cpp/src/stream_compaction/stream_compaction_common.cuh @@ -29,28 +29,6 @@ namespace cudf { namespace detail { -namespace experimental { - -/** - * @brief Device 
callable to hash a given row. - */ -template -class compaction_hash { - public: - compaction_hash(RowHash row_hasher) : _hash{row_hasher} {} - - __device__ inline auto operator()(size_type i) const noexcept - { - auto hash = _hash(i); - return (hash == COMPACTION_EMPTY_KEY_SENTINEL) ? (hash - 1) : hash; - } - - private: - RowHash _hash; -}; - -} // namespace experimental - /**  * @brief Device functor to determine if a row is valid.  */ diff --git a/cpp/src/stream_compaction/stream_compaction_common.hpp b/cpp/src/stream_compaction/stream_compaction_common.hpp index 0cd2d8f4b14..58d958d2ff4 100644 --- a/cpp/src/stream_compaction/stream_compaction_common.hpp +++ b/cpp/src/stream_compaction/stream_compaction_common.hpp @@ -30,11 +30,6 @@ namespace cudf { namespace detail { -constexpr auto COMPACTION_EMPTY_KEY_SENTINEL = std::numeric_limits::max(); -constexpr auto COMPACTION_EMPTY_VALUE_SENTINEL = std::numeric_limits::min(); - -using hash_type = cuco::murmurhash3_32; - using hash_table_allocator_type = rmm::mr::stream_allocator_adaptor>; using hash_map_type = From 664dfc33a29ddb86e671c19f12e2b56e32d46a8b Mon Sep 17 00:00:00 2001 From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> Date: Wed, 13 Sep 2023 14:21:57 -1000 Subject: [PATCH 067/150] Raise NotImplementedError in to_datetime if Z (or tz component) in string (#14074) closes #14039 Avoids this discrepancy when a date string has a tz component ```python In [1]: import pandas In [2]: import cudf In [3]: data = ["2019-01-01T00:00:00.000Z"] In [4]: cudf.to_datetime(data) Out[4]: DatetimeIndex(['2019-01-01'], dtype='datetime64[ns]') In [5]: pandas.to_datetime(data) Out[5]: DatetimeIndex(['2019-01-01 00:00:00+00:00'], dtype='datetime64[ns, UTC]', freq=None) ``` Authors: - Matthew Roeschke (https://github.com/mroeschke) Approvers: - GALI PREM SAGAR (https://github.com/galipremsagar) - Vyas Ramasubramani (https://github.com/vyasr) URL: https://github.com/rapidsai/cudf/pull/14074 --- python/cudf/cudf/core/column/datetime.py | 15 +++++--- python/cudf/cudf/tests/test_datetime.py | 49 +++++++++++------------- python/cudf/cudf/tests/test_string.py | 12 +++--- 3 files changed, 39 insertions(+), 37 deletions(-) diff --git a/python/cudf/cudf/core/column/datetime.py b/python/cudf/cudf/core/column/datetime.py index da6c4fb858c..7775723e267 100644 --- a/python/cudf/cudf/core/column/datetime.py +++ b/python/cudf/cudf/core/column/datetime.py @@ -631,6 +631,10 @@ def infer_format(element: str, **kwargs) -> str: fmt = _guess_datetime_format(element, **kwargs) if fmt is not None: + if "%z" in fmt or "%Z" in fmt: + raise NotImplementedError( + "cuDF does not yet support timezone-aware datetimes" + ) return fmt element_parts = element.split(".") @@ -651,11 +655,12 @@ def infer_format(element: str, **kwargs) -> str: raise ValueError("Unable to infer the timestamp format from the data") if len(second_parts) > 1: - # "Z" indicates Zulu time(widely used in aviation) - Which is - # UTC timezone that currently cudf only supports. Having any other - # unsupported timezone will let the code fail below - # with a ValueError. 
- second_parts.remove("Z") + # We may have a non-digit, timezone-like component + # like Z, UTC-3, +01:00 + if any(re.search(r"\D", part) for part in second_parts): + raise NotImplementedError( + "cuDF does not yet support timezone-aware datetimes" + ) second_part = "".join(second_parts[1:]) if len(second_part) > 1: diff --git a/python/cudf/cudf/tests/test_datetime.py b/python/cudf/cudf/tests/test_datetime.py index 4c20258ae67..5cab19eedc6 100644 --- a/python/cudf/cudf/tests/test_datetime.py +++ b/python/cudf/cudf/tests/test_datetime.py @@ -1250,40 +1250,31 @@ def test_datetime_reductions(data, op, dtype): assert_eq(expected, actual) +@pytest.mark.parametrize("timezone", ["naive", "UTC"]) @pytest.mark.parametrize( "data", [ - np.datetime_as_string( - np.arange("2002-10-27T04:30", 4 * 60, 60, dtype="M8[m]"), - timezone="UTC", - ), - np.datetime_as_string( - np.arange("2002-10-27T04:30", 10 * 60, 1, dtype="M8[m]"), - timezone="UTC", - ), - np.datetime_as_string( - np.arange("2002-10-27T04:30", 10 * 60, 1, dtype="M8[ns]"), - timezone="UTC", - ), - np.datetime_as_string( - np.arange("2002-10-27T04:30", 10 * 60, 1, dtype="M8[us]"), - timezone="UTC", - ), - np.datetime_as_string( - np.arange("2002-10-27T04:30", 4 * 60, 60, dtype="M8[s]"), - timezone="UTC", - ), + np.arange("2002-10-27T04:30", 4 * 60, 60, dtype="M8[m]"), + np.arange("2002-10-27T04:30", 10 * 60, 1, dtype="M8[m]"), + np.arange("2002-10-27T04:30", 10 * 60, 1, dtype="M8[ns]"), + np.arange("2002-10-27T04:30", 10 * 60, 1, dtype="M8[us]"), + np.arange("2002-10-27T04:30", 4 * 60, 60, dtype="M8[s]"), ], ) @pytest.mark.parametrize("dtype", DATETIME_TYPES) -def test_datetime_infer_format(data, dtype): - sr = cudf.Series(data) - psr = pd.Series(data) +def test_datetime_infer_format(data, timezone, dtype): + ts_data = np.datetime_as_string(data, timezone=timezone) + sr = cudf.Series(ts_data) + if timezone == "naive": + psr = pd.Series(ts_data) - expected = psr.astype(dtype) - actual = sr.astype(dtype) + expected = psr.astype(dtype) + actual = sr.astype(dtype) - assert_eq(expected, actual) + assert_eq(expected, actual) + else: + with pytest.raises(NotImplementedError): + sr.astype(dtype) def test_dateoffset_instance_subclass_check(): @@ -2158,6 +2149,12 @@ def test_format_timezone_not_implemented(code): ) +@pytest.mark.parametrize("tz", ["Z", "UTC-3", "+01:00"]) +def test_no_format_timezone_not_implemented(tz): + with pytest.raises(NotImplementedError): + cudf.to_datetime([f"2020-01-01 00:00:00{tz}"]) + + @pytest.mark.parametrize("arg", [True, False]) def test_args_not_datetime_typerror(arg): with pytest.raises(TypeError): diff --git a/python/cudf/cudf/tests/test_string.py b/python/cudf/cudf/tests/test_string.py index 2bddd93ccb8..d54027eb707 100644 --- a/python/cudf/cudf/tests/test_string.py +++ b/python/cudf/cudf/tests/test_string.py @@ -200,12 +200,12 @@ def test_string_astype(dtype): data = ["True", "False", "True", "False", "False"] elif dtype.startswith("datetime64"): data = [ - "2019-06-04T00:00:00Z", - "2019-06-04T12:12:12Z", - "2019-06-03T00:00:00Z", - "2019-05-04T00:00:00Z", - "2018-06-04T00:00:00Z", - "1922-07-21T01:02:03Z", + "2019-06-04T00:00:00", + "2019-06-04T12:12:12", + "2019-06-03T00:00:00", + "2019-05-04T00:00:00", + "2018-06-04T00:00:00", + "1922-07-21T01:02:03", ] elif dtype == "str" or dtype == "object": data = ["ab", "cd", "ef", "gh", "ij"] From 89557bb0efad2d32098ba86b78e4f4706e7fe88f Mon Sep 17 00:00:00 2001 From: GALI PREM SAGAR Date: Wed, 13 Sep 2023 19:22:46 -0500 Subject: [PATCH 068/150] Allow `numeric_only=True` 
 for reduction operations on numeric types (#14111)

Fixes: #14090

This PR allows passing `numeric_only=True` for reduction operations on
numerical columns.

Authors:
  - GALI PREM SAGAR (https://github.com/galipremsagar)

Approvers:
  - Matthew Roeschke (https://github.com/mroeschke)
  - Benjamin Zaitlen (https://github.com/quasiben)

URL: https://github.com/rapidsai/cudf/pull/14111
---
 python/cudf/cudf/core/single_column_frame.py |  6 ++-
 python/cudf/cudf/tests/test_stats.py         | 44 ++++++++++----------
 2 files changed, 26 insertions(+), 24 deletions(-)

diff --git a/python/cudf/cudf/core/single_column_frame.py b/python/cudf/cudf/core/single_column_frame.py
index 7c019f0722c..6a56ab8f3a5 100644
--- a/python/cudf/cudf/core/single_column_frame.py
+++ b/python/cudf/cudf/core/single_column_frame.py
@@ -49,9 +49,11 @@ def _reduce(
         if level is not None:
             raise NotImplementedError("level parameter is not implemented yet")
 
-        if numeric_only:
+        if numeric_only and not isinstance(
+            self._column, cudf.core.column.numerical_base.NumericalBaseColumn
+        ):
             raise NotImplementedError(
-                f"Series.{op} does not implement numeric_only"
+                f"Series.{op} does not implement numeric_only."
             )
         try:
             return getattr(self._column, op)(**kwargs)
diff --git a/python/cudf/cudf/tests/test_stats.py b/python/cudf/cudf/tests/test_stats.py
index 6478fbaad95..463cdb8a7f4 100644
--- a/python/cudf/cudf/tests/test_stats.py
+++ b/python/cudf/cudf/tests/test_stats.py
@@ -247,30 +247,37 @@ def test_misc_quantiles(data, q):
     ],
 )
 @pytest.mark.parametrize("null_flag", [False, True])
-def test_kurtosis_series(data, null_flag):
+@pytest.mark.parametrize("numeric_only", [False, True])
+def test_kurtosis_series(data, null_flag, numeric_only):
     pdata = data.to_pandas()
 
     if null_flag and len(data) > 2:
         data.iloc[[0, 2]] = None
         pdata.iloc[[0, 2]] = None
 
-    got = data.kurtosis()
+    got = data.kurtosis(numeric_only=numeric_only)
     got = got if np.isscalar(got) else got.to_numpy()
-    expected = pdata.kurtosis()
+    expected = pdata.kurtosis(numeric_only=numeric_only)
     np.testing.assert_array_almost_equal(got, expected)
 
-    got = data.kurt()
+    got = data.kurt(numeric_only=numeric_only)
     got = got if np.isscalar(got) else got.to_numpy()
-    expected = pdata.kurt()
+    expected = pdata.kurt(numeric_only=numeric_only)
     np.testing.assert_array_almost_equal(got, expected)
 
-    got = data.kurt(numeric_only=False)
-    got = got if np.isscalar(got) else got.to_numpy()
-    expected = pdata.kurt(numeric_only=False)
-    np.testing.assert_array_almost_equal(got, expected)
 
-    with pytest.raises(NotImplementedError):
-        data.kurt(numeric_only=True)
+@pytest.mark.parametrize("op", ["skew", "kurt"])
+def test_kurt_skew_error(op):
+    gs = cudf.Series(["ab", "cd"])
+    ps = gs.to_pandas()
+
+    with pytest.raises(FutureWarning):
+        assert_exceptions_equal(
+            getattr(gs, op),
+            getattr(ps, op),
+            lfunc_args_and_kwargs=([], {"numeric_only": True}),
+            rfunc_args_and_kwargs=([], {"numeric_only": True}),
+        )
 
 
 @pytest.mark.parametrize(
@@ -290,26 +297,19 @@ def test_kurtosis_series(data, null_flag):
     ],
 )
 @pytest.mark.parametrize("null_flag", [False, True])
-def test_skew_series(data, null_flag):
+@pytest.mark.parametrize("numeric_only", [False, True])
+def test_skew_series(data, null_flag, numeric_only):
     pdata = data.to_pandas()
 
     if null_flag and len(data) > 2:
         data.iloc[[0, 2]] = None
         pdata.iloc[[0, 2]] = None
 
-    got = data.skew()
-    expected = pdata.skew()
+    got = data.skew(numeric_only=numeric_only)
+    expected = pdata.skew(numeric_only=numeric_only)
     got = got if np.isscalar(got) else got.to_numpy()
     np.testing.assert_array_almost_equal(got, expected)
 
-    got = data.skew(numeric_only=False)
-    expected = pdata.skew(numeric_only=False)
-    got = got if np.isscalar(got) else got.to_numpy()
-    np.testing.assert_array_almost_equal(got, expected)
-
-    with pytest.raises(NotImplementedError):
-        data.skew(numeric_only=True)
-
 
 @pytest.mark.parametrize("dtype", params_dtypes)
 @pytest.mark.parametrize("num_na", [0, 1, 50, 99, 100])
From 1bfeee7575e137bc75741cb2caf015e55ecab2cd Mon Sep 17 00:00:00 2001
From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com>
Date: Wed, 13 Sep 2023 14:23:14 -1000
Subject: [PATCH 069/150] Raise NotImplementedError for datetime strings with
 UTC offset (#14070)

Prevents e.g. DatetimeIndex(["2022-07-22 00:00:00+02:00"]) from silently
dropping the +02:00, since timezones are not supported

Authors:
  - Matthew Roeschke (https://github.com/mroeschke)

Approvers:
  - Lawrence Mitchell (https://github.com/wence-)
  - Vyas Ramasubramani (https://github.com/vyasr)

URL: https://github.com/rapidsai/cudf/pull/14070
---
 python/cudf/cudf/core/column/column.py  | 18 ++++++++++++++++--
 python/cudf/cudf/tests/test_datetime.py |  6 ++++++
 2 files changed, 22 insertions(+), 2 deletions(-)

diff --git a/python/cudf/cudf/core/column/column.py b/python/cudf/cudf/core/column/column.py
index 59ab3569814..d2e2f11a12e 100644
--- a/python/cudf/cudf/core/column/column.py
+++ b/python/cudf/cudf/core/column/column.py
@@ -2519,11 +2519,11 @@ def _construct_array(
         arbitrary = cupy.asarray(arbitrary, dtype=dtype)
     except (TypeError, ValueError):
         native_dtype = dtype
-        inferred_dtype = None
+        inferred_dtype = infer_dtype(arbitrary, skipna=False)
         if (
             dtype is None
             and not cudf._lib.scalar._is_null_host_scalar(arbitrary)
-            and (inferred_dtype := infer_dtype(arbitrary, skipna=False))
+            and inferred_dtype
             in (
                 "mixed",
                 "mixed-integer",
@@ -2533,6 +2533,20 @@ def _construct_array(
             if inferred_dtype == "interval":
                 # Only way to construct an Interval column.
                return pd.array(arbitrary)
+            elif (
+                inferred_dtype == "string" and getattr(dtype, "kind", None) == "M"
+            ):
+                # We may have date-like strings with timezones
+                try:
+                    pd_arbitrary = pd.to_datetime(arbitrary)
+                    if isinstance(pd_arbitrary.dtype, pd.DatetimeTZDtype):
+                        raise NotImplementedError(
+                            "cuDF does not yet support timezone-aware datetimes"
+                        )
+                except pd.errors.OutOfBoundsDatetime:
+                    # https://github.com/pandas-dev/pandas/issues/55096
+                    pass
+
         arbitrary = np.asarray(
             arbitrary,
             dtype=native_dtype
diff --git a/python/cudf/cudf/tests/test_datetime.py b/python/cudf/cudf/tests/test_datetime.py
index 5cab19eedc6..0cc7112454c 100644
--- a/python/cudf/cudf/tests/test_datetime.py
+++ b/python/cudf/cudf/tests/test_datetime.py
@@ -2141,6 +2141,12 @@ def test_daterange_pandas_compatibility():
     assert_eq(expected, actual)
 
 
+def test_strings_with_utc_offset_not_implemented():
+    with pytest.warns(DeprecationWarning, match="parsing timezone"):  # cupy
+        with pytest.raises(NotImplementedError):
+            DatetimeIndex(["2022-07-22 00:00:00+02:00"])
+
+
 @pytest.mark.parametrize("code", ["z", "Z"])
 def test_format_timezone_not_implemented(code):
     with pytest.raises(NotImplementedError):
From 3b691f4be744ff1155df3634cd334211e738e37d Mon Sep 17 00:00:00 2001
From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com>
Date: Fri, 15 Sep 2023 10:03:52 -1000
Subject: [PATCH 070/150] Raise NotImplementedError in to_datetime with
 dayfirst without infer_format (#14058)

Raises a `NotImplementedError` to avoid this incorrect behavior, which
appears never to have been implemented:

```python
In [6]: cudf.to_datetime(["10-02-2014"], dayfirst=True)
Out[6]: DatetimeIndex(['2014-10-02'], dtype='datetime64[ns]')
```

closes https://github.com/rapidsai/cudf/issues/14042

Authors:
  - Matthew Roeschke (https://github.com/mroeschke)

Approvers:
  - Vyas Ramasubramani (https://github.com/vyasr)

URL: https://github.com/rapidsai/cudf/pull/14058
---
 python/cudf/cudf/core/tools/datetimes.py | 11 +++----
 python/cudf/cudf/tests/test_datetime.py  | 38 +++++++++++++++++-----
 2 files changed, 36 insertions(+), 13 deletions(-)

diff --git a/python/cudf/cudf/core/tools/datetimes.py b/python/cudf/cudf/core/tools/datetimes.py
index f736e055163..a3f4bacf206 100644
--- a/python/cudf/cudf/core/tools/datetimes.py
+++ b/python/cudf/cudf/core/tools/datetimes.py
@@ -353,15 +353,16 @@ def _process_col(col, unit, dayfirst, infer_datetime_format, format):
                 format=format,
             )
     else:
-        if infer_datetime_format and format is None:
+        if format is None:
+            if not infer_datetime_format and dayfirst:
+                raise NotImplementedError(
+                    f"{dayfirst=} not implemented "
+                    f"when {format=} and {infer_datetime_format=}."
+ ) format = column.datetime.infer_format( element=col.element_indexing(0), dayfirst=dayfirst, ) - elif format is None: - format = column.datetime.infer_format( - element=col.element_indexing(0) - ) return col.as_datetime_column( dtype=_unit_dtype_map[unit], format=format, diff --git a/python/cudf/cudf/tests/test_datetime.py b/python/cudf/cudf/tests/test_datetime.py index 0cc7112454c..164856ed6f5 100644 --- a/python/cudf/cudf/tests/test_datetime.py +++ b/python/cudf/cudf/tests/test_datetime.py @@ -617,22 +617,44 @@ def test_datetime_dataframe(): @pytest.mark.parametrize("infer_datetime_format", [True, False]) def test_cudf_to_datetime(data, dayfirst, infer_datetime_format): pd_data = data + is_string_data = False if isinstance(pd_data, (pd.Series, pd.DataFrame, pd.Index)): gd_data = cudf.from_pandas(pd_data) + is_string_data = ( + gd_data.ndim == 1 + and not gd_data.empty + and gd_data.dtype.kind == "O" + ) else: if type(pd_data).__module__ == np.__name__: gd_data = cp.array(pd_data) else: gd_data = pd_data + is_string_data = isinstance(gd_data, list) and isinstance( + next(iter(gd_data), None), str + ) - expected = pd.to_datetime( - pd_data, dayfirst=dayfirst, infer_datetime_format=infer_datetime_format - ) - actual = cudf.to_datetime( - gd_data, dayfirst=dayfirst, infer_datetime_format=infer_datetime_format - ) - - assert_eq(actual, expected) + if dayfirst and not infer_datetime_format and is_string_data: + # Note: pandas<2.0 also does not respect dayfirst=True correctly + # for object data + with pytest.raises(NotImplementedError): + cudf.to_datetime( + gd_data, + dayfirst=dayfirst, + infer_datetime_format=infer_datetime_format, + ) + else: + expected = pd.to_datetime( + pd_data, + dayfirst=dayfirst, + infer_datetime_format=infer_datetime_format, + ) + actual = cudf.to_datetime( + gd_data, + dayfirst=dayfirst, + infer_datetime_format=infer_datetime_format, + ) + assert_eq(actual, expected) @pytest.mark.parametrize( From 4ca568e764a3898bf619a221cdb91a9261df22bf Mon Sep 17 00:00:00 2001 From: "Richard (Rick) Zamora" Date: Mon, 18 Sep 2023 09:00:39 -0500 Subject: [PATCH 071/150] Update pyarrow-related dispatch logic in dask_cudf (#14069) Updates `dask_cudf` dispatch logic to avoid breakage from https://github.com/dask/dask/pull/10500. Also removes stale `try`/`except` logic. 
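
For illustration, a minimal round trip through the updated dispatch
functions (a sketch, not code from this PR; the sample frame and the
`assert` are made up for demonstration):

```python
import cudf
import dask_cudf  # noqa: F401 -- importing dask_cudf registers the cudf dispatches
from dask.dataframe.dispatch import (
    from_pyarrow_table_dispatch,
    to_pyarrow_table_dispatch,
)

df = cudf.DataFrame({"a": [1, 2, 3]})
table = to_pyarrow_table_dispatch(df, preserve_index=False)  # pyarrow.Table
roundtrip = from_pyarrow_table_dispatch(df, table)           # back to cudf.DataFrame
assert roundtrip.equals(df)
```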
Authors: - Richard (Rick) Zamora (https://github.com/rjzamora) - Ray Douglass (https://github.com/raydouglass) - gpuCI (https://github.com/GPUtester) - Mike Wendt (https://github.com/mike-wendt) - AJ Schmidt (https://github.com/ajschmidt8) - GALI PREM SAGAR (https://github.com/galipremsagar) Approvers: - Lawrence Mitchell (https://github.com/wence-) URL: https://github.com/rapidsai/cudf/pull/14069 --- python/dask_cudf/dask_cudf/backends.py | 69 +++++++++---------- .../dask_cudf/tests/test_dispatch.py | 21 ++++-- 2 files changed, 47 insertions(+), 43 deletions(-) diff --git a/python/dask_cudf/dask_cudf/backends.py b/python/dask_cudf/dask_cudf/backends.py index 2470b4d50f1..e3f4f04eb85 100644 --- a/python/dask_cudf/dask_cudf/backends.py +++ b/python/dask_cudf/dask_cudf/backends.py @@ -20,11 +20,14 @@ from dask.dataframe.dispatch import ( categorical_dtype_dispatch, concat_dispatch, + from_pyarrow_table_dispatch, group_split_dispatch, grouper_dispatch, hash_object_dispatch, is_categorical_dtype_dispatch, make_meta_dispatch, + pyarrow_schema_dispatch, + to_pyarrow_table_dispatch, tolist_dispatch, union_categoricals_dispatch, ) @@ -317,16 +320,6 @@ def get_grouper_cudf(obj): return cudf.core.groupby.Grouper -try: - from dask.dataframe.dispatch import pyarrow_schema_dispatch - - @pyarrow_schema_dispatch.register((cudf.DataFrame,)) - def get_pyarrow_schema_cudf(obj): - return obj.to_arrow().schema - -except ImportError: - pass - try: try: from dask.array.dispatch import percentile_lookup @@ -378,35 +371,37 @@ def percentile_cudf(a, q, interpolation="linear"): except ImportError: pass -try: - # Requires dask>2023.6.0 - from dask.dataframe.dispatch import ( - from_pyarrow_table_dispatch, - to_pyarrow_table_dispatch, - ) - @to_pyarrow_table_dispatch.register(cudf.DataFrame) - def _cudf_to_table(obj, preserve_index=True, **kwargs): - if kwargs: - warnings.warn( - "Ignoring the following arguments to " - f"`to_pyarrow_table_dispatch`: {list(kwargs)}" - ) - return obj.to_arrow(preserve_index=preserve_index) - - @from_pyarrow_table_dispatch.register(cudf.DataFrame) - def _table_to_cudf(obj, table, self_destruct=None, **kwargs): - # cudf ignores self_destruct. - kwargs.pop("self_destruct", None) - if kwargs: - warnings.warn( - f"Ignoring the following arguments to " - f"`from_pyarrow_table_dispatch`: {list(kwargs)}" - ) - return obj.from_arrow(table) +@pyarrow_schema_dispatch.register((cudf.DataFrame,)) +def _get_pyarrow_schema_cudf(obj, preserve_index=True, **kwargs): + if kwargs: + warnings.warn( + "Ignoring the following arguments to " + f"`pyarrow_schema_dispatch`: {list(kwargs)}" + ) + return meta_nonempty(obj).to_arrow(preserve_index=preserve_index).schema -except ImportError: - pass + +@to_pyarrow_table_dispatch.register(cudf.DataFrame) +def _cudf_to_table(obj, preserve_index=True, **kwargs): + if kwargs: + warnings.warn( + "Ignoring the following arguments to " + f"`to_pyarrow_table_dispatch`: {list(kwargs)}" + ) + return obj.to_arrow(preserve_index=preserve_index) + + +@from_pyarrow_table_dispatch.register(cudf.DataFrame) +def _table_to_cudf(obj, table, self_destruct=None, **kwargs): + # cudf ignores self_destruct. 
+ kwargs.pop("self_destruct", None) + if kwargs: + warnings.warn( + f"Ignoring the following arguments to " + f"`from_pyarrow_table_dispatch`: {list(kwargs)}" + ) + return obj.from_arrow(table) @union_categoricals_dispatch.register((cudf.Series, cudf.BaseIndex)) diff --git a/python/dask_cudf/dask_cudf/tests/test_dispatch.py b/python/dask_cudf/dask_cudf/tests/test_dispatch.py index 22cc0f161e2..cf49b1df4f4 100644 --- a/python/dask_cudf/dask_cudf/tests/test_dispatch.py +++ b/python/dask_cudf/dask_cudf/tests/test_dispatch.py @@ -3,9 +3,7 @@ import numpy as np import pandas as pd import pytest -from packaging import version -import dask from dask.base import tokenize from dask.dataframe import assert_eq from dask.dataframe.methods import is_categorical_dtype @@ -24,10 +22,6 @@ def test_is_categorical_dispatch(): assert is_categorical_dtype(cudf.Index([1, 2, 3], dtype="category")) -@pytest.mark.skipif( - version.parse(dask.__version__) <= version.parse("2023.6.0"), - reason="Pyarrow-conversion dispatch requires dask>2023.6.0", -) def test_pyarrow_conversion_dispatch(): from dask.dataframe.dispatch import ( from_pyarrow_table_dispatch, @@ -79,3 +73,18 @@ def test_deterministic_tokenize(index): df2 = df.set_index(["B", "C"], drop=False) assert tokenize(df) != tokenize(df2) assert tokenize(df2) == tokenize(df2) + + +@pytest.mark.parametrize("preserve_index", [True, False]) +def test_pyarrow_schema_dispatch(preserve_index): + from dask.dataframe.dispatch import ( + pyarrow_schema_dispatch, + to_pyarrow_table_dispatch, + ) + + df = cudf.DataFrame(np.random.randn(10, 3), columns=list("abc")) + df["d"] = cudf.Series(["cat", "dog"] * 5) + table = to_pyarrow_table_dispatch(df, preserve_index=preserve_index) + schema = pyarrow_schema_dispatch(df, preserve_index=preserve_index) + + assert schema.equals(table.schema) From 5935ef3ce26b1eb7136dcaa989a36b15071a9d0d Mon Sep 17 00:00:00 2001 From: GALI PREM SAGAR Date: Mon, 18 Sep 2023 09:53:18 -0500 Subject: [PATCH 072/150] Drop `kwargs` from `Series.count` (#14106) Fixes: #14089 This PR drops `kwargs` from `Series.count` method signature. 
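
A sketch of the resulting behavior (illustrative session, not taken from
the test suite):

```python
import cudf

s = cudf.Series([1, 2, None])
s.count()             # 2 -- number of non-null observations
s.count(skipna=True)  # now raises TypeError instead of silently accepting the kwarg
```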
Authors: - GALI PREM SAGAR (https://github.com/galipremsagar) Approvers: - Matthew Roeschke (https://github.com/mroeschke) - Benjamin Zaitlen (https://github.com/quasiben) URL: https://github.com/rapidsai/cudf/pull/14106 --- python/cudf/cudf/core/series.py | 2 +- python/cudf/cudf/tests/test_series.py | 6 ++++++ python/dask_cudf/dask_cudf/core.py | 2 +- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/python/cudf/cudf/core/series.py b/python/cudf/cudf/core/series.py index f44a3123dd3..7692d3015f8 100644 --- a/python/cudf/cudf/core/series.py +++ b/python/cudf/cudf/core/series.py @@ -2549,7 +2549,7 @@ def apply(self, func, convert_dtype=True, args=(), **kwargs): # Stats # @_cudf_nvtx_annotate - def count(self, level=None, **kwargs): + def count(self, level=None): """ Return number of non-NA/null observations in the Series diff --git a/python/cudf/cudf/tests/test_series.py b/python/cudf/cudf/tests/test_series.py index 798809b0ada..b1e991106ee 100644 --- a/python/cudf/cudf/tests/test_series.py +++ b/python/cudf/cudf/tests/test_series.py @@ -2311,3 +2311,9 @@ def test_series_round_builtin(data, digits): actual = round(gs, digits) assert_eq(expected, actual) + + +def test_series_count_invalid_param(): + s = cudf.Series([]) + with pytest.raises(TypeError): + s.count(skipna=True) diff --git a/python/dask_cudf/dask_cudf/core.py b/python/dask_cudf/dask_cudf/core.py index d2858876fcd..5b37e6e825c 100644 --- a/python/dask_cudf/dask_cudf/core.py +++ b/python/dask_cudf/dask_cudf/core.py @@ -421,7 +421,7 @@ def _naive_var(ddf, meta, skipna, ddof, split_every, out): def _parallel_var(ddf, meta, skipna, split_every, out): def _local_var(x, skipna): if skipna: - n = x.count(skipna=skipna) + n = x.count() avg = x.mean(skipna=skipna) else: # Not skipping nulls, so might as well From 8e081c015417c5a8d2a99f9db6bbc9a2c438e477 Mon Sep 17 00:00:00 2001 From: GALI PREM SAGAR Date: Mon, 18 Sep 2023 12:51:08 -0500 Subject: [PATCH 073/150] Add support for nested dict in `DataFrame` constructor (#14119) Fixes: #14096 This PR enables nested dict initialization support in `DataFrame` constructor. 
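
A small sketch of what now works, mirroring the new test below (the sample
data is illustrative):

```python
import cudf

data = {
    "one": {"col_a": "foo1", "col_b": "bar1"},
    "two": {"col_a": "foo2", "col_b": "bar2"},
}
cudf.DataFrame(data)  # matches pd.DataFrame(data): index col_a/col_b, columns one/two
```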
Authors:
  - GALI PREM SAGAR (https://github.com/galipremsagar)

Approvers:
  - Matthew Roeschke (https://github.com/mroeschke)

URL: https://github.com/rapidsai/cudf/pull/14119
---
 python/cudf/cudf/core/dataframe.py       |  4 ++--
 python/cudf/cudf/tests/test_dataframe.py | 19 +++++++++++++++++++
 2 files changed, 21 insertions(+), 2 deletions(-)

diff --git a/python/cudf/cudf/core/dataframe.py b/python/cudf/cudf/core/dataframe.py
index 5a3d25a08a7..4fc175512a0 100644
--- a/python/cudf/cudf/core/dataframe.py
+++ b/python/cudf/cudf/core/dataframe.py
@@ -977,7 +977,7 @@ def _align_input_series_indices(data, index):
     input_series = [
         Series(val)
         for val in data.values()
-        if isinstance(val, (pd.Series, Series))
+        if isinstance(val, (pd.Series, Series, dict))
     ]
 
     if input_series:
@@ -994,7 +994,7 @@ def _align_input_series_indices(data, index):
             index = aligned_input_series[0].index
 
             for name, val in data.items():
-                if isinstance(val, (pd.Series, Series)):
+                if isinstance(val, (pd.Series, Series, dict)):
                     data[name] = aligned_input_series.pop(0)
 
     return data, index
diff --git a/python/cudf/cudf/tests/test_dataframe.py b/python/cudf/cudf/tests/test_dataframe.py
index 61372bab3ad..652bdbbee45 100644
--- a/python/cudf/cudf/tests/test_dataframe.py
+++ b/python/cudf/cudf/tests/test_dataframe.py
@@ -10349,3 +10349,22 @@ def test_dataframe_round_builtin(digits):
     actual = round(gdf, digits)
 
     assert_eq(expected, actual)
+
+
+def test_dataframe_init_from_nested_dict():
+    ordered_dict = OrderedDict(
+        [
+            ("one", OrderedDict([("col_a", "foo1"), ("col_b", "bar1")])),
+            ("two", OrderedDict([("col_a", "foo2"), ("col_b", "bar2")])),
+            ("three", OrderedDict([("col_a", "foo3"), ("col_b", "bar3")])),
+        ]
+    )
+    pdf = pd.DataFrame(ordered_dict)
+    gdf = cudf.DataFrame(ordered_dict)
+
+    assert_eq(pdf, gdf)
+    regular_dict = {key: dict(value) for key, value in ordered_dict.items()}
+
+    pdf = pd.DataFrame(regular_dict)
+    gdf = cudf.DataFrame(regular_dict)
+    assert_eq(pdf, gdf)
From 4467066c952111c0131383784d3eb6bf3248f0ac Mon Sep 17 00:00:00 2001
From: GALI PREM SAGAR
Date: Mon, 18 Sep 2023 12:51:53 -0500
Subject: [PATCH 074/150] Restrict iterables of `DataFrame`'s as input to
 `DataFrame` constructor (#14118)

Fixes: #14094

This PR raises an error when an iterable of `DataFrame`s is detected in
the `DataFrame` constructor.
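
A sketch of the new error, mirroring the test added below:

```python
import cudf

df = cudf.DataFrame(range(2))
cudf.DataFrame([df])  # now raises ValueError: Must pass 2-d input.
```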
Authors:
  - GALI PREM SAGAR (https://github.com/galipremsagar)

Approvers:
  - Matthew Roeschke (https://github.com/mroeschke)

URL: https://github.com/rapidsai/cudf/pull/14118
---
 python/cudf/cudf/core/dataframe.py       | 11 ++++++-----
 python/cudf/cudf/tests/test_dataframe.py |  6 ++++++
 2 files changed, 12 insertions(+), 5 deletions(-)

diff --git a/python/cudf/cudf/core/dataframe.py b/python/cudf/cudf/core/dataframe.py
index 4fc175512a0..84c16b71997 100644
--- a/python/cudf/cudf/core/dataframe.py
+++ b/python/cudf/cudf/core/dataframe.py
@@ -852,12 +852,13 @@ def _init_from_list_like(self, data, index=None, columns=None):
         elif len(data) > 0 and isinstance(data[0], pd._libs.interval.Interval):
             data = DataFrame.from_pandas(pd.DataFrame(data))
             self._data = data._data
+        elif any(
+            not isinstance(col, (abc.Iterable, abc.Sequence)) for col in data
+        ):
+            raise TypeError("Inputs should be an iterable or sequence.")
+        elif len(data) > 0 and not can_convert_to_column(data[0]):
+            raise ValueError("Must pass 2-d input.")
         else:
-            if any(
-                not isinstance(col, (abc.Iterable, abc.Sequence))
-                for col in data
-            ):
-                raise TypeError("Inputs should be an iterable or sequence.")
             if (
                 len(data) > 0
                 and columns is None
diff --git a/python/cudf/cudf/tests/test_dataframe.py b/python/cudf/cudf/tests/test_dataframe.py
index 652bdbbee45..cbef9bfa2d8 100644
--- a/python/cudf/cudf/tests/test_dataframe.py
+++ b/python/cudf/cudf/tests/test_dataframe.py
@@ -10260,6 +10260,12 @@ def __getitem__(self, key):
         cudf.DataFrame({"a": A()})
 
 
+def test_dataframe_constructor_dataframe_list():
+    df = cudf.DataFrame(range(2))
+    with pytest.raises(ValueError):
+        cudf.DataFrame([df])
+
+
 def test_dataframe_constructor_from_namedtuple():
     Point1 = namedtuple("Point1", ["a", "b", "c"])
     Point2 = namedtuple("Point1", ["x", "y"])
From 2acd3dfa9e859feb4d803d9446c89b80f10bd54a Mon Sep 17 00:00:00 2001
From: Vukasin Milovanovic
Date: Mon, 18 Sep 2023 14:10:14 -0700
Subject: [PATCH 075/150] Expand statistics support in ORC writer (#13848)

Closes #7087, closes #13793, closes #13899

This PR adds support for several cases and statistics types:
- sum statistics are included even when all elements are null (no minmax);
- sum statistics are included in double stats;
- minimum/maximum and minimumNanos/maximumNanos are included in timestamp stats;
- hasNull field is written for all columns;
- decimal statistics are now written.

Added tests for all supported stats.
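
As a rough illustration of how the expanded statistics surface through the
Python helper exercised in the tests below (a sketch; `read_orc_statistics`
and the exact layout of its return value are assumptions based on that test
code):

```python
import cudf

df = cudf.DataFrame({"d": [1.5, None, 2.5]})
df.to_orc("stats.orc", statistics="ROWGROUP")

# One dict of parsed statistics per column, per file / per stripe
file_stats, stripes_stats = cudf.io.orc.read_orc_statistics(["stats.orc"])
col_stats = file_stats[0]["d"]
col_stats["minimum"], col_stats["maximum"]  # 1.5, 2.5
col_stats["number_of_values"]               # 2 -- nulls excluded; double sums are now written too
```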
Authors: - Vukasin Milovanovic (https://github.com/vuule) - Karthikeyan (https://github.com/karthikeyann) Approvers: - Lawrence Mitchell (https://github.com/wence-) - Robert (Bobby) Evans (https://github.com/revans2) - Vyas Ramasubramani (https://github.com/vyasr) - Karthikeyan (https://github.com/karthikeyann) URL: https://github.com/rapidsai/cudf/pull/13848 --- cpp/include/cudf/io/orc_metadata.hpp | 10 +- .../detail/convert/fixed_point_to_string.cuh | 80 +++++++++ cpp/src/io/orc/orc.cpp | 4 +- cpp/src/io/orc/stats_enc.cu | 169 +++++++++++++----- cpp/src/io/parquet/page_enc.cu | 4 +- .../statistics_type_identification.cuh | 19 +- .../io/statistics/typed_statistics_chunk.cuh | 2 +- .../strings/convert/convert_fixed_point.cu | 54 +----- cpp/tests/io/orc_test.cpp | 109 +++++++++-- python/cudf/cudf/tests/test_orc.py | 60 ++++--- 10 files changed, 356 insertions(+), 155 deletions(-) create mode 100644 cpp/include/cudf/strings/detail/convert/fixed_point_to_string.cuh diff --git a/cpp/include/cudf/io/orc_metadata.hpp b/cpp/include/cudf/io/orc_metadata.hpp index 623ee2e49fc..82d59803c25 100644 --- a/cpp/include/cudf/io/orc_metadata.hpp +++ b/cpp/include/cudf/io/orc_metadata.hpp @@ -111,10 +111,10 @@ struct string_statistics : minmax_statistics, sum_statistics count; ///< Count of `false` and `true` values + std::vector count; ///< count of `true` values }; /** @@ -141,8 +141,10 @@ using binary_statistics = sum_statistics; * the UNIX epoch. The `minimum_utc` and `maximum_utc` are the same values adjusted to UTC. */ struct timestamp_statistics : minmax_statistics { - std::optional minimum_utc; ///< minimum in milliseconds - std::optional maximum_utc; ///< maximum in milliseconds + std::optional minimum_utc; ///< minimum in milliseconds + std::optional maximum_utc; ///< maximum in milliseconds + std::optional minimum_nanos; ///< nanoseconds part of the minimum + std::optional maximum_nanos; ///< nanoseconds part of the maximum }; namespace orc { diff --git a/cpp/include/cudf/strings/detail/convert/fixed_point_to_string.cuh b/cpp/include/cudf/strings/detail/convert/fixed_point_to_string.cuh new file mode 100644 index 00000000000..0ee26ec9ee2 --- /dev/null +++ b/cpp/include/cudf/strings/detail/convert/fixed_point_to_string.cuh @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#pragma once + +#include + +namespace cudf::strings::detail { + +/** + * @brief Returns the number of digits in the given fixed point number. 
+ * + * @param value The value of the fixed point number + * @param scale The scale of the fixed point number + * @return int32_t The number of digits required to represent the fixed point number + */ +__device__ inline int32_t fixed_point_string_size(__int128_t const& value, int32_t scale) +{ + if (scale >= 0) return count_digits(value) + scale; + + auto const abs_value = numeric::detail::abs(value); + auto const exp_ten = numeric::detail::exp10<__int128_t>(-scale); + auto const fraction = count_digits(abs_value % exp_ten); + auto const num_zeros = std::max(0, (-scale - fraction)); + return static_cast(value < 0) + // sign if negative + count_digits(abs_value / exp_ten) + // integer + 1 + // decimal point + num_zeros + // zeros padding + fraction; // size of fraction +} + +/** + * @brief Converts the given fixed point number to a string. + * + * Caller is responsible for ensuring that the output buffer is large enough. The required output + * buffer size can be obtained by calling `fixed_point_string_size`. + * + * @param value The value of the fixed point number + * @param scale The scale of the fixed point number + * @param out_ptr The pointer to the output string + */ +__device__ inline void fixed_point_to_string(__int128_t const& value, int32_t scale, char* out_ptr) +{ + if (scale >= 0) { + out_ptr += integer_to_string(value, out_ptr); + thrust::generate_n(thrust::seq, out_ptr, scale, []() { return '0'; }); // add zeros + return; + } + + // scale < 0 + // write format: [-]integer.fraction + // where integer = abs(value) / (10^abs(scale)) + // fraction = abs(value) % (10^abs(scale)) + if (value < 0) *out_ptr++ = '-'; // add sign + auto const abs_value = numeric::detail::abs(value); + auto const exp_ten = numeric::detail::exp10<__int128_t>(-scale); + auto const num_zeros = std::max(0, (-scale - count_digits(abs_value % exp_ten))); + + out_ptr += integer_to_string(abs_value / exp_ten, out_ptr); // add the integer part + *out_ptr++ = '.'; // add decimal point + + thrust::generate_n(thrust::seq, out_ptr, num_zeros, []() { return '0'; }); // add zeros + out_ptr += num_zeros; + + integer_to_string(abs_value % exp_ten, out_ptr); // add the fraction part +} + +} // namespace cudf::strings::detail diff --git a/cpp/src/io/orc/orc.cpp b/cpp/src/io/orc/orc.cpp index fc50b7118be..bc399b75ef9 100644 --- a/cpp/src/io/orc/orc.cpp +++ b/cpp/src/io/orc/orc.cpp @@ -178,7 +178,9 @@ void ProtobufReader::read(timestamp_statistics& s, size_t maxlen) auto op = std::tuple(field_reader(1, s.minimum), field_reader(2, s.maximum), field_reader(3, s.minimum_utc), - field_reader(4, s.maximum_utc)); + field_reader(4, s.maximum_utc), + field_reader(5, s.minimum_nanos), + field_reader(6, s.maximum_nanos)); function_builder(s, maxlen, op); } diff --git a/cpp/src/io/orc/stats_enc.cu b/cpp/src/io/orc/stats_enc.cu index 069841980c1..69d7ec95acd 100644 --- a/cpp/src/io/orc/stats_enc.cu +++ b/cpp/src/io/orc/stats_enc.cu @@ -16,15 +16,16 @@ #include "orc_gpu.hpp" -#include #include +#include +#include + #include -namespace cudf { -namespace io { -namespace orc { -namespace gpu { +namespace cudf::io::orc::gpu { + +using strings::detail::fixed_point_string_size; constexpr unsigned int init_threads_per_group = 32; constexpr unsigned int init_groups_per_block = 4; @@ -58,13 +59,14 @@ __global__ void __launch_bounds__(init_threads_per_block) constexpr unsigned int buffersize_reduction_dim = 32; constexpr unsigned int block_size = buffersize_reduction_dim * buffersize_reduction_dim; constexpr unsigned int pb_fld_hdrlen = 1; 
-constexpr unsigned int pb_fld_hdrlen16 = 2; // > 127-byte length -constexpr unsigned int pb_fld_hdrlen32 = 5; // > 16KB length +constexpr unsigned int pb_fld_hdrlen32 = 5; +constexpr unsigned int pb_fldlen_int32 = 5; constexpr unsigned int pb_fldlen_int64 = 10; constexpr unsigned int pb_fldlen_float64 = 8; -constexpr unsigned int pb_fldlen_decimal = 40; // Assume decimal2string fits in 40 characters constexpr unsigned int pb_fldlen_bucket1 = 1 + pb_fldlen_int64; -constexpr unsigned int pb_fldlen_common = 2 * pb_fld_hdrlen + pb_fldlen_int64; +// statistics field number + number of values + has null +constexpr unsigned int pb_fldlen_common = + pb_fld_hdrlen + (pb_fld_hdrlen + pb_fldlen_int64) + 2 * pb_fld_hdrlen; template __global__ void __launch_bounds__(block_size, 1) @@ -87,21 +89,32 @@ __global__ void __launch_bounds__(block_size, 1) case dtype_int8: case dtype_int16: case dtype_int32: - case dtype_date32: case dtype_int64: - case dtype_timestamp64: stats_len = pb_fldlen_common + pb_fld_hdrlen + 3 * (pb_fld_hdrlen + pb_fldlen_int64); break; + case dtype_date32: + stats_len = pb_fldlen_common + pb_fld_hdrlen + 2 * (pb_fld_hdrlen + pb_fldlen_int64); + break; + case dtype_timestamp64: + stats_len = pb_fldlen_common + pb_fld_hdrlen + 4 * (pb_fld_hdrlen + pb_fldlen_int64) + + 2 * (pb_fld_hdrlen + pb_fldlen_int32); + break; case dtype_float32: case dtype_float64: stats_len = pb_fldlen_common + pb_fld_hdrlen + 3 * (pb_fld_hdrlen + pb_fldlen_float64); break; case dtype_decimal64: - case dtype_decimal128: - stats_len = pb_fldlen_common + pb_fld_hdrlen16 + 3 * (pb_fld_hdrlen + pb_fldlen_decimal); - break; + case dtype_decimal128: { + auto const scale = groups[idx].col_dtype.scale(); + auto const min_size = fixed_point_string_size(chunks[idx].min_value.d128_val, scale); + auto const max_size = fixed_point_string_size(chunks[idx].max_value.d128_val, scale); + auto const sum_size = fixed_point_string_size(chunks[idx].sum.d128_val, scale); + // common + total field length + encoded string lengths + strings + stats_len = pb_fldlen_common + pb_fld_hdrlen32 + 3 * (pb_fld_hdrlen + pb_fld_hdrlen32) + + min_size + max_size + sum_size; + } break; case dtype_string: - stats_len = pb_fldlen_common + pb_fld_hdrlen32 + 3 * (pb_fld_hdrlen + pb_fldlen_int64) + + stats_len = pb_fldlen_common + pb_fld_hdrlen32 + 3 * (pb_fld_hdrlen + pb_fld_hdrlen32) + chunks[idx].min_value.str_val.length + chunks[idx].max_value.str_val.length; break; case dtype_none: stats_len = pb_fldlen_common; @@ -126,9 +139,6 @@ struct stats_state_s { statistics_chunk chunk; statistics_merge_group group; statistics_dtype stats_dtype; //!< Statistics data type for this column - // ORC stats - uint64_t numberOfValues; - uint8_t hasNull; }; /* @@ -178,6 +188,15 @@ __device__ inline uint8_t* pb_put_binary(uint8_t* p, uint32_t id, void const* by return p + len; } +__device__ inline uint8_t* pb_put_decimal( + uint8_t* p, uint32_t id, __int128_t value, int32_t scale, int32_t len) +{ + p[0] = id * 8 + ProtofType::FIXEDLEN; + p = pb_encode_uint(p + 1, len); + strings::detail::fixed_point_to_string(value, scale, reinterpret_cast(p)); + return p + len; +} + // Protobuf field encoding for 64-bit raw encoding (double) __device__ inline uint8_t* pb_put_fixed64(uint8_t* p, uint32_t id, void const* raw64) { @@ -186,6 +205,15 @@ __device__ inline uint8_t* pb_put_fixed64(uint8_t* p, uint32_t id, void const* r return p + 9; } +// Splits a nanosecond timestamp into milliseconds and nanoseconds +__device__ std::pair split_nanosecond_timestamp(int64_t nano_count) +{ 
+ auto const ns = cuda::std::chrono::nanoseconds(nano_count); + auto const ms_floor = cuda::std::chrono::floor(ns); + auto const ns_remainder = ns - ms_floor; + return {ms_floor.count(), ns_remainder.count()}; +} + /** * @brief Encode statistics in ORC protobuf format * @@ -228,12 +256,14 @@ __global__ void __launch_bounds__(encode_threads_per_block) // Encode and update actual bfr size if (idx < statistics_count && t == 0) { - s->chunk = chunks[idx]; - s->group = groups[idx]; - s->stats_dtype = s->group.stats_dtype; - s->base = blob_bfr + s->group.start_chunk; - s->end = blob_bfr + s->group.start_chunk + s->group.num_chunks; - uint8_t* cur = pb_put_uint(s->base, 1, s->chunk.non_nulls); + s->chunk = chunks[idx]; + s->group = groups[idx]; + s->stats_dtype = s->group.stats_dtype; + s->base = blob_bfr + s->group.start_chunk; + s->end = blob_bfr + s->group.start_chunk + s->group.num_chunks; + uint8_t* cur = pb_put_uint(s->base, 1, s->chunk.non_nulls); + cur = pb_put_uint(cur, 10, s->chunk.null_count != 0); // hasNull (bool) + uint8_t* fld_start = cur; switch (s->stats_dtype) { case dtype_int8: @@ -265,11 +295,14 @@ __global__ void __launch_bounds__(encode_threads_per_block) // optional double maximum = 2; // optional double sum = 3; // } - if (s->chunk.has_minmax) { + if (s->chunk.has_minmax || s->chunk.has_sum) { *cur = 3 * 8 + ProtofType::FIXEDLEN; cur += 2; - cur = pb_put_fixed64(cur, 1, &s->chunk.min_value.fp_val); - cur = pb_put_fixed64(cur, 2, &s->chunk.max_value.fp_val); + if (s->chunk.has_minmax) { + cur = pb_put_fixed64(cur, 1, &s->chunk.min_value.fp_val); + cur = pb_put_fixed64(cur, 2, &s->chunk.max_value.fp_val); + } + if (s->chunk.has_sum) { cur = pb_put_fixed64(cur, 3, &s->chunk.sum.fp_val); } fld_start[1] = cur - (fld_start + 2); } break; @@ -280,18 +313,25 @@ __global__ void __launch_bounds__(encode_threads_per_block) // optional string maximum = 2; // optional sint64 sum = 3; // sum will store the total length of all strings // } - if (s->chunk.has_minmax && s->chunk.has_sum) { - uint32_t sz = (pb_put_int(cur, 3, s->chunk.sum.i_val) - cur) + - (pb_put_uint(cur, 1, s->chunk.min_value.str_val.length) - cur) + - (pb_put_uint(cur, 2, s->chunk.max_value.str_val.length) - cur) + - s->chunk.min_value.str_val.length + s->chunk.max_value.str_val.length; + if (s->chunk.has_minmax || s->chunk.has_sum) { + uint32_t sz = 0; + if (s->chunk.has_minmax) { + sz += (pb_put_uint(cur, 1, s->chunk.min_value.str_val.length) - cur) + + (pb_put_uint(cur, 2, s->chunk.max_value.str_val.length) - cur) + + s->chunk.min_value.str_val.length + s->chunk.max_value.str_val.length; + } + if (s->chunk.has_sum) { sz += pb_put_int(cur, 3, s->chunk.sum.i_val) - cur; } + cur[0] = 4 * 8 + ProtofType::FIXEDLEN; cur = pb_encode_uint(cur + 1, sz); - cur = pb_put_binary( - cur, 1, s->chunk.min_value.str_val.ptr, s->chunk.min_value.str_val.length); - cur = pb_put_binary( - cur, 2, s->chunk.max_value.str_val.ptr, s->chunk.max_value.str_val.length); - cur = pb_put_int(cur, 3, s->chunk.sum.i_val); + + if (s->chunk.has_minmax) { + cur = pb_put_binary( + cur, 1, s->chunk.min_value.str_val.ptr, s->chunk.min_value.str_val.length); + cur = pb_put_binary( + cur, 2, s->chunk.max_value.str_val.ptr, s->chunk.max_value.str_val.length); + } + if (s->chunk.has_sum) { cur = pb_put_int(cur, 3, s->chunk.sum.i_val); } } break; case dtype_bool: @@ -299,8 +339,9 @@ __global__ void __launch_bounds__(encode_threads_per_block) // message BucketStatistics { // repeated uint64 count = 1 [packed=true]; // } - if (s->chunk.has_sum) { // Sum is equal 
to the number of 'true' values - cur[0] = 5 * 8 + ProtofType::FIXEDLEN; + if (s->chunk.has_sum) { + cur[0] = 5 * 8 + ProtofType::FIXEDLEN; + // count is equal to the number of 'true' values, despite what specs say cur = pb_put_packed_uint(cur + 2, 1, s->chunk.sum.u_val); fld_start[1] = cur - (fld_start + 2); } @@ -313,8 +354,33 @@ __global__ void __launch_bounds__(encode_threads_per_block) // optional string maximum = 2; // optional string sum = 3; // } - if (s->chunk.has_minmax) { - // TODO: Decimal support (decimal min/max stored as strings) + if (s->chunk.has_minmax or s->chunk.has_sum) { + auto const scale = s->group.col_dtype.scale(); + + uint32_t sz = 0; + auto const min_size = + s->chunk.has_minmax ? fixed_point_string_size(s->chunk.min_value.d128_val, scale) : 0; + auto const max_size = + s->chunk.has_minmax ? fixed_point_string_size(s->chunk.max_value.d128_val, scale) : 0; + if (s->chunk.has_minmax) { + // encoded string lengths, plus the strings + sz += (pb_put_uint(cur, 1, min_size) - cur) + min_size + + (pb_put_uint(cur, 1, max_size) - cur) + max_size; + } + auto const sum_size = + s->chunk.has_sum ? fixed_point_string_size(s->chunk.sum.d128_val, scale) : 0; + if (s->chunk.has_sum) { sz += (pb_put_uint(cur, 1, sum_size) - cur) + sum_size; } + + cur[0] = 6 * 8 + ProtofType::FIXEDLEN; + cur = pb_encode_uint(cur + 1, sz); + + if (s->chunk.has_minmax) { + cur = pb_put_decimal(cur, 1, s->chunk.min_value.d128_val, scale, min_size); // minimum + cur = pb_put_decimal(cur, 2, s->chunk.max_value.d128_val, scale, max_size); // maximum + } + if (s->chunk.has_sum) { + cur = pb_put_decimal(cur, 3, s->chunk.sum.d128_val, scale, sum_size); // sum + } } break; case dtype_date32: @@ -338,12 +404,24 @@ __global__ void __launch_bounds__(encode_threads_per_block) // optional sint64 maximum = 2; // optional sint64 minimumUtc = 3; // min,max values saved as milliseconds since UNIX epoch // optional sint64 maximumUtc = 4; + // optional int32 minimumNanos = 5; // lower 6 TS digits for min/max to achieve nanosecond + // precision optional int32 maximumNanos = 6; // } if (s->chunk.has_minmax) { cur[0] = 9 * 8 + ProtofType::FIXEDLEN; cur += 2; - cur = pb_put_int(cur, 3, s->chunk.min_value.i_val); // minimumUtc - cur = pb_put_int(cur, 4, s->chunk.max_value.i_val); // maximumUtc + auto const [min_ms, min_ns_remainder] = + split_nanosecond_timestamp(s->chunk.min_value.i_val); + auto const [max_ms, max_ns_remainder] = + split_nanosecond_timestamp(s->chunk.max_value.i_val); + + // minimum/maximum are the same as minimumUtc/maximumUtc as we always write files in UTC + cur = pb_put_int(cur, 1, min_ms); // minimum + cur = pb_put_int(cur, 2, max_ms); // maximum + cur = pb_put_int(cur, 3, min_ms); // minimumUtc + cur = pb_put_int(cur, 4, max_ms); // maximumUtc + cur = pb_put_int(cur, 5, min_ns_remainder); // minimumNanos + cur = pb_put_int(cur, 6, max_ns_remainder); // maximumNanos fld_start[1] = cur - (fld_start + 2); } break; @@ -403,7 +481,4 @@ void orc_encode_statistics(uint8_t* blob_bfr, blob_bfr, groups, chunks, statistics_count); } -} // namespace gpu -} // namespace orc -} // namespace io -} // namespace cudf +} // namespace cudf::io::orc::gpu diff --git a/cpp/src/io/parquet/page_enc.cu b/cpp/src/io/parquet/page_enc.cu index 0af561be8da..fe0dbb85124 100644 --- a/cpp/src/io/parquet/page_enc.cu +++ b/cpp/src/io/parquet/page_enc.cu @@ -1858,8 +1858,8 @@ __device__ std::pair get_extremum(statistics_val const* s } case dtype_int64: case dtype_timestamp64: - case dtype_float64: - case dtype_decimal64: return 
{stats_val, sizeof(int64_t)}; + case dtype_float64: return {stats_val, sizeof(int64_t)}; + case dtype_decimal64: case dtype_decimal128: byte_reverse128(stats_val->d128_val, scratch); return {scratch, sizeof(__int128_t)}; diff --git a/cpp/src/io/statistics/statistics_type_identification.cuh b/cpp/src/io/statistics/statistics_type_identification.cuh index 32931d7d34d..ea8c71f0dcb 100644 --- a/cpp/src/io/statistics/statistics_type_identification.cuh +++ b/cpp/src/io/statistics/statistics_type_identification.cuh @@ -49,15 +49,15 @@ enum class is_int96_timestamp { YES, NO }; template struct conversion_map; -// Every timestamp or duration type is converted to milliseconds in ORC statistics +// Every timestamp or duration type is converted to nanoseconds in ORC statistics template struct conversion_map { - using types = std::tuple, - std::pair, - std::pair, - std::pair, - std::pair, - std::pair>; + using types = std::tuple, + std::pair, + std::pair, + std::pair, + std::pair, + std::pair>; }; // In Parquet timestamps and durations with second resolution are converted to @@ -125,7 +125,7 @@ class extrema_type { using non_arithmetic_extrema_type = typename std::conditional_t< cudf::is_fixed_point() or cudf::is_duration() or cudf::is_timestamp(), - typename std::conditional_t, __int128_t, int64_t>, + typename std::conditional_t(), __int128_t, int64_t>, typename std::conditional_t< std::is_same_v, string_view, @@ -134,8 +134,7 @@ class extrema_type { // unsigned int/bool -> uint64_t // signed int -> int64_t // float/double -> double - // decimal32/64 -> int64_t - // decimal128 -> __int128_t + // decimal32/64/128 -> __int128_t // duration_[T] -> int64_t // string_view -> string_view // byte_array_view -> byte_array_view diff --git a/cpp/src/io/statistics/typed_statistics_chunk.cuh b/cpp/src/io/statistics/typed_statistics_chunk.cuh index d007209a12a..e6ec1471cb7 100644 --- a/cpp/src/io/statistics/typed_statistics_chunk.cuh +++ b/cpp/src/io/statistics/typed_statistics_chunk.cuh @@ -244,9 +244,9 @@ get_untyped_chunk(typed_statistics_chunk const& chunk) stat.null_count = chunk.null_count; stat.has_minmax = chunk.has_minmax; stat.has_sum = [&]() { - if (!chunk.has_minmax) return false; // invalidate the sum if overflow or underflow is possible if constexpr (std::is_floating_point_v or std::is_integral_v) { + if (!chunk.has_minmax) { return true; } return std::numeric_limits::max() / chunk.non_nulls >= static_cast(chunk.maximum_value) and std::numeric_limits::lowest() / chunk.non_nulls <= diff --git a/cpp/src/strings/convert/convert_fixed_point.cu b/cpp/src/strings/convert/convert_fixed_point.cu index a3336258d3e..51aab9faeba 100644 --- a/cpp/src/strings/convert/convert_fixed_point.cu +++ b/cpp/src/strings/convert/convert_fixed_point.cu @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include @@ -200,62 +200,19 @@ struct from_fixed_point_fn { size_type* d_offsets{}; char* d_chars{}; - /** - * @brief Calculates the size of the string required to convert the element, in base-10 format. 
- * - * Output format is [-]integer.fraction - */ - __device__ int32_t compute_output_size(DecimalType value) - { - auto const scale = d_decimals.type().scale(); - - if (scale >= 0) return count_digits(value) + scale; - - auto const abs_value = numeric::detail::abs(value); - auto const exp_ten = numeric::detail::exp10(-scale); - auto const fraction = count_digits(abs_value % exp_ten); - auto const num_zeros = std::max(0, (-scale - fraction)); - return static_cast(value < 0) + // sign if negative - count_digits(abs_value / exp_ten) + // integer - 1 + // decimal point - num_zeros + // zeros padding - fraction; // size of fraction - } - /** * @brief Converts a decimal element into a string. * * The value is converted into base-10 digits [0-9] * plus the decimal point and a negative sign prefix. */ - __device__ void decimal_to_string(size_type idx) + __device__ void fixed_point_element_to_string(size_type idx) { auto const value = d_decimals.element(idx); auto const scale = d_decimals.type().scale(); char* d_buffer = d_chars + d_offsets[idx]; - if (scale >= 0) { - d_buffer += integer_to_string(value, d_buffer); - thrust::generate_n(thrust::seq, d_buffer, scale, []() { return '0'; }); // add zeros - return; - } - - // scale < 0 - // write format: [-]integer.fraction - // where integer = abs(value) / (10^abs(scale)) - // fraction = abs(value) % (10^abs(scale)) - if (value < 0) *d_buffer++ = '-'; // add sign - auto const abs_value = numeric::detail::abs(value); - auto const exp_ten = numeric::detail::exp10(-scale); - auto const num_zeros = std::max(0, (-scale - count_digits(abs_value % exp_ten))); - - d_buffer += integer_to_string(abs_value / exp_ten, d_buffer); // add the integer part - *d_buffer++ = '.'; // add decimal point - - thrust::generate_n(thrust::seq, d_buffer, num_zeros, []() { return '0'; }); // add zeros - d_buffer += num_zeros; - - integer_to_string(abs_value % exp_ten, d_buffer); // add the fraction part + fixed_point_to_string(value, scale, d_buffer); } __device__ void operator()(size_type idx) @@ -265,9 +222,10 @@ struct from_fixed_point_fn { return; } if (d_chars != nullptr) { - decimal_to_string(idx); + fixed_point_element_to_string(idx); } else { - d_offsets[idx] = compute_output_size(d_decimals.element(idx)); + d_offsets[idx] = + fixed_point_string_size(d_decimals.element(idx), d_decimals.type().scale()); } } }; diff --git a/cpp/tests/io/orc_test.cpp b/cpp/tests/io/orc_test.cpp index cff7b1cf081..890ef914713 100644 --- a/cpp/tests/io/orc_test.cpp +++ b/cpp/tests/io/orc_test.cpp @@ -976,6 +976,10 @@ TEST_F(OrcReaderTest, CombinedSkipRowTest) TEST_F(OrcStatisticsTest, Basic) { auto sequence = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i; }); + auto ts_sequence = + cudf::detail::make_counting_transform_iterator(0, [](auto i) { return (i - 4) * 1000002; }); + auto dec_sequence = + cudf::detail::make_counting_transform_iterator(0, [&](auto i) { return i * 1001; }); auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 2; }); std::vector strings{ @@ -986,11 +990,17 @@ TEST_F(OrcStatisticsTest, Basic) sequence, sequence + num_rows, validity); column_wrapper col2( sequence, sequence + num_rows, validity); - column_wrapper col3{strings.begin(), strings.end()}; - column_wrapper col4(sequence, sequence + num_rows); - column_wrapper col5( - sequence, sequence + num_rows, validity); - table_view expected({col1, col2, col3, col4, col5}); + str_col col3{strings.begin(), strings.end()}; + column_wrapper col4( + ts_sequence, 
ts_sequence + num_rows, validity); + column_wrapper col5( + ts_sequence, ts_sequence + num_rows, validity); + bool_col col6({true, true, true, true, true, false, false, false, false}, validity); + + cudf::test::fixed_point_column_wrapper col7( + dec_sequence, dec_sequence + num_rows, numeric::scale_type{-1}); + + table_view expected({col1, col2, col3, col4, col5, col6, col7}); auto filepath = temp_env->get_temp_filepath("OrcStatsMerge.orc"); @@ -1000,16 +1010,21 @@ TEST_F(OrcStatisticsTest, Basic) auto const stats = cudf::io::read_parsed_orc_statistics(cudf::io::source_info{filepath}); - auto const expected_column_names = - std::vector{"", "_col0", "_col1", "_col2", "_col3", "_col4"}; + auto expected_column_names = std::vector{""}; + std::generate_n( + std::back_inserter(expected_column_names), + expected.num_columns(), + [starting_index = 0]() mutable { return "_col" + std::to_string(starting_index++); }); EXPECT_EQ(stats.column_names, expected_column_names); auto validate_statistics = [&](std::vector const& stats) { + ASSERT_EQ(stats.size(), expected.num_columns() + 1); auto& s0 = stats[0]; EXPECT_EQ(*s0.number_of_values, 9ul); auto& s1 = stats[1]; EXPECT_EQ(*s1.number_of_values, 4ul); + EXPECT_TRUE(*s1.has_null); auto& ts1 = std::get(s1.type_specific_stats); EXPECT_EQ(*ts1.minimum, 1); EXPECT_EQ(*ts1.maximum, 7); @@ -1017,30 +1032,55 @@ TEST_F(OrcStatisticsTest, Basic) auto& s2 = stats[2]; EXPECT_EQ(*s2.number_of_values, 4ul); + EXPECT_TRUE(*s2.has_null); auto& ts2 = std::get(s2.type_specific_stats); EXPECT_EQ(*ts2.minimum, 1.); EXPECT_EQ(*ts2.maximum, 7.); - // No sum ATM, filed #7087 - ASSERT_FALSE(ts2.sum); + EXPECT_EQ(*ts2.sum, 16.); auto& s3 = stats[3]; EXPECT_EQ(*s3.number_of_values, 9ul); + EXPECT_FALSE(*s3.has_null); auto& ts3 = std::get(s3.type_specific_stats); EXPECT_EQ(*ts3.minimum, "Friday"); EXPECT_EQ(*ts3.maximum, "Wednesday"); EXPECT_EQ(*ts3.sum, 58ul); auto& s4 = stats[4]; - EXPECT_EQ(*s4.number_of_values, 9ul); - EXPECT_EQ(std::get(s4.type_specific_stats).count[0], 8ul); + EXPECT_EQ(*s4.number_of_values, 4ul); + EXPECT_TRUE(*s4.has_null); + auto& ts4 = std::get(s4.type_specific_stats); + EXPECT_EQ(*ts4.minimum, -4); + EXPECT_EQ(*ts4.maximum, 3); + EXPECT_EQ(*ts4.minimum_utc, -4); + EXPECT_EQ(*ts4.maximum_utc, 3); + EXPECT_EQ(*ts4.minimum_nanos, 999994); + EXPECT_EQ(*ts4.maximum_nanos, 6); auto& s5 = stats[5]; EXPECT_EQ(*s5.number_of_values, 4ul); + EXPECT_TRUE(*s5.has_null); auto& ts5 = std::get(s5.type_specific_stats); - EXPECT_EQ(*ts5.minimum_utc, 1000); - EXPECT_EQ(*ts5.maximum_utc, 7000); - ASSERT_FALSE(ts5.minimum); - ASSERT_FALSE(ts5.maximum); + EXPECT_EQ(*ts5.minimum, -3001); + EXPECT_EQ(*ts5.maximum, 3000); + EXPECT_EQ(*ts5.minimum_utc, -3001); + EXPECT_EQ(*ts5.maximum_utc, 3000); + EXPECT_EQ(*ts5.minimum_nanos, 994000); + EXPECT_EQ(*ts5.maximum_nanos, 6000); + + auto& s6 = stats[6]; + EXPECT_EQ(*s6.number_of_values, 4ul); + EXPECT_TRUE(*s6.has_null); + auto& ts6 = std::get(s6.type_specific_stats); + EXPECT_EQ(ts6.count[0], 2); + + auto& s7 = stats[7]; + EXPECT_EQ(*s7.number_of_values, 9ul); + EXPECT_FALSE(*s7.has_null); + auto& ts7 = std::get(s7.type_specific_stats); + EXPECT_EQ(*ts7.minimum, "0.0"); + EXPECT_EQ(*ts7.maximum, "800.8"); + EXPECT_EQ(*ts7.sum, "3603.6"); }; validate_statistics(stats.file_stats); @@ -1259,9 +1299,8 @@ TEST_F(OrcStatisticsTest, Overflow) TEST_F(OrcStatisticsTest, HasNull) { - // cudf's ORC writer doesn't yet support the ability to encode the hasNull value in statistics so - // we're embedding a file created using pyorc - // + // 
This test can now be implemented with libcudf; keeping the pyorc version to keep the test + // inputs diversified // Method to create file: // >>> import pyorc // >>> output = open("./temp.orc", "wb") @@ -1861,4 +1900,38 @@ TEST_F(OrcWriterTest, EmptyChildStringColumn) CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view()); } +template +void check_all_null_stats(cudf::io::column_statistics const& stats) +{ + EXPECT_EQ(stats.number_of_values, 0); + EXPECT_TRUE(stats.has_null); + + auto const ts = std::get(stats.type_specific_stats); + EXPECT_FALSE(ts.minimum.has_value()); + EXPECT_FALSE(ts.maximum.has_value()); + EXPECT_TRUE(ts.sum.has_value()); + EXPECT_EQ(*ts.sum, 0); +} + +TEST_F(OrcStatisticsTest, AllNulls) +{ + float64_col double_col({0., 0., 0.}, cudf::test::iterators::all_nulls()); + int32_col int_col({0, 0, 0}, cudf::test::iterators::all_nulls()); + str_col string_col({"", "", ""}, cudf::test::iterators::all_nulls()); + + cudf::table_view expected({int_col, double_col, string_col}); + + std::vector out_buffer; + cudf::io::orc_writer_options out_opts = + cudf::io::orc_writer_options::builder(cudf::io::sink_info{&out_buffer}, expected); + cudf::io::write_orc(out_opts); + + auto const stats = cudf::io::read_parsed_orc_statistics( + cudf::io::source_info{out_buffer.data(), out_buffer.size()}); + + check_all_null_stats(stats.file_stats[1]); + check_all_null_stats(stats.file_stats[2]); + check_all_null_stats(stats.file_stats[3]); +} + CUDF_TEST_PROGRAM_MAIN() diff --git a/python/cudf/cudf/tests/test_orc.py b/python/cudf/cudf/tests/test_orc.py index aafc8831bf4..07aa5430f4f 100644 --- a/python/cudf/cudf/tests/test_orc.py +++ b/python/cudf/cudf/tests/test_orc.py @@ -633,16 +633,19 @@ def test_orc_write_statistics(tmpdir, datadir, nrows, stats_freq): for col in gdf: if "minimum" in file_stats[0][col]: stats_min = file_stats[0][col]["minimum"] - actual_min = gdf[col].min() - assert normalized_equals(actual_min, stats_min) + if stats_min is not None: + actual_min = gdf[col].min() + assert normalized_equals(actual_min, stats_min) if "maximum" in file_stats[0][col]: stats_max = file_stats[0][col]["maximum"] - actual_max = gdf[col].max() - assert normalized_equals(actual_max, stats_max) + if stats_max is not None: + actual_max = gdf[col].max() + assert normalized_equals(actual_max, stats_max) if "number_of_values" in file_stats[0][col]: stats_num_vals = file_stats[0][col]["number_of_values"] - actual_num_vals = gdf[col].count() - assert stats_num_vals == actual_num_vals + if stats_num_vals is not None: + actual_num_vals = gdf[col].count() + assert stats_num_vals == actual_num_vals # compare stripe statistics with actual min/max for stripe_idx in range(0, orc_file.nstripes): @@ -651,21 +654,24 @@ def test_orc_write_statistics(tmpdir, datadir, nrows, stats_freq): stripe_df = cudf.DataFrame(stripe.to_pandas()) for col in stripe_df: if "minimum" in stripes_stats[stripe_idx][col]: - actual_min = stripe_df[col].min() stats_min = stripes_stats[stripe_idx][col]["minimum"] - assert normalized_equals(actual_min, stats_min) + if stats_min is not None: + actual_min = stripe_df[col].min() + assert normalized_equals(actual_min, stats_min) if "maximum" in stripes_stats[stripe_idx][col]: - actual_max = stripe_df[col].max() stats_max = stripes_stats[stripe_idx][col]["maximum"] - assert normalized_equals(actual_max, stats_max) + if stats_max is not None: + actual_max = stripe_df[col].max() + assert normalized_equals(actual_max, stats_max) if "number_of_values" in stripes_stats[stripe_idx][col]: stats_num_vals 
= stripes_stats[stripe_idx][col][
                     "number_of_values"
                 ]
-                actual_num_vals = stripe_df[col].count()
-                assert stats_num_vals == actual_num_vals
+                if stats_num_vals is not None:
+                    actual_num_vals = stripe_df[col].count()
+                    assert stats_num_vals == actual_num_vals
 
 
 @pytest.mark.parametrize("stats_freq", ["STRIPE", "ROWGROUP"])
@@ -733,16 +739,19 @@ def test_orc_chunked_write_statistics(tmpdir, datadir, nrows, stats_freq):
     for col in expect:
         if "minimum" in file_stats[0][col]:
             stats_min = file_stats[0][col]["minimum"]
-            actual_min = expect[col].min()
-            assert normalized_equals(actual_min, stats_min)
+            if stats_min is not None:
+                actual_min = expect[col].min()
+                assert normalized_equals(actual_min, stats_min)
 
         if "maximum" in file_stats[0][col]:
             stats_max = file_stats[0][col]["maximum"]
-            actual_max = expect[col].max()
-            assert normalized_equals(actual_max, stats_max)
+            if stats_max is not None:
+                actual_max = expect[col].max()
+                assert normalized_equals(actual_max, stats_max)
 
         if "number_of_values" in file_stats[0][col]:
             stats_num_vals = file_stats[0][col]["number_of_values"]
-            actual_num_vals = expect[col].count()
-            assert stats_num_vals == actual_num_vals
+            if stats_num_vals is not None:
+                actual_num_vals = expect[col].count()
+                assert stats_num_vals == actual_num_vals
 
     # compare stripe statistics with actual min/max
     for stripe_idx in range(0, orc_file.nstripes):
@@ -751,21 +760,24 @@
         stripe = orc_file.read_stripe(stripe_idx)
         stripe_df = cudf.DataFrame(stripe.to_pandas())
         for col in stripe_df:
             if "minimum" in stripes_stats[stripe_idx][col]:
-                actual_min = stripe_df[col].min()
                 stats_min = stripes_stats[stripe_idx][col]["minimum"]
-                assert normalized_equals(actual_min, stats_min)
+                if stats_min is not None:
+                    actual_min = stripe_df[col].min()
+                    assert normalized_equals(actual_min, stats_min)
 
             if "maximum" in stripes_stats[stripe_idx][col]:
-                actual_max = stripe_df[col].max()
                 stats_max = stripes_stats[stripe_idx][col]["maximum"]
-                assert normalized_equals(actual_max, stats_max)
+                if stats_max is not None:
+                    actual_max = stripe_df[col].max()
+                    assert normalized_equals(actual_max, stats_max)
 
             if "number_of_values" in stripes_stats[stripe_idx][col]:
                 stats_num_vals = stripes_stats[stripe_idx][col][
                     "number_of_values"
                 ]
-                actual_num_vals = stripe_df[col].count()
-                assert stats_num_vals == actual_num_vals
+                if stats_num_vals is not None:
+                    actual_num_vals = stripe_df[col].count()
+                    assert stats_num_vals == actual_num_vals
 
 
 @pytest.mark.parametrize("nrows", [1, 100, 6000000])

From bdc1f3a6e1f383cd689ba8e92903b89e49cdb8d8 Mon Sep 17 00:00:00 2001
From: David Wendt <45795991+davidwendt@users.noreply.github.com>
Date: Mon, 18 Sep 2023 19:34:29 -0400
Subject: [PATCH 076/150] Expose streams in public strings case APIs (#14056)

Add stream parameter to public strings APIs:
- `cudf::strings::capitalize()`
- `cudf::strings::title()`
- `cudf::strings::is_title()`
- `cudf::strings::to_lower()`
- `cudf::strings::to_upper()`
- `cudf::strings::swapcase()`

Reference #13744
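For example, a caller can now chain these APIs on a single stream. A minimal
sketch, assuming an existing strings column and a caller-owned stream (the
helper name `lower_then_capitalize` is illustrative, but the call signatures
are the ones declared in this patch):

```cpp
#include <cudf/column/column.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/capitalize.hpp>
#include <cudf/strings/case.hpp>
#include <cudf/strings/strings_column_view.hpp>

#include <rmm/cuda_stream_view.hpp>

// Lower-case a strings column, then capitalize each word, with all kernel
// launches and allocations ordered on the caller's stream.
std::unique_ptr<cudf::column> lower_then_capitalize(cudf::strings_column_view const& input,
                                                    rmm::cuda_stream_view stream)
{
  auto const lowered   = cudf::strings::to_lower(input, stream);
  auto const delimiter = cudf::string_scalar(" ", true, stream);
  return cudf::strings::capitalize(
    cudf::strings_column_view(lowered->view()), delimiter, stream);
}
```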
Authors:
  - David Wendt (https://github.com/davidwendt)

Approvers:
  - Mark Harris (https://github.com/harrism)
  - Vyas Ramasubramani (https://github.com/vyasr)

URL: https://github.com/rapidsai/cudf/pull/14056
---
 cpp/include/cudf/strings/capitalize.hpp | 28 ++++++++-----
 cpp/include/cudf/strings/case.hpp       |  8 +++-
 cpp/src/strings/capitalize.cu           |  9 ++--
 cpp/src/strings/case.cu                 |  9 ++--
 cpp/tests/CMakeLists.txt                |  1 +
 cpp/tests/streams/strings/case_test.cpp | 55 +++++++++++++++++++++++++
 6 files changed, 92 insertions(+), 18 deletions(-)
 create mode 100644 cpp/tests/streams/strings/case_test.cpp

diff --git a/cpp/include/cudf/strings/capitalize.hpp b/cpp/include/cudf/strings/capitalize.hpp
index 6d01ab047ba..57375e9ac6a 100644
--- a/cpp/include/cudf/strings/capitalize.hpp
+++ b/cpp/include/cudf/strings/capitalize.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2022, NVIDIA CORPORATION.
+ * Copyright (c) 2020-2023, NVIDIA CORPORATION.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -50,16 +50,18 @@ namespace strings {
  *
  * Any null string entries return corresponding null output column entries.
  *
- * @throw cudf::logic_error if `delimiter.is_valid()` is `false`.
+ * @throw cudf::logic_error if `delimiter.is_valid()` is `false`.
  *
- * @param input String column.
- * @param delimiters Characters for identifying words to capitalize.
+ * @param input String column
+ * @param delimiters Characters for identifying words to capitalize
+ * @param stream CUDA stream used for device memory operations and kernel launches
  * @param mr Device memory resource used to allocate the returned column's device memory
- * @return Column of strings capitalized from the input column.
+ * @return Column of strings capitalized from the input column
  */
 std::unique_ptr<column> capitalize(
   strings_column_view const& input,
-  string_scalar const& delimiters = string_scalar(""),
+  string_scalar const& delimiters = string_scalar("", true, cudf::get_default_stream()),
+  rmm::cuda_stream_view stream = cudf::get_default_stream(),
   rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());

 /**
@@ -83,14 +85,16 @@ std::unique_ptr<column> capitalize(
  *
  * Any null string entries return corresponding null output column entries.
  *
- * @param input String column.
- * @param sequence_type The character type that is used when identifying words.
+ * @param input String column
+ * @param sequence_type The character type that is used when identifying words
+ * @param stream CUDA stream used for device memory operations and kernel launches
  * @param mr Device memory resource used to allocate the returned column's device memory
- * @return Column of titled strings.
+ * @return Column of titled strings
  */
 std::unique_ptr<column> title(
   strings_column_view const& input,
   string_character_types sequence_type = string_character_types::ALPHA,
+  rmm::cuda_stream_view stream = cudf::get_default_stream(),
   rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());

 /**
@@ -112,12 +116,14 @@ std::unique_ptr<column> title(
  *
  * Any null string entries result in corresponding null output column entries.
  *
- * @param input String column.
+ * @param input String column
+ * @param stream CUDA stream used for device memory operations and kernel launches
  * @param mr Device memory resource used to allocate the returned column's device memory
- * @return Column of type BOOL8.
+ * @return Column of type BOOL8
  */
 std::unique_ptr<column> is_title(
   strings_column_view const& input,
+  rmm::cuda_stream_view stream = cudf::get_default_stream(),
   rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());

 /** @} */  // end of doxygen group
diff --git a/cpp/include/cudf/strings/case.hpp b/cpp/include/cudf/strings/case.hpp
index 06ba4f8d882..94191686a92 100644
--- a/cpp/include/cudf/strings/case.hpp
+++ b/cpp/include/cudf/strings/case.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2022, NVIDIA CORPORATION.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -38,11 +38,13 @@ namespace strings {
  * Any null entries create null entries in the output column.
  *
  * @param strings Strings instance for this operation.
+ * @param stream CUDA stream used for device memory operations and kernel launches
  * @param mr Device memory resource used to allocate the returned column's device memory.
  * @return New column of strings with characters converted.
  */
 std::unique_ptr<column> to_lower(
   strings_column_view const& strings,
+  rmm::cuda_stream_view stream = cudf::get_default_stream(),
   rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());

 /**
@@ -55,11 +57,13 @@ std::unique_ptr<column> to_lower(
  * Any null entries create null entries in the output column.
  *
  * @param strings Strings instance for this operation.
+ * @param stream CUDA stream used for device memory operations and kernel launches
  * @param mr Device memory resource used to allocate the returned column's device memory.
  * @return New column of strings with characters converted.
  */
 std::unique_ptr<column> to_upper(
   strings_column_view const& strings,
+  rmm::cuda_stream_view stream = cudf::get_default_stream(),
   rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());

 /**
@@ -73,11 +77,13 @@ std::unique_ptr<column> to_upper(
  * Any null entries create null entries in the output column.
  *
  * @param strings Strings instance for this operation.
+ * @param stream CUDA stream used for device memory operations and kernel launches
  * @param mr Device memory resource used to allocate the returned column's device memory.
  * @return New column of strings with characters converted.
  */
 std::unique_ptr<column> swapcase(
   strings_column_view const& strings,
+  rmm::cuda_stream_view stream = cudf::get_default_stream(),
   rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());

 /** @} */  // end of doxygen group
diff --git a/cpp/src/strings/capitalize.cu b/cpp/src/strings/capitalize.cu
index 4e248922702..c555031b588 100644
--- a/cpp/src/strings/capitalize.cu
+++ b/cpp/src/strings/capitalize.cu
@@ -287,25 +287,28 @@ std::unique_ptr<column> is_title(strings_column_view const& input,

 std::unique_ptr<column> capitalize(strings_column_view const& input,
                                    string_scalar const& delimiter,
+                                   rmm::cuda_stream_view stream,
                                    rmm::mr::device_memory_resource* mr)
 {
   CUDF_FUNC_RANGE();
-  return detail::capitalize(input, delimiter, cudf::get_default_stream(), mr);
+  return detail::capitalize(input, delimiter, stream, mr);
 }

 std::unique_ptr<column> title(strings_column_view const& input,
                               string_character_types sequence_type,
+                              rmm::cuda_stream_view stream,
                               rmm::mr::device_memory_resource* mr)
 {
   CUDF_FUNC_RANGE();
-  return detail::title(input, sequence_type, cudf::get_default_stream(), mr);
+  return detail::title(input, sequence_type, stream, mr);
 }

 std::unique_ptr<column> is_title(strings_column_view const& input,
+                                 rmm::cuda_stream_view stream,
                                  rmm::mr::device_memory_resource* mr)
 {
   CUDF_FUNC_RANGE();
-  return detail::is_title(input, cudf::get_default_stream(), mr);
+  return detail::is_title(input, stream, mr);
 }

 }  // namespace strings
diff --git a/cpp/src/strings/case.cu b/cpp/src/strings/case.cu
index c5fe7a19f53..8f4c2ee574a 100644
--- a/cpp/src/strings/case.cu
+++ b/cpp/src/strings/case.cu
@@ -310,24 +310,27 @@ std::unique_ptr<column> swapcase(strings_column_view const& strings,

 // APIs

 std::unique_ptr<column> to_lower(strings_column_view const& strings,
+                                 rmm::cuda_stream_view stream,
                                  rmm::mr::device_memory_resource* mr)
 {
   CUDF_FUNC_RANGE();
-  return detail::to_lower(strings, cudf::get_default_stream(), mr);
+  return detail::to_lower(strings, stream, mr);
 }

 std::unique_ptr<column> to_upper(strings_column_view const& strings,
+                                 rmm::cuda_stream_view stream,
                                  rmm::mr::device_memory_resource* mr)
 {
   CUDF_FUNC_RANGE();
-  return detail::to_upper(strings, cudf::get_default_stream(), mr);
+  return detail::to_upper(strings, stream, mr);
 }

 std::unique_ptr<column> swapcase(strings_column_view const& strings,
+                                 rmm::cuda_stream_view stream,
                                  rmm::mr::device_memory_resource* mr)
 {
   CUDF_FUNC_RANGE();
-  return detail::swapcase(strings, cudf::get_default_stream(), mr);
+  return detail::swapcase(strings, stream, mr);
 }

 }  // namespace strings
diff --git a/cpp/tests/CMakeLists.txt b/cpp/tests/CMakeLists.txt
index a69dc9bf2f8..4923ef5c903 100644
--- a/cpp/tests/CMakeLists.txt
+++ b/cpp/tests/CMakeLists.txt
@@ -627,6 +627,7 @@ ConfigureTest(STREAM_CONCATENATE_TEST streams/concatenate_test.cpp STREAM_MODE testing)
 ConfigureTest(STREAM_FILLING_TEST streams/filling_test.cpp STREAM_MODE testing)
 ConfigureTest(STREAM_REPLACE_TEST streams/replace_test.cpp STREAM_MODE testing)
 ConfigureTest(STREAM_SEARCH_TEST streams/search_test.cpp STREAM_MODE testing)
+ConfigureTest(STREAM_STRINGS_TEST streams/strings/case_test.cpp STREAM_MODE testing)

 # ##################################################################################################
 # Install tests ####################################################################################
diff --git a/cpp/tests/streams/strings/case_test.cpp b/cpp/tests/streams/strings/case_test.cpp
new file mode 100644
index 00000000000..df3eabd773a
--- /dev/null
+++ b/cpp/tests/streams/strings/case_test.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2023, NVIDIA CORPORATION.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include +#include + +#include +#include +#include + +class StringsCaseTest : public cudf::test::BaseFixture {}; + +TEST_F(StringsCaseTest, LowerUpper) +{ + auto const input = + cudf::test::strings_column_wrapper({"", + "The quick brown fox", + "jumps over the lazy dog.", + "all work and no play makes Jack a dull boy", + R"(!"#$%&'()*+,-./0123456789:;<=>?@[\]^_`{|}~)"}); + auto view = cudf::strings_column_view(input); + + cudf::strings::to_lower(view, cudf::test::get_default_stream()); + cudf::strings::to_upper(view, cudf::test::get_default_stream()); + cudf::strings::swapcase(view, cudf::test::get_default_stream()); +} + +TEST_F(StringsCaseTest, Capitalize) +{ + auto const input = + cudf::test::strings_column_wrapper({"", + "The Quick Brown Fox", + "jumps over the lazy dog", + "all work and no play makes Jack a dull boy"}); + auto view = cudf::strings_column_view(input); + + auto const delimiter = cudf::string_scalar(" ", true, cudf::test::get_default_stream()); + cudf::strings::capitalize(view, delimiter, cudf::test::get_default_stream()); + cudf::strings::is_title(view, cudf::test::get_default_stream()); + cudf::strings::title( + view, cudf::strings::string_character_types::ALPHA, cudf::test::get_default_stream()); +} From c016b58b24e63468e9110a6ca82adfc5fd61202d Mon Sep 17 00:00:00 2001 From: Bradley Dice Date: Tue, 19 Sep 2023 07:50:20 -0500 Subject: [PATCH 077/150] Update to clang 16.0.6. (#14120) This PR updates cudf to use clang 16.0.6. The previous version 16.0.1 has some minor formatting issues affecting several RAPIDS repos. Authors: - Bradley Dice (https://github.com/bdice) Approvers: - Mark Harris (https://github.com/harrism) - David Wendt (https://github.com/davidwendt) URL: https://github.com/rapidsai/cudf/pull/14120 --- .pre-commit-config.yaml | 2 +- cpp/benchmarks/iterator/iterator.cu | 2 +- .../stream_compaction/apply_boolean_mask.cpp | 4 +- cpp/benchmarks/string/char_types.cpp | 2 +- cpp/benchmarks/string/extract.cpp | 2 +- .../cudf/column/column_device_view.cuh | 2 +- cpp/include/cudf/detail/copy_if.cuh | 2 +- cpp/include/cudf/detail/indexalator.cuh | 4 +- cpp/include/cudf/detail/join.hpp | 4 +- cpp/include/cudf/fixed_point/fixed_point.hpp | 2 +- cpp/include/cudf/groupby.hpp | 4 +- cpp/include/cudf/io/csv.hpp | 2 +- cpp/include/cudf/io/json.hpp | 2 +- cpp/include/cudf/strings/detail/utf8.hpp | 36 ++-- cpp/include/cudf/table/row_operators.cuh | 4 +- cpp/include/cudf/table/table_view.hpp | 2 +- cpp/include/cudf/wrappers/dictionary.hpp | 2 +- cpp/include/cudf_test/base_fixture.hpp | 4 +- cpp/include/nvtext/subword_tokenize.hpp | 2 +- cpp/scripts/run-clang-tidy.py | 2 +- cpp/src/copying/contiguous_split.cu | 8 +- cpp/src/groupby/sort/functors.hpp | 10 +- cpp/src/io/avro/avro_gpu.cu | 2 +- cpp/src/io/comp/cpu_unbz2.cpp | 2 +- cpp/src/io/comp/debrotli.cu | 4 +- cpp/src/io/comp/gpuinflate.cu | 18 +- cpp/src/io/comp/uncomp.cpp | 10 +- cpp/src/io/comp/unsnap.cu | 2 +- cpp/src/io/json/json_column.cu | 2 +- cpp/src/io/json/nested_json_gpu.cu | 160 +++++++++--------- cpp/src/io/orc/orc_gpu.hpp | 2 +- cpp/src/io/orc/stripe_data.cu | 4 +- .../io/parquet/compact_protocol_reader.cpp | 2 +- .../io/parquet/compact_protocol_writer.cpp | 2 +- cpp/src/io/parquet/delta_binary.cuh | 20 +-- cpp/src/io/parquet/page_delta_decode.cu | 2 +- cpp/src/io/parquet/parquet.hpp | 4 +- cpp/src/io/parquet/parquet_gpu.hpp | 22 +-- cpp/src/io/parquet/reader_impl_preprocess.cu | 2 +- cpp/src/join/join.cu | 4 +- .../quantiles/tdigest/tdigest_aggregation.cu | 2 +- 
.../rolling/detail/rolling_collect_list.cuh | 2 +- cpp/src/strings/char_types/char_types.cu | 4 +- cpp/src/strings/convert/convert_datetime.cu | 6 +- cpp/src/strings/convert/convert_durations.cu | 2 +- cpp/src/strings/convert/convert_floats.cu | 6 +- cpp/src/strings/convert/convert_integers.cu | 2 +- cpp/src/strings/convert/convert_ipv4.cu | 2 +- cpp/src/strings/convert/convert_urls.cu | 4 +- cpp/src/strings/json/json_path.cu | 2 +- cpp/src/strings/regex/regcomp.cpp | 14 +- cpp/src/strings/regex/regcomp.h | 8 +- cpp/src/strings/regex/regex.cuh | 18 +- cpp/src/strings/regex/regex.inl | 10 +- cpp/src/strings/replace/replace_re.cu | 2 +- cpp/src/strings/split/partition.cu | 2 +- cpp/src/strings/split/split.cuh | 2 +- cpp/src/strings/split/split_re.cu | 2 +- cpp/src/strings/utilities.cu | 6 +- cpp/src/text/normalize.cu | 4 +- cpp/src/text/replace.cu | 2 +- cpp/src/text/subword/bpe_tokenizer.cu | 2 +- cpp/src/text/subword/load_merges_file.cu | 2 +- cpp/src/text/utilities/tokenize_ops.cuh | 2 +- cpp/tests/groupby/merge_lists_tests.cpp | 2 +- cpp/tests/groupby/merge_sets_tests.cpp | 12 +- cpp/tests/io/parquet_test.cpp | 6 +- cpp/tests/lists/reverse_tests.cpp | 8 +- .../difference_distinct_tests.cpp | 2 +- .../intersect_distinct_tests.cpp | 4 +- .../set_operations/union_distinct_tests.cpp | 4 +- .../stream_compaction/distinct_tests.cpp | 10 +- .../reshape/interleave_columns_tests.cpp | 2 +- .../rolling/range_rolling_window_test.cpp | 2 +- cpp/tests/sort/segmented_sort_tests.cpp | 2 +- cpp/tests/strings/chars_types_tests.cpp | 12 +- cpp/tests/strings/durations_tests.cpp | 8 +- cpp/tests/utilities/column_utilities.cu | 2 +- 78 files changed, 276 insertions(+), 276 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 238e5b44030..7e44091774f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -63,7 +63,7 @@ repos: # Explicitly specify the pyproject.toml at the repo root, not per-project. 
args: ["--config=pyproject.toml"] - repo: https://github.com/pre-commit/mirrors-clang-format - rev: v16.0.1 + rev: v16.0.6 hooks: - id: clang-format types_or: [c, c++, cuda] diff --git a/cpp/benchmarks/iterator/iterator.cu b/cpp/benchmarks/iterator/iterator.cu index 7acf24c30a5..dcd13cf62c4 100644 --- a/cpp/benchmarks/iterator/iterator.cu +++ b/cpp/benchmarks/iterator/iterator.cu @@ -145,7 +145,7 @@ void BM_iterator(benchmark::State& state) cuda_event_timer raii(state, true); // flush_l2_cache = true, stream = 0 if (cub_or_thrust) { if (raw_or_iterator) { - raw_stream_bench_cub(hasnull_F, dev_result); // driven by raw pointer + raw_stream_bench_cub(hasnull_F, dev_result); // driven by raw pointer } else { iterator_bench_cub(hasnull_F, dev_result); // driven by riterator without nulls } diff --git a/cpp/benchmarks/stream_compaction/apply_boolean_mask.cpp b/cpp/benchmarks/stream_compaction/apply_boolean_mask.cpp index a6feaf04842..f78aa9fa654 100644 --- a/cpp/benchmarks/stream_compaction/apply_boolean_mask.cpp +++ b/cpp/benchmarks/stream_compaction/apply_boolean_mask.cpp @@ -59,8 +59,8 @@ void calculate_bandwidth(benchmark::State& state, cudf::size_type num_columns) int64_t const column_bytes_in = column_bytes_out; // we only read unmasked inputs int64_t const bytes_read = - (column_bytes_in + validity_bytes_in) * num_columns + // reading columns - mask_size; // reading boolean mask + (column_bytes_in + validity_bytes_in) * num_columns + // reading columns + mask_size; // reading boolean mask int64_t const bytes_written = (column_bytes_out + validity_bytes_out) * num_columns; // writing columns diff --git a/cpp/benchmarks/string/char_types.cpp b/cpp/benchmarks/string/char_types.cpp index 8e9e595fcef..59e6245fd41 100644 --- a/cpp/benchmarks/string/char_types.cpp +++ b/cpp/benchmarks/string/char_types.cpp @@ -43,7 +43,7 @@ static void bench_char_types(nvbench::state& state) state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value())); // gather some throughput statistics as well auto chars_size = input.chars_size(); - state.add_global_memory_reads(chars_size); // all bytes are read; + state.add_global_memory_reads(chars_size); // all bytes are read; if (api_type == "all") { state.add_global_memory_writes(num_rows); // output is a bool8 per row } else { diff --git a/cpp/benchmarks/string/extract.cpp b/cpp/benchmarks/string/extract.cpp index 9e67c5a5b52..135dadabbe4 100644 --- a/cpp/benchmarks/string/extract.cpp +++ b/cpp/benchmarks/string/extract.cpp @@ -43,7 +43,7 @@ static void bench_extract(nvbench::state& state) std::uniform_int_distribution words_dist(0, 999); std::vector samples(100); // 100 unique rows of data to reuse std::generate(samples.begin(), samples.end(), [&]() { - std::string row; // build a row of random tokens + std::string row; // build a row of random tokens while (static_cast(row.size()) < row_width) { row += std::to_string(words_dist(generator)) + " "; } diff --git a/cpp/include/cudf/column/column_device_view.cuh b/cpp/include/cudf/column/column_device_view.cuh index 05ef21bd750..35851a99822 100644 --- a/cpp/include/cudf/column/column_device_view.cuh +++ b/cpp/include/cudf/column/column_device_view.cuh @@ -1393,7 +1393,7 @@ struct pair_accessor { */ template struct pair_rep_accessor { - column_device_view const col; ///< column view of column in device + column_device_view const col; ///< column view of column in device using rep_type = device_storage_type_t; ///< representation type diff --git a/cpp/include/cudf/detail/copy_if.cuh 
b/cpp/include/cudf/detail/copy_if.cuh
index 1dd91dcd865..ebe7e052b6d 100644
--- a/cpp/include/cudf/detail/copy_if.cuh
+++ b/cpp/include/cudf/detail/copy_if.cuh
@@ -133,7 +133,7 @@ __launch_bounds__(block_size) __global__
   if (has_validity) {
     temp_valids[threadIdx.x] = false;  // init shared memory
     if (threadIdx.x < cudf::detail::warp_size) temp_valids[block_size + threadIdx.x] = false;
-    __syncthreads();  // wait for init
+    __syncthreads();  // wait for init
   }

   if (mask_true) {
diff --git a/cpp/include/cudf/detail/indexalator.cuh b/cpp/include/cudf/detail/indexalator.cuh
index 0ab9da0dbd0..4731c4919e3 100644
--- a/cpp/include/cudf/detail/indexalator.cuh
+++ b/cpp/include/cudf/detail/indexalator.cuh
@@ -248,7 +248,7 @@ struct input_indexalator : base_indexalator {
   friend struct indexalator_factory;
   friend struct base_indexalator;  // for CRTP

-  using reference = size_type const;  // this keeps STL and thrust happy
+  using reference = size_type const;  // this keeps STL and thrust happy

   input_indexalator() = default;
   input_indexalator(input_indexalator const&) = default;
@@ -332,7 +332,7 @@ struct output_indexalator : base_indexalator {
   friend struct indexalator_factory;
   friend struct base_indexalator;  // for CRTP

-  using reference = output_indexalator const&;  // required for output iterators
+  using reference = output_indexalator const&;  // required for output iterators

   output_indexalator() = default;
   output_indexalator(output_indexalator const&) = default;
diff --git a/cpp/include/cudf/detail/join.hpp b/cpp/include/cudf/detail/join.hpp
index 6fcf10aef57..b69632c83ca 100644
--- a/cpp/include/cudf/detail/join.hpp
+++ b/cpp/include/cudf/detail/join.hpp
@@ -78,8 +78,8 @@ struct hash_join {
   cudf::null_equality const _nulls_equal;  ///< whether to consider nulls as equal
   cudf::table_view _build;                 ///< input table to build the hash map
   std::shared_ptr
-    _preprocessed_build;  ///< input table preprocessed for row operators
-  map_type _hash_table;   ///< hash table built on `_build`
+    _preprocessed_build;  ///< input table preprocessed for row operators
+  map_type _hash_table;   ///< hash table built on `_build`

  public:
  /**
diff --git a/cpp/include/cudf/fixed_point/fixed_point.hpp b/cpp/include/cudf/fixed_point/fixed_point.hpp
index 7c59c2f9194..13d8716c1df 100644
--- a/cpp/include/cudf/fixed_point/fixed_point.hpp
+++ b/cpp/include/cudf/fixed_point/fixed_point.hpp
@@ -829,5 +829,5 @@ using decimal32 = fixed_point<int32_t, Radix::BASE_10>;  ///< 32-bit decimal fixed point
 using decimal64  = fixed_point<int64_t, Radix::BASE_10>;     ///< 64-bit decimal fixed point
 using decimal128 = fixed_point<__int128_t, Radix::BASE_10>;  ///< 128-bit decimal fixed point

-/** @} */  // end of group
+/** @} */  // end of group
 }  // namespace numeric
diff --git a/cpp/include/cudf/groupby.hpp b/cpp/include/cudf/groupby.hpp
index 6e575685daa..1c31e8777a8 100644
--- a/cpp/include/cudf/groupby.hpp
+++ b/cpp/include/cudf/groupby.hpp
@@ -386,8 +386,8 @@ class groupby {
                  ///< indicates null order
                  ///< of each column
   std::unique_ptr
-    _helper;  ///< Helper object
-              ///< used by sort based implementation
+    _helper;  ///< Helper object
+              ///< used by sort based implementation

   /**
    * @brief Get the sort helper object
diff --git a/cpp/include/cudf/io/csv.hpp b/cpp/include/cudf/io/csv.hpp
index c84ca7e6c73..b49a13a8ea9 100644
--- a/cpp/include/cudf/io/csv.hpp
+++ b/cpp/include/cudf/io/csv.hpp
@@ -213,7 +213,7 @@ class csv_reader_options {
     auto const max_row_bytes = 16 * 1024;  // 16KB
     auto const column_bytes  = 64;
-    auto const base_padding  = 1024;  // 1KB
+    auto const base_padding  = 1024;  // 1KB

     if (num_columns == 0) {
       //
Use flat size if the number of columns is not known diff --git a/cpp/include/cudf/io/json.hpp b/cpp/include/cudf/io/json.hpp index 15dc2a614ad..d408d249a7f 100644 --- a/cpp/include/cudf/io/json.hpp +++ b/cpp/include/cudf/io/json.hpp @@ -207,7 +207,7 @@ class json_reader_options { auto const max_row_bytes = 16 * 1024; // 16KB auto const column_bytes = 64; - auto const base_padding = 1024; // 1KB + auto const base_padding = 1024; // 1KB if (num_columns == 0) { // Use flat size if the number of columns is not known diff --git a/cpp/include/cudf/strings/detail/utf8.hpp b/cpp/include/cudf/strings/detail/utf8.hpp index df8e2885782..e04572535de 100644 --- a/cpp/include/cudf/strings/detail/utf8.hpp +++ b/cpp/include/cudf/strings/detail/utf8.hpp @@ -155,18 +155,18 @@ constexpr inline size_type from_char_utf8(char_utf8 character, char* str) constexpr uint32_t utf8_to_codepoint(cudf::char_utf8 utf8_char) { uint32_t unchr = 0; - if (utf8_char < 0x0000'0080) // single-byte pass thru + if (utf8_char < 0x0000'0080) // single-byte pass thru unchr = utf8_char; - else if (utf8_char < 0x0000'E000) // two bytes + else if (utf8_char < 0x0000'E000) // two bytes { - unchr = (utf8_char & 0x1F00) >> 2; // shift and - unchr |= (utf8_char & 0x003F); // unmask - } else if (utf8_char < 0x00F0'0000) // three bytes + unchr = (utf8_char & 0x1F00) >> 2; // shift and + unchr |= (utf8_char & 0x003F); // unmask + } else if (utf8_char < 0x00F0'0000) // three bytes { - unchr = (utf8_char & 0x0F'0000) >> 4; // get upper 4 bits - unchr |= (utf8_char & 0x00'3F00) >> 2; // shift and - unchr |= (utf8_char & 0x00'003F); // unmask - } else if (utf8_char <= 0xF800'0000u) // four bytes + unchr = (utf8_char & 0x0F'0000) >> 4; // get upper 4 bits + unchr |= (utf8_char & 0x00'3F00) >> 2; // shift and + unchr |= (utf8_char & 0x00'003F); // unmask + } else if (utf8_char <= 0xF800'0000u) // four bytes { unchr = (utf8_char & 0x0300'0000) >> 6; // upper 3 bits unchr |= (utf8_char & 0x003F'0000) >> 4; // next 6 bits @@ -185,20 +185,20 @@ constexpr uint32_t utf8_to_codepoint(cudf::char_utf8 utf8_char) constexpr cudf::char_utf8 codepoint_to_utf8(uint32_t unchr) { cudf::char_utf8 utf8 = 0; - if (unchr < 0x0000'0080) // single byte utf8 + if (unchr < 0x0000'0080) // single byte utf8 utf8 = unchr; - else if (unchr < 0x0000'0800) // double byte utf8 + else if (unchr < 0x0000'0800) // double byte utf8 { - utf8 = (unchr << 2) & 0x1F00; // shift bits for - utf8 |= (unchr & 0x3F); // utf8 encoding + utf8 = (unchr << 2) & 0x1F00; // shift bits for + utf8 |= (unchr & 0x3F); // utf8 encoding utf8 |= 0x0000'C080; - } else if (unchr < 0x0001'0000) // triple byte utf8 + } else if (unchr < 0x0001'0000) // triple byte utf8 { - utf8 = (unchr << 4) & 0x0F'0000; // upper 4 bits - utf8 |= (unchr << 2) & 0x00'3F00; // next 6 bits - utf8 |= (unchr & 0x3F); // last 6 bits + utf8 = (unchr << 4) & 0x0F'0000; // upper 4 bits + utf8 |= (unchr << 2) & 0x00'3F00; // next 6 bits + utf8 |= (unchr & 0x3F); // last 6 bits utf8 |= 0x00E0'8080; - } else if (unchr < 0x0011'0000) // quadruple byte utf8 + } else if (unchr < 0x0011'0000) // quadruple byte utf8 { utf8 = (unchr << 6) & 0x0700'0000; // upper 3 bits utf8 |= (unchr << 4) & 0x003F'0000; // next 6 bits diff --git a/cpp/include/cudf/table/row_operators.cuh b/cpp/include/cudf/table/row_operators.cuh index 599a85c8a54..4806f96c934 100644 --- a/cpp/include/cudf/table/row_operators.cuh +++ b/cpp/include/cudf/table/row_operators.cuh @@ -105,9 +105,9 @@ inline __device__ auto null_compare(bool lhs_is_null, bool rhs_is_null, 
null_ord { if (lhs_is_null and rhs_is_null) { // null (dictionary_wrapper const& lhs, using dictionary32 = dictionary_wrapper; ///< 32-bit integer indexed dictionary wrapper -/** @} */ // end of group +/** @} */ // end of group } // namespace cudf diff --git a/cpp/include/cudf_test/base_fixture.hpp b/cpp/include/cudf_test/base_fixture.hpp index b622d7c6b78..06aabbe4e9c 100644 --- a/cpp/include/cudf_test/base_fixture.hpp +++ b/cpp/include/cudf_test/base_fixture.hpp @@ -331,9 +331,9 @@ inline auto parse_cudf_test_opts(int argc, char** argv) cxxopts::Options options(argv[0], " - cuDF tests command line options"); char const* env_rmm_mode = std::getenv("GTEST_CUDF_RMM_MODE"); // Overridden by CLI options char const* env_stream_mode = - std::getenv("GTEST_CUDF_STREAM_MODE"); // Overridden by CLI options + std::getenv("GTEST_CUDF_STREAM_MODE"); // Overridden by CLI options char const* env_stream_error_mode = - std::getenv("GTEST_CUDF_STREAM_ERROR_MODE"); // Overridden by CLI options + std::getenv("GTEST_CUDF_STREAM_ERROR_MODE"); // Overridden by CLI options auto default_rmm_mode = env_rmm_mode ? env_rmm_mode : "pool"; auto default_stream_mode = env_stream_mode ? env_stream_mode : "default"; auto default_stream_error_mode = env_stream_error_mode ? env_stream_error_mode : "error"; diff --git a/cpp/include/nvtext/subword_tokenize.hpp b/cpp/include/nvtext/subword_tokenize.hpp index ac75f5e9147..72a899d70b4 100644 --- a/cpp/include/nvtext/subword_tokenize.hpp +++ b/cpp/include/nvtext/subword_tokenize.hpp @@ -44,7 +44,7 @@ struct hashed_vocabulary { std::unique_ptr bin_offsets; ///< uint16 column, containing the start index of each ///< bin in the flattened hash table std::unique_ptr - cp_metadata; ///< uint32 column, The code point metadata table to use for normalization + cp_metadata; ///< uint32 column, The code point metadata table to use for normalization std::unique_ptr aux_cp_table; ///< uint64 column, The auxiliary code point table to use for normalization }; diff --git a/cpp/scripts/run-clang-tidy.py b/cpp/scripts/run-clang-tidy.py index a617a4c0df7..e5e57dbf562 100644 --- a/cpp/scripts/run-clang-tidy.py +++ b/cpp/scripts/run-clang-tidy.py @@ -22,7 +22,7 @@ import shutil -EXPECTED_VERSION = "16.0.1" +EXPECTED_VERSION = "16.0.6" VERSION_REGEX = re.compile(r" LLVM version ([0-9.]+)") GPU_ARCH_REGEX = re.compile(r"sm_(\d+)") SPACES = re.compile(r"\s+") diff --git a/cpp/src/copying/contiguous_split.cu b/cpp/src/copying/contiguous_split.cu index e1a55ec5419..5ea56a05dcb 100644 --- a/cpp/src/copying/contiguous_split.cu +++ b/cpp/src/copying/contiguous_split.cu @@ -114,8 +114,8 @@ struct dst_buf_info { int bit_shift; // # of bits to shift right by (for validity buffers) size_type valid_count; // validity count for this block of work - int src_buf_index; // source buffer index - int dst_buf_index; // destination buffer index + int src_buf_index; // source buffer index + int dst_buf_index; // destination buffer index }; /** @@ -1384,7 +1384,7 @@ struct chunk_iteration_state { std::size_t starting_batch; ///< Starting batch index for the current iteration std::vector const h_num_buffs_per_iteration; ///< The count of batches per iteration std::vector const - h_size_of_buffs_per_iteration; ///< The size in bytes per iteration + h_size_of_buffs_per_iteration; ///< The size in bytes per iteration }; std::unique_ptr chunk_iteration_state::create( @@ -1989,7 +1989,7 @@ struct contiguous_split_state { // This can be 1 if `contiguous_split` is just packing and not splitting std::size_t const num_partitions; 
///< The number of partitions to produce - size_type const num_src_bufs; ///< Number of source buffers including children + size_type const num_src_bufs; ///< Number of source buffers including children std::size_t const num_bufs; ///< Number of source buffers including children * number of splits diff --git a/cpp/src/groupby/sort/functors.hpp b/cpp/src/groupby/sort/functors.hpp index c378ac99727..be36956b929 100644 --- a/cpp/src/groupby/sort/functors.hpp +++ b/cpp/src/groupby/sort/functors.hpp @@ -94,12 +94,12 @@ struct store_result_functor { }; protected: - sort::sort_groupby_helper& helper; ///< Sort helper - cudf::detail::result_cache& cache; ///< cache of results to store into - column_view const& values; ///< Column of values to group and aggregate + sort::sort_groupby_helper& helper; ///< Sort helper + cudf::detail::result_cache& cache; ///< cache of results to store into + column_view const& values; ///< Column of values to group and aggregate - rmm::cuda_stream_view stream; ///< CUDA stream on which to execute kernels - rmm::mr::device_memory_resource* mr; ///< Memory resource to allocate space for results + rmm::cuda_stream_view stream; ///< CUDA stream on which to execute kernels + rmm::mr::device_memory_resource* mr; ///< Memory resource to allocate space for results sorted keys_are_sorted; ///< Whether the keys are sorted std::unique_ptr sorted_values; ///< Memoised grouped and sorted values diff --git a/cpp/src/io/avro/avro_gpu.cu b/cpp/src/io/avro/avro_gpu.cu index 2c634d9b590..365f6d6875c 100644 --- a/cpp/src/io/avro/avro_gpu.cu +++ b/cpp/src/io/avro/avro_gpu.cu @@ -303,7 +303,7 @@ avro_decode_row(schemadesc_s const* schema, // If within an array, check if we reached the last item if (array_repeat_count != 0 && array_children <= 0 && cur < end) { if (!--array_repeat_count) { - i = array_start; // Restart at the array parent + i = array_start; // Restart at the array parent } else { i = array_start + 1; // Restart after the array parent array_children = schema[array_start].count; diff --git a/cpp/src/io/comp/cpu_unbz2.cpp b/cpp/src/io/comp/cpu_unbz2.cpp index 7159ff30d7c..a116335b254 100644 --- a/cpp/src/io/comp/cpu_unbz2.cpp +++ b/cpp/src/io/comp/cpu_unbz2.cpp @@ -216,7 +216,7 @@ int32_t bz2_decompress_block(unbz_state_s* s) s->currBlockNo++; - skipbits(s, 32); // block CRC + skipbits(s, 32); // block CRC if (getbits(s, 1)) return BZ_DATA_ERROR; // blockRandomized not supported (old bzip versions) diff --git a/cpp/src/io/comp/debrotli.cu b/cpp/src/io/comp/debrotli.cu index 542ca031b7c..8bafd054bdb 100644 --- a/cpp/src/io/comp/debrotli.cu +++ b/cpp/src/io/comp/debrotli.cu @@ -121,7 +121,7 @@ __inline__ __device__ int brotli_context(int p1, int p2, int lut) struct huff_scratch_s { uint16_t code_length_histo[16]; uint8_t code_length_code_lengths[brotli_code_length_codes]; - int8_t offset[6]; // offsets in sorted table for each length + int8_t offset[6]; // offsets in sorted table for each length uint16_t lenvlctab[32]; uint16_t sorted[brotli_code_length_codes]; // symbols sorted by code length int16_t next_symbol[32]; @@ -1298,7 +1298,7 @@ static __device__ void InverseMoveToFrontTransform(debrotli_state_s* s, uint8_t* // Reinitialize elements that could have been changed. uint32_t i = 1; uint32_t upper_bound = s->mtf_upper_bound; - uint32_t* mtf = &s->mtf[1]; // Make mtf[-1] addressable. + uint32_t* mtf = &s->mtf[1]; // Make mtf[-1] addressable. 
auto* mtf_u8 = reinterpret_cast(mtf); uint32_t pattern = 0x0302'0100; // Little-endian diff --git a/cpp/src/io/comp/gpuinflate.cu b/cpp/src/io/comp/gpuinflate.cu index 42c4fbe7bea..8993815e560 100644 --- a/cpp/src/io/comp/gpuinflate.cu +++ b/cpp/src/io/comp/gpuinflate.cu @@ -124,11 +124,11 @@ struct inflate_state_s { uint8_t* outbase; ///< start of output buffer uint8_t* outend; ///< end of output buffer // Input state - uint8_t const* cur; ///< input buffer - uint8_t const* end; ///< end of input buffer + uint8_t const* cur; ///< input buffer + uint8_t const* end; ///< end of input buffer - uint2 bitbuf; ///< bit buffer (64-bit) - uint32_t bitpos; ///< position in bit buffer + uint2 bitbuf; ///< bit buffer (64-bit) + uint32_t bitpos; ///< position in bit buffer int32_t err; ///< Error status int btype; ///< current block type @@ -295,7 +295,7 @@ __device__ int construct( return 0; // complete, but decode() will fail // check for an over-subscribed or incomplete set of lengths - left = 1; // one possible code of zero length + left = 1; // one possible code of zero length for (len = 1; len <= max_bits; len++) { left <<= 1; // one more bit, double codes left left -= counts[len]; // deduct count from possible codes @@ -349,8 +349,8 @@ __device__ int init_dynamic(inflate_state_s* s) index = 0; while (index < nlen + ndist) { int symbol = decode(s, s->lencnt, s->lensym); - if (symbol < 0) return symbol; // invalid symbol - if (symbol < 16) // length in 0..15 + if (symbol < 0) return symbol; // invalid symbol + if (symbol < 16) // length in 0..15 lengths[index++] = symbol; else { // repeat instruction int len = 0; // last length to repeat, assume repeating zeros @@ -358,9 +358,9 @@ __device__ int init_dynamic(inflate_state_s* s) if (index == 0) return -5; // no last length! len = lengths[index - 1]; // last length symbol = 3 + getbits(s, 2); - } else if (symbol == 17) // repeat zero 3..10 times + } else if (symbol == 17) // repeat zero 3..10 times symbol = 3 + getbits(s, 3); - else // == 18, repeat zero 11..138 times + else // == 18, repeat zero 11..138 times symbol = 11 + getbits(s, 7); if (index + symbol > nlen + ndist) return -6; // too many lengths! 
while (symbol--)  // repeat last or zero symbol times
diff --git a/cpp/src/io/comp/uncomp.cpp b/cpp/src/io/comp/uncomp.cpp
index 017fd8abb47..0d2d21333bb 100644
--- a/cpp/src/io/comp/uncomp.cpp
+++ b/cpp/src/io/comp/uncomp.cpp
@@ -28,7 +28,7 @@

 #include <cstring>  // memset

-#include <zlib.h>  // uncompress
+#include <zlib.h>  // uncompress

 using cudf::host_span;

@@ -47,7 +47,7 @@ struct gz_file_header_s {
   uint8_t os;  // OS id
 };

-struct zip_eocd_s  // end of central directory
+struct zip_eocd_s  // end of central directory
 {
   uint32_t sig;            // 0x0605'4b50
   uint16_t disk_id;        // number of this disk
   uint16_t start_disk;     // number of the disk with the start of the central directory
   uint16_t num_entries;    // number of entries in the central dir on this disk
   uint16_t total_entries;  // total number of entries in the central dir
   uint32_t cdir_size;      // size of the central directory
   uint32_t cdir_offset;    // offset of start of central directory with respect to the starting disk
                            // number
   uint16_t comment_len;    // comment length (excluded from struct)
 };

-struct zip64_eocdl  // end of central dir locator
+struct zip64_eocdl  // end of central dir locator
 {
   uint32_t sig;         // 0x0706'4b50
   uint32_t disk_start;  // number of the disk with the start of the zip64 end of central directory
   uint32_t num_disks;   // total number of disks
 };

-struct zip_cdfh_s  // central directory file header
+struct zip_cdfh_s  // central directory file header
 {
   uint32_t sig;  // 0x0201'4b50
   uint16_t ver;  // version made by
@@ -111,7 +111,7 @@ struct bz2_file_header_s {

 struct gz_archive_s {
   gz_file_header_s const* fhdr;
-  uint16_t hcrc16;  // header crc16 if present
+  uint16_t hcrc16;  // header crc16 if present
   uint16_t xlen;
   uint8_t const* fxtra;  // xlen bytes (optional)
   uint8_t const* fname;  // zero-terminated original filename if present
diff --git a/cpp/src/io/comp/unsnap.cu b/cpp/src/io/comp/unsnap.cu
index a7a1cfd3f9e..c699502317f 100644
--- a/cpp/src/io/comp/unsnap.cu
+++ b/cpp/src/io/comp/unsnap.cu
@@ -45,7 +45,7 @@ void __device__ busy_wait(size_t cycles)
 struct unsnap_batch_s {
   int32_t len;  // 1..64 = Number of bytes
   uint32_t
-    offset;  // copy distance if greater than zero or negative of literal offset in byte stream
+    offset;  // copy distance if greater than zero or negative of literal offset in byte stream
 };

 /**
diff --git a/cpp/src/io/json/json_column.cu b/cpp/src/io/json/json_column.cu
index bdad16bd9f1..cabf904f020 100644
--- a/cpp/src/io/json/json_column.cu
+++ b/cpp/src/io/json/json_column.cu
@@ -169,7 +169,7 @@ reduce_to_column_tree(tree_meta_t& tree,
   });

   // 4.
unique_copy parent_node_ids, ranges - rmm::device_uvector column_levels(0, stream); // not required + rmm::device_uvector column_levels(0, stream); // not required rmm::device_uvector parent_col_ids(num_columns, stream); rmm::device_uvector col_range_begin(num_columns, stream); // Field names rmm::device_uvector col_range_end(num_columns, stream); diff --git a/cpp/src/io/json/nested_json_gpu.cu b/cpp/src/io/json/nested_json_gpu.cu index b691eaa8caf..0b49f97597d 100644 --- a/cpp/src/io/json/nested_json_gpu.cu +++ b/cpp/src/io/json/nested_json_gpu.cu @@ -762,18 +762,18 @@ auto get_translation_table(bool include_line_delimiter) nl_tokens({}), // LINE_BREAK {ValueBegin}}}; // OTHER pda_tlt[static_cast(pda_state_t::PD_BOA)] = { - { /*ROOT*/ - {ErrorBegin}, // OPENING_BRACE - {ErrorBegin}, // OPENING_BRACKET - {ErrorBegin}, // CLOSING_BRACE - {ErrorBegin}, // CLOSING_BRACKET - {ErrorBegin}, // QUOTE - {ErrorBegin}, // ESCAPE - {ErrorBegin}, // COMMA - {ErrorBegin}, // COLON - {ErrorBegin}, // WHITE_SPACE - nl_tokens({ErrorBegin}), // LINE_BREAK - {ErrorBegin}, // OTHER + { /*ROOT*/ + {ErrorBegin}, // OPENING_BRACE + {ErrorBegin}, // OPENING_BRACKET + {ErrorBegin}, // CLOSING_BRACE + {ErrorBegin}, // CLOSING_BRACKET + {ErrorBegin}, // QUOTE + {ErrorBegin}, // ESCAPE + {ErrorBegin}, // COMMA + {ErrorBegin}, // COLON + {ErrorBegin}, // WHITE_SPACE + nl_tokens({ErrorBegin}), // LINE_BREAK + {ErrorBegin}, // OTHER /*LIST*/ {StructBegin}, // OPENING_BRACE {ListBegin}, // OPENING_BRACKET @@ -799,18 +799,18 @@ auto get_translation_table(bool include_line_delimiter) nl_tokens({}), // LINE_BREAK {ErrorBegin}}}; // OTHER pda_tlt[static_cast(pda_state_t::PD_LON)] = { - { /*ROOT*/ - {ErrorBegin}, // OPENING_BRACE - {ErrorBegin}, // OPENING_BRACKET - {ErrorBegin}, // CLOSING_BRACE - {ErrorBegin}, // CLOSING_BRACKET - {ErrorBegin}, // QUOTE - {ErrorBegin}, // ESCAPE - {ErrorBegin}, // COMMA - {ErrorBegin}, // COLON - {ValueEnd}, // WHITE_SPACE - nl_tokens({ValueEnd}), // LINE_BREAK - {}, // OTHER + { /*ROOT*/ + {ErrorBegin}, // OPENING_BRACE + {ErrorBegin}, // OPENING_BRACKET + {ErrorBegin}, // CLOSING_BRACE + {ErrorBegin}, // CLOSING_BRACKET + {ErrorBegin}, // QUOTE + {ErrorBegin}, // ESCAPE + {ErrorBegin}, // COMMA + {ErrorBegin}, // COLON + {ValueEnd}, // WHITE_SPACE + nl_tokens({ValueEnd}), // LINE_BREAK + {}, // OTHER /*LIST*/ {ErrorBegin}, // OPENING_BRACE {ErrorBegin}, // OPENING_BRACKET @@ -824,17 +824,17 @@ auto get_translation_table(bool include_line_delimiter) nl_tokens({ValueEnd}), // LINE_BREAK {}, // OTHER /*STRUCT*/ - {ErrorBegin}, // OPENING_BRACE - {ErrorBegin}, // OPENING_BRACKET - {ValueEnd, StructMemberEnd, StructEnd}, // CLOSING_BRACE - {ErrorBegin}, // CLOSING_BRACKET - {ErrorBegin}, // QUOTE - {ErrorBegin}, // ESCAPE - {ValueEnd, StructMemberEnd}, // COMMA - {ErrorBegin}, // COLON - {ValueEnd}, // WHITE_SPACE - nl_tokens({ValueEnd}), // LINE_BREAK - {}}}; // OTHER + {ErrorBegin}, // OPENING_BRACE + {ErrorBegin}, // OPENING_BRACKET + {ValueEnd, StructMemberEnd, StructEnd}, // CLOSING_BRACE + {ErrorBegin}, // CLOSING_BRACKET + {ErrorBegin}, // QUOTE + {ErrorBegin}, // ESCAPE + {ValueEnd, StructMemberEnd}, // COMMA + {ErrorBegin}, // COLON + {ValueEnd}, // WHITE_SPACE + nl_tokens({ValueEnd}), // LINE_BREAK + {}}}; // OTHER pda_tlt[static_cast(pda_state_t::PD_STR)] = {{ /*ROOT*/ {}, // OPENING_BRACE @@ -974,17 +974,17 @@ auto get_translation_table(bool include_line_delimiter) nl_tokens({ErrorBegin}), // LINE_BREAK {ErrorBegin}, // OTHER /*STRUCT*/ - {ErrorBegin}, // OPENING_BRACE - 
{ErrorBegin}, // OPENING_BRACKET - {StructEnd}, // CLOSING_BRACE - {ErrorBegin}, // CLOSING_BRACKET - {StructMemberBegin, FieldNameBegin}, // QUOTE - {ErrorBegin}, // ESCAPE - {ErrorBegin}, // COMMA - {ErrorBegin}, // COLON - {}, // WHITE_SPACE - nl_tokens({}), // LINE_BREAK - {ErrorBegin}}}; // OTHER + {ErrorBegin}, // OPENING_BRACE + {ErrorBegin}, // OPENING_BRACKET + {StructEnd}, // CLOSING_BRACE + {ErrorBegin}, // CLOSING_BRACKET + {StructMemberBegin, FieldNameBegin}, // QUOTE + {ErrorBegin}, // ESCAPE + {ErrorBegin}, // COMMA + {ErrorBegin}, // COLON + {}, // WHITE_SPACE + nl_tokens({}), // LINE_BREAK + {ErrorBegin}}}; // OTHER pda_tlt[static_cast(pda_state_t::PD_FLN)] = {{ /*ROOT*/ {ErrorBegin}, // OPENING_BRACE @@ -1011,17 +1011,17 @@ auto get_translation_table(bool include_line_delimiter) nl_tokens({ErrorBegin}), // LINE_BREAK {ErrorBegin}, // OTHER /*STRUCT*/ - {}, // OPENING_BRACE - {}, // OPENING_BRACKET - {}, // CLOSING_BRACE - {}, // CLOSING_BRACKET - {FieldNameEnd}, // QUOTE - {}, // ESCAPE - {}, // COMMA - {}, // COLON - {}, // WHITE_SPACE - nl_tokens({}), // LINE_BREAK - {}}}; // OTHER + {}, // OPENING_BRACE + {}, // OPENING_BRACKET + {}, // CLOSING_BRACE + {}, // CLOSING_BRACKET + {FieldNameEnd}, // QUOTE + {}, // ESCAPE + {}, // COMMA + {}, // COLON + {}, // WHITE_SPACE + nl_tokens({}), // LINE_BREAK + {}}}; // OTHER pda_tlt[static_cast(pda_state_t::PD_FNE)] = {{ /*ROOT*/ {ErrorBegin}, // OPENING_BRACE @@ -1048,17 +1048,17 @@ auto get_translation_table(bool include_line_delimiter) nl_tokens({ErrorBegin}), // LINE_BREAK {ErrorBegin}, // OTHER /*STRUCT*/ - {}, // OPENING_BRACE - {}, // OPENING_BRACKET - {}, // CLOSING_BRACE - {}, // CLOSING_BRACKET - {}, // QUOTE - {}, // ESCAPE - {}, // COMMA - {}, // COLON - {}, // WHITE_SPACE - nl_tokens({}), // LINE_BREAK - {}}}; // OTHER + {}, // OPENING_BRACE + {}, // OPENING_BRACKET + {}, // CLOSING_BRACE + {}, // CLOSING_BRACKET + {}, // QUOTE + {}, // ESCAPE + {}, // COMMA + {}, // COLON + {}, // WHITE_SPACE + nl_tokens({}), // LINE_BREAK + {}}}; // OTHER pda_tlt[static_cast(pda_state_t::PD_PFN)] = {{ /*ROOT*/ {ErrorBegin}, // OPENING_BRACE @@ -1097,18 +1097,18 @@ auto get_translation_table(bool include_line_delimiter) nl_tokens({}), // LINE_BREAK {ErrorBegin}}}; // OTHER - pda_tlt[static_cast(pda_state_t::PD_ERR)] = {{ /*ROOT*/ - {}, // OPENING_BRACE - {}, // OPENING_BRACKET - {}, // CLOSING_BRACE - {}, // CLOSING_BRACKET - {}, // QUOTE - {}, // ESCAPE - {}, // COMMA - {}, // COLON - {}, // WHITE_SPACE - nl_tokens({}), // LINE_BREAK - {}, // OTHER + pda_tlt[static_cast(pda_state_t::PD_ERR)] = {{ /*ROOT*/ + {}, // OPENING_BRACE + {}, // OPENING_BRACKET + {}, // CLOSING_BRACE + {}, // CLOSING_BRACKET + {}, // QUOTE + {}, // ESCAPE + {}, // COMMA + {}, // COLON + {}, // WHITE_SPACE + nl_tokens({}), // LINE_BREAK + {}, // OTHER /*LIST*/ {}, // OPENING_BRACE {}, // OPENING_BRACKET diff --git a/cpp/src/io/orc/orc_gpu.hpp b/cpp/src/io/orc/orc_gpu.hpp index 681cc0fb9d2..9b8df50a22a 100644 --- a/cpp/src/io/orc/orc_gpu.hpp +++ b/cpp/src/io/orc/orc_gpu.hpp @@ -157,7 +157,7 @@ struct EncChunk { uint8_t dtype_len; // data type length int32_t scale; // scale for decimals or timestamps - uint32_t* dict_index; // dictionary index from row index + uint32_t* dict_index; // dictionary index from row index uint32_t* decimal_offsets; orc_column_device_view const* column; }; diff --git a/cpp/src/io/orc/stripe_data.cu b/cpp/src/io/orc/stripe_data.cu index b66ca827119..3edcd3d83b2 100644 --- a/cpp/src/io/orc/stripe_data.cu +++ 
b/cpp/src/io/orc/stripe_data.cu @@ -367,14 +367,14 @@ inline __device__ uint32_t varint_length(volatile orc_bytestream_s* bs, int pos) if (zbit) { return 5 + (zbit >> 3); // up to 9x7 bits } else if ((sizeof(T) <= 8) || (bytestream_readbyte(bs, pos + 9) <= 0x7f)) { - return 10; // up to 70 bits + return 10; // up to 70 bits } else { uint64_t next64 = bytestream_readu64(bs, pos + 10); zbit = __ffsll((~next64) & 0x8080'8080'8080'8080ull); if (zbit) { return 10 + (zbit >> 3); // Up to 18x7 bits (126) } else { - return 19; // Up to 19x7 bits (133) + return 19; // Up to 19x7 bits (133) } } } diff --git a/cpp/src/io/parquet/compact_protocol_reader.cpp b/cpp/src/io/parquet/compact_protocol_reader.cpp index 92fcd151925..ae11af92f78 100644 --- a/cpp/src/io/parquet/compact_protocol_reader.cpp +++ b/cpp/src/io/parquet/compact_protocol_reader.cpp @@ -168,7 +168,7 @@ bool CompactProtocolReader::read(LogicalType* l) ParquetFieldUnion(2, l->isset.MAP, l->MAP), ParquetFieldUnion(3, l->isset.LIST, l->LIST), ParquetFieldUnion(4, l->isset.ENUM, l->ENUM), - ParquetFieldUnion(5, l->isset.DECIMAL, l->DECIMAL), // read the struct + ParquetFieldUnion(5, l->isset.DECIMAL, l->DECIMAL), // read the struct ParquetFieldUnion(6, l->isset.DATE, l->DATE), ParquetFieldUnion(7, l->isset.TIME, l->TIME), // read the struct ParquetFieldUnion(8, l->isset.TIMESTAMP, l->TIMESTAMP), // read the struct diff --git a/cpp/src/io/parquet/compact_protocol_writer.cpp b/cpp/src/io/parquet/compact_protocol_writer.cpp index b2a89129645..b2c0c97c52d 100644 --- a/cpp/src/io/parquet/compact_protocol_writer.cpp +++ b/cpp/src/io/parquet/compact_protocol_writer.cpp @@ -315,7 +315,7 @@ inline void CompactProtocolFieldWriter::field_struct(int field, T const& val) if constexpr (not std::is_empty_v) { writer.write(val); // write the struct if it's not empty } else { - put_byte(0); // otherwise, add a stop field + put_byte(0); // otherwise, add a stop field } current_field_value = field; } diff --git a/cpp/src/io/parquet/delta_binary.cuh b/cpp/src/io/parquet/delta_binary.cuh index 4fc8b9cfb8e..2382e4aafdf 100644 --- a/cpp/src/io/parquet/delta_binary.cuh +++ b/cpp/src/io/parquet/delta_binary.cuh @@ -90,16 +90,16 @@ inline __device__ zigzag128_t get_zz128(uint8_t const*& cur, uint8_t const* end) } struct delta_binary_decoder { - uint8_t const* block_start; // start of data, but updated as data is read - uint8_t const* block_end; // end of data - uleb128_t block_size; // usually 128, must be multiple of 128 - uleb128_t mini_block_count; // usually 4, chosen such that block_size/mini_block_count is a - // multiple of 32 - uleb128_t value_count; // total values encoded in the block - zigzag128_t last_value; // last value decoded, initialized to first_value from header - - uint32_t values_per_mb; // block_size / mini_block_count, must be multiple of 32 - uint32_t current_value_idx; // current value index, initialized to 0 at start of block + uint8_t const* block_start; // start of data, but updated as data is read + uint8_t const* block_end; // end of data + uleb128_t block_size; // usually 128, must be multiple of 128 + uleb128_t mini_block_count; // usually 4, chosen such that block_size/mini_block_count is a + // multiple of 32 + uleb128_t value_count; // total values encoded in the block + zigzag128_t last_value; // last value decoded, initialized to first_value from header + + uint32_t values_per_mb; // block_size / mini_block_count, must be multiple of 32 + uint32_t current_value_idx; // current value index, initialized to 0 at start of block 
zigzag128_t cur_min_delta; // min delta for the block uint32_t cur_mb; // index of the current mini-block within the block diff --git a/cpp/src/io/parquet/page_delta_decode.cu b/cpp/src/io/parquet/page_delta_decode.cu index e79a479388f..35f33a761be 100644 --- a/cpp/src/io/parquet/page_delta_decode.cu +++ b/cpp/src/io/parquet/page_delta_decode.cu @@ -85,7 +85,7 @@ __global__ void __launch_bounds__(96) gpuDecodeDeltaBinary( if (t < 2 * warp_size) { // warp0..1 target_pos = min(src_pos + 2 * batch_size, s->nz_count + batch_size); - } else { // warp2 + } else { // warp2 target_pos = min(s->nz_count, src_pos + batch_size); } __syncthreads(); diff --git a/cpp/src/io/parquet/parquet.hpp b/cpp/src/io/parquet/parquet.hpp index a729f28d672..f7318bb9935 100644 --- a/cpp/src/io/parquet/parquet.hpp +++ b/cpp/src/io/parquet/parquet.hpp @@ -365,8 +365,8 @@ struct ColumnIndex { std::vector> min_values; // lower bound for values in each page std::vector> max_values; // upper bound for values in each page BoundaryOrder boundary_order = - BoundaryOrder::UNORDERED; // Indicates if min and max values are ordered - std::vector null_counts; // Optional count of null values per page + BoundaryOrder::UNORDERED; // Indicates if min and max values are ordered + std::vector null_counts; // Optional count of null values per page }; // bit space we are reserving in column_buffer::user_data diff --git a/cpp/src/io/parquet/parquet_gpu.hpp b/cpp/src/io/parquet/parquet_gpu.hpp index e82b6abc13d..a3cc37dee4f 100644 --- a/cpp/src/io/parquet/parquet_gpu.hpp +++ b/cpp/src/io/parquet/parquet_gpu.hpp @@ -299,7 +299,7 @@ struct ColumnChunkDesc { int8_t converted_type; // converted type enum LogicalType logical_type; // logical type int8_t decimal_precision; // Decimal precision - int32_t ts_clock_rate; // output timestamp clock frequency (0=default, 1000=ms, 1000000000=ns) + int32_t ts_clock_rate; // output timestamp clock frequency (0=default, 1000=ms, 1000000000=ns) int32_t src_col_index; // my input column index int32_t src_col_schema; // my schema index in the file @@ -396,16 +396,16 @@ constexpr uint32_t encoding_to_mask(Encoding encoding) struct EncColumnChunk { parquet_column_device_view const* col_desc; //!< Column description size_type col_desc_id; - PageFragment* fragments; //!< First fragment in chunk - uint8_t* uncompressed_bfr; //!< Uncompressed page data - uint8_t* compressed_bfr; //!< Compressed page data - statistics_chunk const* stats; //!< Fragment statistics - uint32_t bfr_size; //!< Uncompressed buffer size - uint32_t compressed_size; //!< Compressed buffer size - uint32_t max_page_data_size; //!< Max data size (excluding header) of any page in this chunk - uint32_t page_headers_size; //!< Sum of size of all page headers - size_type start_row; //!< First row of chunk - uint32_t num_rows; //!< Number of rows in chunk + PageFragment* fragments; //!< First fragment in chunk + uint8_t* uncompressed_bfr; //!< Uncompressed page data + uint8_t* compressed_bfr; //!< Compressed page data + statistics_chunk const* stats; //!< Fragment statistics + uint32_t bfr_size; //!< Uncompressed buffer size + uint32_t compressed_size; //!< Compressed buffer size + uint32_t max_page_data_size; //!< Max data size (excluding header) of any page in this chunk + uint32_t page_headers_size; //!< Sum of size of all page headers + size_type start_row; //!< First row of chunk + uint32_t num_rows; //!< Number of rows in chunk size_type num_values; //!< Number of values in chunk. 
Different from num_rows for nested types uint32_t first_fragment; //!< First fragment of chunk EncPage* pages; //!< Ptr to pages that belong to this chunk diff --git a/cpp/src/io/parquet/reader_impl_preprocess.cu b/cpp/src/io/parquet/reader_impl_preprocess.cu index bde73c3dd96..a2db0de26bb 100644 --- a/cpp/src/io/parquet/reader_impl_preprocess.cu +++ b/cpp/src/io/parquet/reader_impl_preprocess.cu @@ -1673,7 +1673,7 @@ void reader::impl::preprocess_pages(size_t skip_rows, // - we will be doing a chunked read gpu::ComputePageSizes(pages, chunks, - 0, // 0-max size_t. process all possible rows + 0, // 0-max size_t. process all possible rows std::numeric_limits::max(), true, // compute num_rows chunk_read_limit > 0, // compute string sizes diff --git a/cpp/src/join/join.cu b/cpp/src/join/join.cu index 8210f3114d6..ae025b1a213 100644 --- a/cpp/src/join/join.cu +++ b/cpp/src/join/join.cu @@ -73,7 +73,7 @@ left_join(table_view const& left_input, // Make sure any dictionary columns have matched key sets. // This will return any new dictionary columns created as well as updated table_views. auto matched = cudf::dictionary::detail::match_dictionaries( - {left_input, right_input}, // these should match + {left_input, right_input}, // these should match stream, rmm::mr::get_current_device_resource()); // temporary objects returned // now rebuild the table views with the updated ones @@ -98,7 +98,7 @@ full_join(table_view const& left_input, // Make sure any dictionary columns have matched key sets. // This will return any new dictionary columns created as well as updated table_views. auto matched = cudf::dictionary::detail::match_dictionaries( - {left_input, right_input}, // these should match + {left_input, right_input}, // these should match stream, rmm::mr::get_current_device_resource()); // temporary objects returned // now rebuild the table views with the updated ones diff --git a/cpp/src/quantiles/tdigest/tdigest_aggregation.cu b/cpp/src/quantiles/tdigest/tdigest_aggregation.cu index 2ce55e10fb1..9e8b75ae3b6 100644 --- a/cpp/src/quantiles/tdigest/tdigest_aggregation.cu +++ b/cpp/src/quantiles/tdigest/tdigest_aggregation.cu @@ -459,7 +459,7 @@ __global__ void generate_cluster_limits_kernel(int delta, int adjusted_w_index = nearest_w_index; if ((last_inserted_index < 0) || // if we haven't inserted anything yet (nearest_w_index == - last_inserted_index)) { // if we land in the same bucket as the previous cap + last_inserted_index)) { // if we land in the same bucket as the previous cap // force the value into this bucket adjusted_w_index = (last_inserted_index == group_size - 1) diff --git a/cpp/src/rolling/detail/rolling_collect_list.cuh b/cpp/src/rolling/detail/rolling_collect_list.cuh index 9f74a961e12..39d15ed716f 100644 --- a/cpp/src/rolling/detail/rolling_collect_list.cuh +++ b/cpp/src/rolling/detail/rolling_collect_list.cuh @@ -116,7 +116,7 @@ std::unique_ptr create_collect_gather_map(column_view const& child_offse thrust::make_counting_iterator(per_row_mapping.size()), gather_map->mutable_view().template begin(), [d_offsets = - child_offsets.template begin(), // E.g. [0, 2, 5, 8, 11, 13] + child_offsets.template begin(), // E.g. [0, 2, 5, 8, 11, 13] d_groups = per_row_mapping.template begin(), // E.g. 
[0,0, 1,1,1, 2,2,2, 3,3,3, 4,4] d_prev = preceding_iter] __device__(auto i) { diff --git a/cpp/src/strings/char_types/char_types.cu b/cpp/src/strings/char_types/char_types.cu index b87fb80fcc2..0c0ad0ad29e 100644 --- a/cpp/src/strings/char_types/char_types.cu +++ b/cpp/src/strings/char_types/char_types.cu @@ -139,9 +139,9 @@ struct filter_chars_fn { { auto const code_point = detail::utf8_to_codepoint(ch); auto const flag = code_point <= 0x00'FFFF ? d_flags[code_point] : 0; - if (flag == 0) // all types pass unless specifically identified + if (flag == 0) // all types pass unless specifically identified return (types_to_remove == ALL_TYPES); - if (types_to_keep == ALL_TYPES) // filter case + if (types_to_keep == ALL_TYPES) // filter case return (types_to_remove & flag) != 0; return (types_to_keep & flag) == 0; // keep case } diff --git a/cpp/src/strings/convert/convert_datetime.cu b/cpp/src/strings/convert/convert_datetime.cu index cca06ca0739..8a953d778ed 100644 --- a/cpp/src/strings/convert/convert_datetime.cu +++ b/cpp/src/strings/convert/convert_datetime.cu @@ -317,8 +317,8 @@ struct parse_datetime { bytes_read -= left; break; } - case 'u': [[fallthrough]]; // day of week: Mon(1)-Sat(6),Sun(7) - case 'w': { // day of week; Sun(0),Mon(1)-Sat(6) + case 'u': [[fallthrough]]; // day of week: Mon(1)-Sat(6),Sun(7) + case 'w': { // day of week; Sun(0),Mon(1)-Sat(6) auto const [weekday, left] = parse_int(ptr, item.length); timeparts.weekday = // 0 is mapped to 7 for chrono library static_cast((item.value == 'w' && weekday == 0) ? 7 : weekday); @@ -1000,7 +1000,7 @@ struct datetime_formatter_fn { case 'S': // second copy_value = timeparts.second; break; - case 'f': // sub-second + case 'f': // sub-second { char subsecond_digits[] = "000000000"; // 9 max digits int const digits = [] { diff --git a/cpp/src/strings/convert/convert_durations.cu b/cpp/src/strings/convert/convert_durations.cu index 863f76b9b98..6ab70825a6b 100644 --- a/cpp/src/strings/convert/convert_durations.cu +++ b/cpp/src/strings/convert/convert_durations.cu @@ -576,7 +576,7 @@ struct parse_duration { item_length++; // : timeparts->second = parse_second(ptr + item_length, item_length); break; - case 'r': // hh:MM:SS AM/PM + case 'r': // hh:MM:SS AM/PM timeparts->hour = parse_hour(ptr, item_length); item_length++; // : timeparts->minute = parse_minute(ptr + item_length, item_length); diff --git a/cpp/src/strings/convert/convert_floats.cu b/cpp/src/strings/convert/convert_floats.cu index ab1e6870937..32167589ab4 100644 --- a/cpp/src/strings/convert/convert_floats.cu +++ b/cpp/src/strings/convert/convert_floats.cu @@ -284,7 +284,7 @@ struct ftos_converter { while (pb != buffer) // reverses the digits *ptr++ = *--pb; // e.g. 
54321 -> 12345 } else - *ptr++ = '0'; // always include at least .0 + *ptr++ = '0'; // always include at least .0 // exponent if (exp10) { *ptr++ = 'e'; @@ -310,7 +310,7 @@ struct ftos_converter { { if (std::isnan(value)) return 3; // NaN bool bneg = false; - if (signbit(value)) { // handles -0.0 too + if (signbit(value)) { // handles -0.0 too value = -value; bneg = true; } @@ -337,7 +337,7 @@ struct ftos_converter { ++count; // always include .0 // exponent if (exp10) { - count += 2; // 'e±' + count += 2; // 'e±' if (exp10 < 0) exp10 = -exp10; count += (int)(exp10 < 10); // padding while (exp10 > 0) { diff --git a/cpp/src/strings/convert/convert_integers.cu b/cpp/src/strings/convert/convert_integers.cu index 260c3393f3c..5597d2831c0 100644 --- a/cpp/src/strings/convert/convert_integers.cu +++ b/cpp/src/strings/convert/convert_integers.cu @@ -76,7 +76,7 @@ struct string_to_integer_check_fn { auto const digit = static_cast(chr - '0'); auto const bound_check = (bound_val - sign * digit) / IntegerType{10} * sign; if (value > bound_check) return false; - value = value* IntegerType{10} + digit; + value = value * IntegerType{10} + digit; } return true; diff --git a/cpp/src/strings/convert/convert_ipv4.cu b/cpp/src/strings/convert/convert_ipv4.cu index 4606aba6d17..adb72cb0263 100644 --- a/cpp/src/strings/convert/convert_ipv4.cu +++ b/cpp/src/strings/convert/convert_ipv4.cu @@ -197,7 +197,7 @@ std::unique_ptr is_ipv4(strings_column_view const& strings, if (d_str.empty()) return false; constexpr int max_ip = 255; // values must be in [0,255] int ip_vals[4] = {-1, -1, -1, -1}; - int ipv_idx = 0; // index into ip_vals + int ipv_idx = 0; // index into ip_vals for (auto const ch : d_str) { if ((ch >= '0') && (ch <= '9')) { auto const ip_val = ip_vals[ipv_idx]; diff --git a/cpp/src/strings/convert/convert_urls.cu b/cpp/src/strings/convert/convert_urls.cu index 71b6c09310e..9efa148cfd2 100644 --- a/cpp/src/strings/convert/convert_urls.cu +++ b/cpp/src/strings/convert/convert_urls.cu @@ -107,9 +107,9 @@ struct url_encoder_fn { out_ptr = copy_and_increment(out_ptr, hex, 2); // add them to the output } } - } else // these are to be utf-8 url-encoded + } else // these are to be utf-8 url-encoded { - uint8_t char_bytes[4]; // holds utf-8 bytes for one character + uint8_t char_bytes[4]; // holds utf-8 bytes for one character size_type char_width = from_char_utf8(ch, reinterpret_cast(char_bytes)); nbytes += char_width * 3; // '%' plus 2 hex chars per byte (example: é is %C3%A9) // process each byte in this current character diff --git a/cpp/src/strings/json/json_path.cu b/cpp/src/strings/json/json_path.cu index 2d2691e0518..c56752f5429 100644 --- a/cpp/src/strings/json/json_path.cu +++ b/cpp/src/strings/json/json_path.cu @@ -984,7 +984,7 @@ std::unique_ptr get_json_object(cudf::strings_column_view const& c col.size(), rmm::device_buffer{0, stream, mr}, // no data cudf::detail::create_null_mask(col.size(), mask_state::ALL_NULL, stream, mr), - col.size()); // null count + col.size()); // null count } constexpr int block_size = 512; diff --git a/cpp/src/strings/regex/regcomp.cpp b/cpp/src/strings/regex/regcomp.cpp index 5fd098a872e..b7a7f19369d 100644 --- a/cpp/src/strings/regex/regcomp.cpp +++ b/cpp/src/strings/regex/regcomp.cpp @@ -184,9 +184,9 @@ class regex_parser { int32_t _id_cclass_d{-1}; // digits [0-9] int32_t _id_cclass_D{-1}; // not digits - char32_t _chr{}; // last lex'd char - int32_t _cclass_id{}; // last lex'd class - int16_t _min_count{}; // data for counted operators + char32_t _chr{}; // last 
lex'd char + int32_t _cclass_id{}; // last lex'd class + int16_t _min_count{}; // data for counted operators int16_t _max_count{}; std::vector _items; @@ -361,9 +361,9 @@ class regex_parser { auto [q, n_chr] = next_char(); if (n_chr == 0) { return 0; } // malformed: '[x-' - if (!q && n_chr == ']') { // handles: '[x-]' + if (!q && n_chr == ']') { // handles: '[x-]' literals.push_back(chr); - literals.push_back(chr); // add '-' as literal + literals.push_back(chr); // add '-' as literal break; } // normal case: '[a-z]' @@ -749,7 +749,7 @@ class regex_parser { // infinite repeats if (n > 0) { // append '+' after last repetition out.push_back(regex_parser::Item{item.type == COUNTED ? PLUS : PLUS_LAZY, 0}); - } else { // copy it once then append '*' + } else { // copy it once then append '*' out.insert(out.end(), begin, end); out.push_back(regex_parser::Item{item.type == COUNTED ? STAR : STAR_LAZY, 0}); } @@ -1095,7 +1095,7 @@ void reprog::build_start_ids() ids.pop(); reinst const& inst = _insts[id]; if (inst.type == OR) { - if (inst.u2.left_id != id) // prevents infinite while-loop here + if (inst.u2.left_id != id) // prevents infinite while-loop here ids.push(inst.u2.left_id); if (inst.u1.right_id != id) // prevents infinite while-loop here ids.push(inst.u1.right_id); diff --git a/cpp/src/strings/regex/regcomp.h b/cpp/src/strings/regex/regcomp.h index aa2cb363b80..ab912ace0df 100644 --- a/cpp/src/strings/regex/regcomp.h +++ b/cpp/src/strings/regex/regcomp.h @@ -77,16 +77,16 @@ constexpr int32_t NCCLASS_D{1 << 5}; // not CCLASS_D or '\n' * @brief Structure of an encoded regex instruction */ struct reinst { - int32_t type; /* operator type or instruction type */ + int32_t type; /* operator type or instruction type */ union { int32_t cls_id; /* class pointer */ char32_t c; /* character */ int32_t subid; /* sub-expression id for RBRA and LBRA */ int32_t right_id; /* right child of OR */ } u1; - union { /* regexec relies on these two being in the same union */ - int32_t left_id; /* left child of OR */ - int32_t next_id; /* next instruction for CAT & LBRA */ + union { /* regexec relies on these two being in the same union */ + int32_t left_id; /* left child of OR */ + int32_t next_id; /* next instruction for CAT & LBRA */ } u2; int32_t reserved4; }; diff --git a/cpp/src/strings/regex/regex.cuh b/cpp/src/strings/regex/regex.cuh index 19d82380350..c1abbd78b43 100644 --- a/cpp/src/strings/regex/regex.cuh +++ b/cpp/src/strings/regex/regex.cuh @@ -253,21 +253,21 @@ class reprog_device { reprog_device(reprog const&); - int32_t _startinst_id; // first instruction id - int32_t _num_capturing_groups; // instruction groups - int32_t _insts_count; // number of instructions - int32_t _starts_count; // number of start-insts ids - int32_t _classes_count; // number of classes - int32_t _max_insts; // for partitioning working memory + int32_t _startinst_id; // first instruction id + int32_t _num_capturing_groups; // instruction groups + int32_t _insts_count; // number of instructions + int32_t _starts_count; // number of start-insts ids + int32_t _classes_count; // number of classes + int32_t _max_insts; // for partitioning working memory uint8_t const* _codepoint_flags{}; // table of character types reinst const* _insts{}; // array of regex instructions int32_t const* _startinst_ids{}; // array of start instruction ids reclass_device const* _classes{}; // array of regex classes - std::size_t _prog_size{}; // total size of this instance - void* _buffer{}; // working memory buffer - int32_t _thread_count{}; // 
threads available in working memory + std::size_t _prog_size{}; // total size of this instance + void* _buffer{}; // working memory buffer + int32_t _thread_count{}; // threads available in working memory }; /** diff --git a/cpp/src/strings/regex/regex.inl b/cpp/src/strings/regex/regex.inl index c5205ae7789..ce12dc17aa4 100644 --- a/cpp/src/strings/regex/regex.inl +++ b/cpp/src/strings/regex/regex.inl @@ -146,17 +146,17 @@ __device__ __forceinline__ bool reclass_device::is_match(char32_t const ch, uint32_t codept = utf8_to_codepoint(ch); if (codept > 0x00'FFFF) return false; int8_t fl = codepoint_flags[codept]; - if ((builtins & CCLASS_W) && ((ch == '_') || IS_ALPHANUM(fl))) // \w + if ((builtins & CCLASS_W) && ((ch == '_') || IS_ALPHANUM(fl))) // \w return true; - if ((builtins & CCLASS_S) && IS_SPACE(fl)) // \s + if ((builtins & CCLASS_S) && IS_SPACE(fl)) // \s return true; - if ((builtins & CCLASS_D) && IS_DIGIT(fl)) // \d + if ((builtins & CCLASS_D) && IS_DIGIT(fl)) // \d return true; if ((builtins & NCCLASS_W) && ((ch != '\n') && (ch != '_') && !IS_ALPHANUM(fl))) // \W return true; - if ((builtins & NCCLASS_S) && !IS_SPACE(fl)) // \S + if ((builtins & NCCLASS_S) && !IS_SPACE(fl)) // \S return true; - if ((builtins & NCCLASS_D) && ((ch != '\n') && !IS_DIGIT(fl))) // \D + if ((builtins & NCCLASS_D) && ((ch != '\n') && !IS_DIGIT(fl))) // \D return true; // return false; diff --git a/cpp/src/strings/replace/replace_re.cu b/cpp/src/strings/replace/replace_re.cu index 460074a5296..81ddb937be5 100644 --- a/cpp/src/strings/replace/replace_re.cu +++ b/cpp/src/strings/replace/replace_re.cu @@ -68,7 +68,7 @@ struct replace_regex_fn { if (!match) { break; } // no more matches auto const [start_pos, end_pos] = match_positions_to_bytes(*match, d_str, last_pos); - nbytes += d_repl.size_bytes() - (end_pos - start_pos); // add new size + nbytes += d_repl.size_bytes() - (end_pos - start_pos); // add new size if (out_ptr) { // replace: // i:bbbbsssseeee diff --git a/cpp/src/strings/split/partition.cu b/cpp/src/strings/split/partition.cu index 099f5978992..0c7d119ea38 100644 --- a/cpp/src/strings/split/partition.cu +++ b/cpp/src/strings/split/partition.cu @@ -170,7 +170,7 @@ struct rpartition_fn : public partition_fn { --itr; pos = check_delimiter(idx, d_str, itr); } - if (pos < 0) // delimiter not found + if (pos < 0) // delimiter not found { d_indices_left[idx] = string_index_pair{"", 0}; // two empty d_indices_delim[idx] = string_index_pair{"", 0}; // strings diff --git a/cpp/src/strings/split/split.cuh b/cpp/src/strings/split/split.cuh index e76d8ac1c60..dc0b04af388 100644 --- a/cpp/src/strings/split/split.cuh +++ b/cpp/src/strings/split/split.cuh @@ -190,7 +190,7 @@ struct split_tokenizer_fn : base_split_tokenizer { device_span d_delimiters, device_span d_tokens) const { - auto const base_ptr = get_base_ptr(); // d_positions values based on this + auto const base_ptr = get_base_ptr(); // d_positions values based on this auto str_ptr = d_str.data(); auto const str_end = str_ptr + d_str.size_bytes(); // end of the string auto const token_count = static_cast(d_tokens.size()); diff --git a/cpp/src/strings/split/split_re.cu b/cpp/src/strings/split/split_re.cu index 9aeb6b69bdc..3be5937297f 100644 --- a/cpp/src/strings/split/split_re.cu +++ b/cpp/src/strings/split/split_re.cu @@ -91,7 +91,7 @@ struct token_reader_fn { } else { if (direction == split_direction::FORWARD) { break; } // we are done for (auto l = 0; l < token_idx - 1; ++l) { - d_result[l] = d_result[l + 1]; // shift left + d_result[l] = 
d_result[l + 1]; // shift left } d_result[token_idx - 1] = token; } diff --git a/cpp/src/strings/utilities.cu b/cpp/src/strings/utilities.cu index 57a868485df..c8c68d19ce6 100644 --- a/cpp/src/strings/utilities.cu +++ b/cpp/src/strings/utilities.cu @@ -86,9 +86,9 @@ thread_safe_per_context_cache d_special_case_mappings; } // namespace - /** - * @copydoc cudf::strings::detail::get_character_flags_table - */ +/** + * @copydoc cudf::strings::detail::get_character_flags_table + */ character_flags_table_type const* get_character_flags_table() { return d_character_codepoint_flags.find_or_initialize([&](void) { diff --git a/cpp/src/text/normalize.cu b/cpp/src/text/normalize.cu index 78dfb6bf1a6..1b07b0785f5 100644 --- a/cpp/src/text/normalize.cu +++ b/cpp/src/text/normalize.cu @@ -70,7 +70,7 @@ struct normalize_spaces_fn { cudf::string_view const single_space(" ", 1); auto const d_str = d_strings.element(idx); char* buffer = d_chars ? d_chars + d_offsets[idx] : nullptr; - char* optr = buffer; // running output pointer + char* optr = buffer; // running output pointer cudf::size_type nbytes = 0; // holds the number of bytes per output string @@ -146,7 +146,7 @@ struct codepoint_to_utf8_fn { char* out_ptr = d_chars + d_offsets[idx]; for (uint32_t jdx = 0; jdx < count; ++jdx) { uint32_t code_point = *str_cps++; - if (code_point < UTF8_1BYTE) // ASCII range + if (code_point < UTF8_1BYTE) // ASCII range *out_ptr++ = static_cast(code_point); else if (code_point < UTF8_2BYTE) { // create two-byte UTF-8 // b00001xxx:byyyyyyyy => b110xxxyy:b10yyyyyy diff --git a/cpp/src/text/replace.cu b/cpp/src/text/replace.cu index d122f048a4e..34916e121dc 100644 --- a/cpp/src/text/replace.cu +++ b/cpp/src/text/replace.cu @@ -114,7 +114,7 @@ using strings_iterator = cudf::column_device_view::const_iterator= end) { break; } // done checking for pairs // skip to the next adjacent pair diff --git a/cpp/src/text/subword/load_merges_file.cu b/cpp/src/text/subword/load_merges_file.cu index 1f1b90b3f49..db6ad2e2dd2 100644 --- a/cpp/src/text/subword/load_merges_file.cu +++ b/cpp/src/text/subword/load_merges_file.cu @@ -93,7 +93,7 @@ std::unique_ptr initialize_merge_pairs_map( auto merge_pairs_map = std::make_unique( static_cast(input.size() * 2), // capacity is 2x; cuco::empty_key{-1}, - cuco::empty_value{-1}, // empty value is not used + cuco::empty_value{-1}, // empty value is not used bpe_equal{input}, probe_scheme{bpe_hasher{input}}, hash_table_allocator_type{default_allocator{}, stream}, diff --git a/cpp/src/text/utilities/tokenize_ops.cuh b/cpp/src/text/utilities/tokenize_ops.cuh index fbd2d1efcff..a84e94a6924 100644 --- a/cpp/src/text/utilities/tokenize_ops.cuh +++ b/cpp/src/text/utilities/tokenize_ops.cuh @@ -230,7 +230,7 @@ struct multi_delimiter_strings_tokenizer { }); if (itr_find != delimiters_end) { // found delimiter auto token_size = static_cast((curr_ptr - data_ptr) - last_pos); - if (token_size > 0) // we only care about non-zero sized tokens + if (token_size > 0) // we only care about non-zero sized tokens { if (d_str_tokens) d_str_tokens[token_idx] = string_index_pair{data_ptr + last_pos, token_size}; diff --git a/cpp/tests/groupby/merge_lists_tests.cpp b/cpp/tests/groupby/merge_lists_tests.cpp index 991473c5023..f2909f870aa 100644 --- a/cpp/tests/groupby/merge_lists_tests.cpp +++ b/cpp/tests/groupby/merge_lists_tests.cpp @@ -374,7 +374,7 @@ TEST_F(GroupbyMergeListsTest, StringsColumnInput) "" /*NULL*/, "" /*NULL*/, "German Shepherd", - "" /*NULL*/ + "" /*NULL*/ }, nulls_at({3, 4, 5, 7})}, // key = "dog" 
lists_col{{"Whale", "" /*NULL*/, "Polar Bear"}, null_at(1)}, // key = "unknown" diff --git a/cpp/tests/groupby/merge_sets_tests.cpp b/cpp/tests/groupby/merge_sets_tests.cpp index 67ff61563bb..5fc7e68b524 100644 --- a/cpp/tests/groupby/merge_sets_tests.cpp +++ b/cpp/tests/groupby/merge_sets_tests.cpp @@ -333,7 +333,7 @@ TEST_F(GroupbyMergeSetsTest, StringsColumnInput) lists_col{{"" /*NULL*/, "" /*NULL*/, "" /*NULL*/}, all_nulls()} // key = "dog" }; auto const lists3 = lists_col{ - lists_col{"Fuji", "Red Delicious"}, // key = "apple" + lists_col{"Fuji", "Red Delicious"}, // key = "apple" lists_col{{"" /*NULL*/, "Corgi", "German Shepherd", "" /*NULL*/, "Golden Retriever"}, nulls_at({0, 3})}, // key = "dog" lists_col{{"Seeedless", "Mini"}, no_nulls()} // key = "water melon" @@ -343,14 +343,14 @@ TEST_F(GroupbyMergeSetsTest, StringsColumnInput) merge_sets(vcol_views{keys1, keys2, keys3}, vcol_views{lists1, lists2, lists3}); auto const expected_keys = strings_col{"apple", "banana", "dog", "unknown", "water melon"}; auto const expected_lists = lists_col{ - lists_col{"Fuji", "Honey Bee", "Red Delicious"}, // key = "apple" - lists_col{"Green", "Yellow"}, // key = "banana" + lists_col{"Fuji", "Honey Bee", "Red Delicious"}, // key = "apple" + lists_col{"Green", "Yellow"}, // key = "banana" lists_col{{ "Corgi", "German Shepherd", "Golden Retriever", "Poodle", "" /*NULL*/ }, - null_at(4)}, // key = "dog" - lists_col{{"Polar Bear", "Whale", "" /*NULL*/}, null_at(2)}, // key = "unknown" - lists_col{{"Mini", "Seeedless"}, no_nulls()} // key = "water melon" + null_at(4)}, // key = "dog" + lists_col{{"Polar Bear", "Whale", "" /*NULL*/}, null_at(2)}, // key = "unknown" + lists_col{{"Mini", "Seeedless"}, no_nulls()} // key = "water melon" }; CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected_keys, *out_keys, verbosity); diff --git a/cpp/tests/io/parquet_test.cpp b/cpp/tests/io/parquet_test.cpp index 64aca091686..81e0e12eeb9 100644 --- a/cpp/tests/io/parquet_test.cpp +++ b/cpp/tests/io/parquet_test.cpp @@ -2166,7 +2166,7 @@ TEST_F(ParquetChunkedWriterTest, ForcedNullabilityList) cudf::io::table_input_metadata metadata(table1); metadata.column_metadata[0].set_nullability(true); // List is nullable at first (root) level metadata.column_metadata[0].child(1).set_nullability( - false); // non-nullable at second (leaf) level + false); // non-nullable at second (leaf) level metadata.column_metadata[1].set_nullability(true); auto filepath = temp_env->get_temp_filepath("ChunkedListNullable.parquet"); @@ -5880,7 +5880,7 @@ TEST_F(ParquetMetadataReaderTest, TestNested) EXPECT_EQ(out_map_col.type_kind(), cudf::io::parquet::TypeKind::UNDEFINED_TYPE); // map ASSERT_EQ(out_map_col.num_children(), 1); - EXPECT_EQ(out_map_col.child(0).name(), "key_value"); // key_value (named in parquet writer) + EXPECT_EQ(out_map_col.child(0).name(), "key_value"); // key_value (named in parquet writer) ASSERT_EQ(out_map_col.child(0).num_children(), 2); EXPECT_EQ(out_map_col.child(0).child(0).name(), "key"); // key (named in parquet writer) EXPECT_EQ(out_map_col.child(0).child(1).name(), "value"); // value (named in parquet writer) @@ -5897,7 +5897,7 @@ TEST_F(ParquetMetadataReaderTest, TestNested) ASSERT_EQ(out_list_col.child(0).num_children(), 1); auto const& out_list_struct_col = out_list_col.child(0).child(0); - EXPECT_EQ(out_list_struct_col.name(), "element"); // elements (named in parquet writer) + EXPECT_EQ(out_list_struct_col.name(), "element"); // elements (named in parquet writer) EXPECT_EQ(out_list_struct_col.type_kind(), 
cudf::io::parquet::TypeKind::UNDEFINED_TYPE); // struct ASSERT_EQ(out_list_struct_col.num_children(), 2); diff --git a/cpp/tests/lists/reverse_tests.cpp b/cpp/tests/lists/reverse_tests.cpp index a899d387c3e..00dc13c5812 100644 --- a/cpp/tests/lists/reverse_tests.cpp +++ b/cpp/tests/lists/reverse_tests.cpp @@ -370,8 +370,8 @@ TYPED_TEST(ListsReverseTypedTest, InputListsOfStructsWithNulls) "Kiwi", "Cherry", "Banana", - "", /*NULL*/ - "", /*NULL*/ + "", /*NULL*/ + "", /*NULL*/ "Apple", "", /*NULL*/ "Banana", // end list1 @@ -436,8 +436,8 @@ TYPED_TEST(ListsReverseTypedTest, InputListsOfStructsWithNulls) "Kiwi", "Cherry", "Banana", - "", /*NULL*/ - "", /*NULL*/ + "", /*NULL*/ + "", /*NULL*/ "Apple", "", /*NULL*/ "Banana", // end list1 diff --git a/cpp/tests/lists/set_operations/difference_distinct_tests.cpp b/cpp/tests/lists/set_operations/difference_distinct_tests.cpp index bf7ebc902ba..84c51f256b7 100644 --- a/cpp/tests/lists/set_operations/difference_distinct_tests.cpp +++ b/cpp/tests/lists/set_operations/difference_distinct_tests.cpp @@ -571,7 +571,7 @@ TEST_F(SetDifferenceTest, InputListsOfNestedStructsHaveNull) "" /*NULL*/, "" /*NULL*/, "" /*NULL*/, "Apple", "Banana", "Cherry", "Kiwi", // end list1 "" /*NULL*/, "Bear", "Cat", "Dog", "Duck", - "Panda", // end list2 + "Panda", // end list2 "ÁÁÁ", "ÉÉÉÉÉ", "ÁBC", "ÁÁÁ", "ÍÍÍÍÍ", "" /*NULL*/, "XYZ", "ÁBC" // end list3 diff --git a/cpp/tests/lists/set_operations/intersect_distinct_tests.cpp b/cpp/tests/lists/set_operations/intersect_distinct_tests.cpp index dbccf06036b..11f98af3520 100644 --- a/cpp/tests/lists/set_operations/intersect_distinct_tests.cpp +++ b/cpp/tests/lists/set_operations/intersect_distinct_tests.cpp @@ -514,7 +514,7 @@ TEST_F(SetIntersectTest, InputListsOfNestedStructsHaveNull) null, // end list1 null, // end list2 null, - null // end list3 + null // end list3 }, all_nulls()}; auto grandchild2 = strings_col{{ @@ -522,7 +522,7 @@ TEST_F(SetIntersectTest, InputListsOfNestedStructsHaveNull) "Apple", // end list1 "" /*NULL*/, // end list2 "ÁÁÁ", - "ÉÉÉÉÉ" // end list3 + "ÉÉÉÉÉ" // end list3 }, nulls_at({0, 2})}; auto child1 = structs_col{{grandchild1, grandchild2}, null_at(0)}; diff --git a/cpp/tests/lists/set_operations/union_distinct_tests.cpp b/cpp/tests/lists/set_operations/union_distinct_tests.cpp index 5cc0897351d..e33ea31541b 100644 --- a/cpp/tests/lists/set_operations/union_distinct_tests.cpp +++ b/cpp/tests/lists/set_operations/union_distinct_tests.cpp @@ -560,7 +560,7 @@ TEST_F(SetUnionTest, InputListsOfNestedStructsHaveNull) auto grandchild2 = strings_col{{ "" /*NULL*/, "Apple", "Banana", "Cherry", "Kiwi", "Banana", "Cherry", - "Kiwi", // end list1 + "Kiwi", // end list1 "" /*NULL*/, "Bear", "Cat", "Dog", "Duck", "Panda", "Bear", "Cat", "Dog", "Duck", "Panda", // end list2 @@ -597,7 +597,7 @@ TEST_F(SetUnionTest, InputListsOfNestedStructsHaveNull) { "" /*NULL*/, "" /*NULL*/, "" /*NULL*/, "" /*NULL*/, "" /*NULL*/, "" /*NULL*/, "Apple", "Apple", "Banana", "Cherry", "Kiwi", "Banana", "Cherry", - "Kiwi", // end list1 + "Kiwi", // end list1 "" /*NULL*/, "" /*NULL*/, "Bear", "Cat", "Dog", "Duck", "Panda", "Bear", "Cat", "Dog", "Duck", "Panda", // end list2 "ÁÁÁ", "ÁÁÁ", "ÉÉÉÉÉ", "ÉÉÉÉÉ", "ÁBC", "ÁÁÁ", "ÍÍÍÍÍ", diff --git a/cpp/tests/lists/stream_compaction/distinct_tests.cpp b/cpp/tests/lists/stream_compaction/distinct_tests.cpp index 57d1714c255..fbc637f9315 100644 --- a/cpp/tests/lists/stream_compaction/distinct_tests.cpp +++ b/cpp/tests/lists/stream_compaction/distinct_tests.cpp @@ -529,7 +529,7 @@ 
TEST_F(ListDistinctTest, InputListsOfStructsHaveNull) 2, 3, 3, - 3}, // end list3 + 3}, // end list3 nulls_at({1, 6, 12, 13})}; auto child2 = strings_col{{ // begin list1 "XXX", /*NULL*/ @@ -551,7 +551,7 @@ TEST_F(ListDistinctTest, InputListsOfStructsHaveNull) "ÁBC", "ÁÁÁ", "ÍÍÍÍÍ", - "", /*NULL*/ + "", /*NULL*/ "XYZ", "ÁBC"}, // end list3 nulls_at({6, 17})}; @@ -670,7 +670,7 @@ TEST_F(ListDistinctTest, InputListsOfNestedStructsHaveNull) "ÁBC", "ÁÁÁ", "ÍÍÍÍÍ", - "", /*NULL*/ + "", /*NULL*/ "XYZ", "ÁBC" // end list3 }, @@ -729,8 +729,8 @@ TEST_F(ListDistinctTest, InputListsOfStructsOfLists) floats_lists{3, 4, 5}, // end list2 // begin list3 floats_lists{}, - floats_lists{}, // end list3 - // begin list4 + floats_lists{}, // end list3 + // begin list4 floats_lists{6, 7}, floats_lists{6, 7}, floats_lists{6, 7}}; diff --git a/cpp/tests/reshape/interleave_columns_tests.cpp b/cpp/tests/reshape/interleave_columns_tests.cpp index eba6c961bbb..e8ea9d619c5 100644 --- a/cpp/tests/reshape/interleave_columns_tests.cpp +++ b/cpp/tests/reshape/interleave_columns_tests.cpp @@ -806,7 +806,7 @@ TYPED_TEST(ListsColumnsInterleaveTypedTest, SlicedInputListsOfListsWithNulls) ListsCol{ListsCol{{null, 11}, null_at(0)}, ListsCol{{22, null, null}, nulls_at({1, 2})}}, // don't care ListsCol{ListsCol{{null, 11}, null_at(0)}, - ListsCol{{22, null, null}, nulls_at({1, 2})}} // don't care + ListsCol{{22, null, null}, nulls_at({1, 2})}} // don't care }; auto const col1 = cudf::slice(col1_original, {3, 6})[0]; diff --git a/cpp/tests/rolling/range_rolling_window_test.cpp b/cpp/tests/rolling/range_rolling_window_test.cpp index 585383f28f8..eed9db1fe04 100644 --- a/cpp/tests/rolling/range_rolling_window_test.cpp +++ b/cpp/tests/rolling/range_rolling_window_test.cpp @@ -91,7 +91,7 @@ struct window_exec { ScalarT preceding; // Preceding window scalar. ScalarT following; // Following window scalar. 
cudf::size_type min_periods = 1; -}; // struct window_exec; +}; // struct window_exec; struct RangeRollingTest : public cudf::test::BaseFixture {}; diff --git a/cpp/tests/sort/segmented_sort_tests.cpp b/cpp/tests/sort/segmented_sort_tests.cpp index b3f98eb54b9..da9666cbc74 100644 --- a/cpp/tests/sort/segmented_sort_tests.cpp +++ b/cpp/tests/sort/segmented_sort_tests.cpp @@ -270,7 +270,7 @@ TEST_F(SegmentedSortInt, Sliced) column_wrapper expected2{{0, 1, 3, 2, 4, 5, 6}}; column_wrapper expected3{{0, 1, 2, 3, 4, 5, 6}}; // clang-format on - auto slice = cudf::slice(col1, {4, 11})[0]; // 7 elements + auto slice = cudf::slice(col1, {4, 11})[0]; // 7 elements cudf::table_view input{{slice}}; auto seg_slice = cudf::slice(segments2, {2, 4})[0]; // 2 elements diff --git a/cpp/tests/strings/chars_types_tests.cpp b/cpp/tests/strings/chars_types_tests.cpp index a16da41af7a..c595977c269 100644 --- a/cpp/tests/strings/chars_types_tests.cpp +++ b/cpp/tests/strings/chars_types_tests.cpp @@ -50,17 +50,17 @@ TEST_P(CharsTypes, AllTypes) "\t\r\n\f "}; bool expecteds[] = {false, false, false, false, false, false, false, false, - false, false, false, false, false, true, false, false, // decimal + false, false, false, false, false, true, false, false, // decimal false, false, false, false, false, false, false, false, - false, true, false, true, false, true, false, false, // numeric + false, true, false, true, false, true, false, false, // numeric false, false, false, false, false, false, false, false, - false, false, false, true, false, true, false, false, // digit + false, false, false, true, false, true, false, false, // digit true, true, false, true, false, false, false, false, - false, false, false, false, false, false, true, false, // alpha + false, false, false, false, false, false, true, false, // alpha false, false, false, false, false, false, false, false, - false, false, false, false, false, false, false, true, // space + false, false, false, false, false, false, false, true, // space false, false, false, true, false, false, false, false, - false, false, false, false, false, false, false, false, // upper + false, false, false, false, false, false, false, false, // upper false, true, false, false, false, false, false, false, false, false, false, false, false, false, true, false}; // lower diff --git a/cpp/tests/strings/durations_tests.cpp b/cpp/tests/strings/durations_tests.cpp index 0c7a1ad8042..1902f907f43 100644 --- a/cpp/tests/strings/durations_tests.cpp +++ b/cpp/tests/strings/durations_tests.cpp @@ -398,7 +398,7 @@ TEST_F(StringsDurationsTest, ParseSingle) "-59", "999", "-999", - "", // error + "", // error "01", ""}; // error auto size = cudf::column_view(string_src).size(); @@ -449,7 +449,7 @@ TEST_F(StringsDurationsTest, ParseMultiple) "-59:00:00", "999:00:00", "-999:00:00", - "", // error + "", // error "01:01:01", ""}; // error auto size = cudf::column_view(string_src).size(); @@ -503,7 +503,7 @@ TEST_F(StringsDurationsTest, ParseSubsecond) "-59:00:00", "999:00:00", "-999:00:00", - "", // error + "", // error "01:01:01", ""}; // error auto size = cudf::column_view(string_src).size(); @@ -660,7 +660,7 @@ TEST_F(StringsDurationsTest, ParseCompoundSpecifier) "09:00 AM", // error "", // error "01:01:01", - ""}; // error + ""}; // error cudf::test::fixed_width_column_wrapper expected_s3( {0, diff --git a/cpp/tests/utilities/column_utilities.cu b/cpp/tests/utilities/column_utilities.cu index bae402155e9..620e0bfe8de 100644 --- a/cpp/tests/utilities/column_utilities.cu +++ 
b/cpp/tests/utilities/column_utilities.cu @@ -440,7 +440,7 @@ class corresponding_rows_not_equivalent { // Must handle inf and nan separately if (std::isinf(x) || std::isinf(y)) { - return x != y; // comparison of (inf==inf) returns true + return x != y; // comparison of (inf==inf) returns true } else if (std::isnan(x) || std::isnan(y)) { return std::isnan(x) != std::isnan(y); // comparison of (nan==nan) returns false } else { From 97501d87e2070e8f07eb17b2c5e59742c490c6b1 Mon Sep 17 00:00:00 2001 From: Karthikeyan <6488848+karthikeyann@users.noreply.github.com> Date: Wed, 20 Sep 2023 07:42:20 +0530 Subject: [PATCH 078/150] Long string optimization for string column parsing in JSON reader (#13803) closes #13724 Previously, one thread per string was allocated to parse a string column. For long strings (over 1024 characters), decoding with one thread per string takes too long even when only a few such strings are present. With this change, one warp per string is used to parse strings of length <= 1024, and one block per string for strings longer than 1024; if the maximum string length is below 128, one thread per string is used as before. Both kernels use 256 threads per block. The 1-warp-per-string and 1-block-per-string code paths are nearly identical, differing only in the warp-wide versus block-wide primitives used for their reduction and scan operations; shared memory usage also differs slightly between the two. Authors: - Karthikeyan (https://github.com/karthikeyann) - Vukasin Milovanovic (https://github.com/vuule) Approvers: - Robert Maynard (https://github.com/robertmaynard) - Vukasin Milovanovic (https://github.com/vuule) - Elias Stehle (https://github.com/elstehle) - Lawrence Mitchell (https://github.com/wence-) URL: https://github.com/rapidsai/cudf/pull/13803 --- cpp/CMakeLists.txt | 2 + cpp/include/cudf/io/detail/data_casting.cuh | 431 -------- cpp/src/io/json/json_column.cu | 39 +- cpp/src/io/json/nested_json_gpu.cu | 22 +- cpp/src/io/json/write_json.cu | 3 +- cpp/src/io/utilities/data_casting.cu | 987 ++++++++++++++++++ cpp/src/io/utilities/parsing_utils.cuh | 24 +- cpp/src/io/utilities/string_parsing.hpp | 79 ++ .../{type_inference.cuh => type_inference.cu} | 57 +- cpp/tests/io/json_test.cpp | 119 +++ cpp/tests/io/json_type_cast_test.cu | 189 +++- cpp/tests/io/type_inference_test.cu | 30 +- 12 files changed, 1395 insertions(+), 587 deletions(-) delete mode 100644 cpp/include/cudf/io/detail/data_casting.cuh create mode 100644 cpp/src/io/utilities/data_casting.cu create mode 100644 cpp/src/io/utilities/string_parsing.hpp rename cpp/src/io/utilities/{type_inference.cuh => type_inference.cu} (84%) diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index 900e9eed98e..a84f7bd5224 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -413,11 +413,13 @@ add_library( src/io/utilities/arrow_io_source.cpp src/io/utilities/column_buffer.cpp src/io/utilities/config_utils.cpp + src/io/utilities/data_casting.cu src/io/utilities/data_sink.cpp src/io/utilities/datasource.cpp src/io/utilities/file_io_utilities.cpp src/io/utilities/parsing_utils.cu src/io/utilities/row_selection.cpp + src/io/utilities/type_inference.cu src/io/utilities/trie.cu src/jit/cache.cpp src/jit/parser.cpp diff --git a/cpp/include/cudf/io/detail/data_casting.cuh b/cpp/include/cudf/io/detail/data_casting.cuh deleted file mode 100644 index b7ee5e05e96..00000000000 --- a/cpp/include/cudf/io/detail/data_casting.cuh +++ /dev/null @@ -1,431 +0,0 @@ -/* - * Copyright (c) 2022-2023, NVIDIA CORPORATION.
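A rough sketch of the dispatch strategy described in the message above, with illustrative names and thresholds; the actual kernels in cpp/src/io/utilities/data_casting.cu parse and unescape rather than copy, and take their cutoffs from SINGLE_THREAD_THRESHOLD and WARP_THRESHOLD defined there:

// Illustrative sketch only: chooses a per-string parallelism level the way the
// commit message describes. Names and thresholds are hypothetical.
#include <cuda_runtime.h>

constexpr int single_thread_max = 128;   // short strings: one thread per string
constexpr int warp_max          = 1024;  // medium strings: one warp per string
constexpr int threads_per_block = 256;   // used by both cooperative kernels

// One warp per string: lane i handles bytes i, i + 32, i + 64, ...
__global__ void warp_per_string(
  char const* in, int const* offsets, int const* lengths, char* out, int num_strings)
{
  int const warp_id = (blockIdx.x * blockDim.x + threadIdx.x) / 32;
  int const lane    = threadIdx.x % 32;
  if (warp_id >= num_strings) return;
  for (int i = lane; i < lengths[warp_id]; i += 32) {
    out[offsets[warp_id] + i] = in[offsets[warp_id] + i];
  }
}

// One block per string: the identical loop, strided by the whole block instead.
__global__ void block_per_string(char const* in, int const* offsets, int const* lengths, char* out)
{
  for (int i = threadIdx.x; i < lengths[blockIdx.x]; i += blockDim.x) {
    out[offsets[blockIdx.x] + i] = in[offsets[blockIdx.x] + i];
  }
}

// Host side: pick the kernel from the longest string in the column.
void dispatch(char const* d_in, int const* d_offsets, int const* d_lengths,
              char* d_out, int num_strings, int max_string_length)
{
  if (max_string_length < single_thread_max) {
    // launch a plain one-thread-per-string kernel (omitted for brevity)
  } else if (max_string_length <= warp_max) {
    int const warps_per_block = threads_per_block / 32;
    int const blocks          = (num_strings + warps_per_block - 1) / warps_per_block;
    warp_per_string<<<blocks, threads_per_block>>>(d_in, d_offsets, d_lengths, d_out, num_strings);
  } else {
    block_per_string<<<num_strings, threads_per_block>>>(d_in, d_offsets, d_lengths, d_out);
  }
}

Keeping the two cooperative kernels structurally identical, as the message notes, means only the reduction/scan primitives (warp-wide versus block-wide) and the shared-memory footprint differ between them.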
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include - -#include - -namespace cudf::io::json::detail { - -// Unicode code point escape sequence -static constexpr char UNICODE_SEQ = 0x7F; - -// Invalid escape sequence -static constexpr char NON_ESCAPE_CHAR = 0x7E; - -// Unicode code point escape sequence prefix comprises '\' and 'u' characters -static constexpr size_type UNICODE_ESC_PREFIX = 2; - -// Unicode code point escape sequence comprises four hex characters -static constexpr size_type UNICODE_HEX_DIGIT_COUNT = 4; - -// A unicode code point escape sequence is \uXXXX -static auto constexpr NUM_UNICODE_ESC_SEQ_CHARS = UNICODE_ESC_PREFIX + UNICODE_HEX_DIGIT_COUNT; - -static constexpr auto UTF16_HIGH_SURROGATE_BEGIN = 0xD800; -static constexpr auto UTF16_HIGH_SURROGATE_END = 0xDC00; -static constexpr auto UTF16_LOW_SURROGATE_BEGIN = 0xDC00; -static constexpr auto UTF16_LOW_SURROGATE_END = 0xE000; - -/** - * @brief Describing whether data casting of a certain item succeed, the item was parsed to null, or - * whether type casting failed. - */ -enum class data_casting_result { PARSING_SUCCESS, PARSED_TO_NULL, PARSING_FAILURE }; - -/** - * @brief Providing additional information about the type casting result. - */ -struct data_casting_result_info { - // Number of bytes written to output - size_type bytes; - // Whether parsing succeeded, item was parsed to null, or failed - data_casting_result result; -}; - -/** - * @brief Returns the character to output for a given escaped character that's following a - * backslash. - * - * @param escaped_char The character following the backslash. - * @return The character to output for a given character that's following a backslash - */ -__device__ __forceinline__ char get_escape_char(char escaped_char) -{ - switch (escaped_char) { - case '"': return '"'; - case '\\': return '\\'; - case '/': return '/'; - case 'b': return '\b'; - case 'f': return '\f'; - case 'n': return '\n'; - case 'r': return '\r'; - case 't': return '\t'; - case 'u': return UNICODE_SEQ; - default: return NON_ESCAPE_CHAR; - } -} - -/** - * @brief Returns the escaped characters for a given character. - * - * @param escaped_char The character to escape. - * @return The escaped characters for a given character. - */ -__device__ __forceinline__ thrust::pair get_escaped_char(char escaped_char) -{ - switch (escaped_char) { - case '"': return {'\\', '"'}; - case '\\': return {'\\', '\\'}; - case '/': return {'\\', '/'}; - case '\b': return {'\\', 'b'}; - case '\f': return {'\\', 'f'}; - case '\n': return {'\\', 'n'}; - case '\r': return {'\\', 'r'}; - case '\t': return {'\\', 't'}; - // case 'u': return UNICODE_SEQ; - default: return {'\0', escaped_char}; - } -} -/** - * @brief Parses the hex value from the four hex digits of a unicode code point escape sequence - * \uXXXX. 
- * - * @param str Pointer to the first (most-significant) hex digit - * @return The parsed hex value if successful, -1 otherwise. - */ -__device__ __forceinline__ int32_t parse_unicode_hex(char const* str) -{ - // Prepare result - int32_t result = 0, base = 1; - constexpr int32_t hex_radix = 16; - - // Iterate over hex digits right-to-left - size_type index = UNICODE_HEX_DIGIT_COUNT; - while (index-- > 0) { - char const ch = str[index]; - if (ch >= '0' && ch <= '9') { - result += static_cast((ch - '0') + 0) * base; - base *= hex_radix; - } else if (ch >= 'A' && ch <= 'F') { - result += static_cast((ch - 'A') + 10) * base; - base *= hex_radix; - } else if (ch >= 'a' && ch <= 'f') { - result += static_cast((ch - 'a') + 10) * base; - base *= hex_radix; - } else { - return -1; - } - } - return result; -} - -/** - * @brief Writes the UTF-8 byte sequence to \p out_it and returns the number of bytes written to - * \p out_it - */ -constexpr size_type write_utf8_char(char_utf8 character, char*& out_it) -{ - auto const bytes = (out_it == nullptr) ? strings::detail::bytes_in_char_utf8(character) - : strings::detail::from_char_utf8(character, out_it); - if (out_it) out_it += bytes; - return bytes; -} - -/** - * @brief Processes a string, replaces escape sequences and optionally strips off the quote - * characters. - * - * @tparam in_iterator_t A bidirectional input iterator type whose value_type is convertible to - * char - * @param in_begin Iterator to the first item to process - * @param in_end Iterator to one past the last item to process - * @param d_buffer Output character buffer to the first item to write - * @param options Settings for controlling string processing behavior - * @return A struct of (num_bytes_written, parsing_success_result), where num_bytes_written is - * the number of bytes written to d_buffer, parsing_success_result is enum value indicating whether - * parsing succeeded, item was parsed to null, or failed. - */ -template -__device__ __forceinline__ data_casting_result_info -process_string(in_iterator_t in_begin, - in_iterator_t in_end, - char* d_buffer, - cudf::io::parse_options_view const& options) -{ - int32_t bytes = 0; - auto const num_in_chars = thrust::distance(in_begin, in_end); - // String values are indicated by keeping the quote character - bool const is_string_value = - num_in_chars >= 2LL && - (options.quotechar == '\0' || - (*in_begin == options.quotechar) && (*thrust::prev(in_end) == options.quotechar)); - - // Copy literal/numeric value - if (not is_string_value) { - while (in_begin != in_end) { - if (d_buffer) *d_buffer++ = *in_begin; - ++in_begin; - ++bytes; - } - return {bytes, data_casting_result::PARSING_SUCCESS}; - } - // Whether in the original JSON this was a string value enclosed in quotes - // ({"a":"foo"} vs. 
{"a":1.23}) - char const backslash_char = '\\'; - - // Escape-flag, set after encountering a backslash character - bool escape = false; - - // Exclude beginning and ending quote chars from string range - if (!options.keepquotes) { - ++in_begin; - --in_end; - } - - // Iterate over the input - while (in_begin != in_end) { - // Copy single character to output - if (!escape) { - escape = (*in_begin == backslash_char); - if (!escape) { - if (d_buffer) *d_buffer++ = *in_begin; - ++bytes; - } - ++in_begin; - continue; - } - - // Previous char indicated beginning of escape sequence - // Reset escape flag for next loop iteration - escape = false; - - // Check the character that is supposed to be escaped - auto escaped_char = get_escape_char(*in_begin); - - // We escaped an invalid escape character -> "fail"/null for this item - if (escaped_char == NON_ESCAPE_CHAR) { return {bytes, data_casting_result::PARSING_FAILURE}; } - - // Regular, single-character escape - if (escaped_char != UNICODE_SEQ) { - if (d_buffer) *d_buffer++ = escaped_char; - ++bytes; - ++in_begin; - continue; - } - - // This is an escape sequence of a unicode code point: \uXXXX, - // where each X in XXXX represents a hex digit - // Skip over the 'u' char from \uXXXX to the first hex digit - ++in_begin; - - // Make sure that there's at least 4 characters left from the - // input, which are expected to be hex digits - if (thrust::distance(in_begin, in_end) < UNICODE_HEX_DIGIT_COUNT) { - return {bytes, data_casting_result::PARSING_FAILURE}; - } - - auto hex_val = parse_unicode_hex(in_begin); - - // Couldn't parse hex values from the four-character sequence -> "fail"/null for this item - if (hex_val < 0) { return {bytes, data_casting_result::PARSING_FAILURE}; } - - // Skip over the four hex digits - thrust::advance(in_begin, UNICODE_HEX_DIGIT_COUNT); - - // If this may be a UTF-16 encoded surrogate pair: - // we expect another \uXXXX sequence - int32_t hex_low_val = 0; - if (thrust::distance(in_begin, in_end) >= NUM_UNICODE_ESC_SEQ_CHARS && - *in_begin == backslash_char && *thrust::next(in_begin) == 'u') { - // Try to parse hex value following the '\' and 'u' characters from what may be a UTF16 low - // surrogate - hex_low_val = parse_unicode_hex(thrust::next(in_begin, 2)); - } - - // This is indeed a UTF16 surrogate pair - if (hex_val >= UTF16_HIGH_SURROGATE_BEGIN && hex_val < UTF16_HIGH_SURROGATE_END && - hex_low_val >= UTF16_LOW_SURROGATE_BEGIN && hex_low_val < UTF16_LOW_SURROGATE_END) { - // Skip over the second \uXXXX sequence - thrust::advance(in_begin, NUM_UNICODE_ESC_SEQ_CHARS); - - // Compute UTF16-encoded code point - uint32_t unicode_code_point = 0x10000 + ((hex_val - UTF16_HIGH_SURROGATE_BEGIN) << 10) + - (hex_low_val - UTF16_LOW_SURROGATE_BEGIN); - auto utf8_chars = strings::detail::codepoint_to_utf8(unicode_code_point); - bytes += write_utf8_char(utf8_chars, d_buffer); - } - - // Just a single \uXXXX sequence - else { - auto utf8_chars = strings::detail::codepoint_to_utf8(hex_val); - bytes += write_utf8_char(utf8_chars, d_buffer); - } - } - - // The last character of the input is a backslash -> "fail"/null for this item - if (escape) { return {bytes, data_casting_result::PARSING_FAILURE}; } - return {bytes, data_casting_result::PARSING_SUCCESS}; -} - -template -struct string_parse { - str_tuple_it str_tuples; - bitmask_type* null_mask; - size_type* null_count_data; - cudf::io::parse_options_view const options; - size_type* d_offsets{}; - char* d_chars{}; - - __device__ void operator()(size_type idx) - { - if (null_mask != 
nullptr && not bit_is_set(null_mask, idx)) { - if (!d_chars) d_offsets[idx] = 0; - return; - } - auto const in_begin = str_tuples[idx].first; - auto const in_end = in_begin + str_tuples[idx].second; - auto const num_in_chars = str_tuples[idx].second; - - // Check if the value corresponds to the null literal - auto const is_null_literal = - (!d_chars) && - serialized_trie_contains(options.trie_na, {in_begin, static_cast(num_in_chars)}); - if (is_null_literal && null_mask != nullptr) { - clear_bit(null_mask, idx); - atomicAdd(null_count_data, 1); - if (!d_chars) d_offsets[idx] = 0; - return; - } - - char* d_buffer = d_chars ? d_chars + d_offsets[idx] : nullptr; - auto str_process_info = process_string(in_begin, in_end, d_buffer, options); - if (str_process_info.result != data_casting_result::PARSING_SUCCESS) { - if (null_mask != nullptr) { - clear_bit(null_mask, idx); - atomicAdd(null_count_data, 1); - } - if (!d_chars) d_offsets[idx] = 0; - } else { - if (!d_chars) d_offsets[idx] = str_process_info.bytes; - } - } -}; -/** - * @brief Parses the data from an iterator of string views, casting it to the given target data type - * - * @param str_tuples Iterator returning a string view, i.e., a (ptr, length) pair - * @param col_size The total number of items of this column - * @param col_type The column's target data type - * @param null_mask A null mask that renders certain items from the input invalid - * @param options Settings for controlling the processing behavior - * @param stream CUDA stream used for device memory operations and kernel launches - * @param mr The resource to be used for device memory allocation - * @return The column that contains the parsed data - */ -template -std::unique_ptr parse_data(str_tuple_it str_tuples, - size_type col_size, - data_type col_type, - B&& null_mask, - size_type null_count, - cudf::io::parse_options_view const& options, - rmm::cuda_stream_view stream, - rmm::mr::device_memory_resource* mr) -{ - CUDF_FUNC_RANGE(); - - auto d_null_count = rmm::device_scalar(null_count, stream); - auto null_count_data = d_null_count.data(); - - if (col_type == cudf::data_type{cudf::type_id::STRING}) { - // this utility calls the functor to build the offsets and chars columns; - // the bitmask and null count may be updated by parse failures - auto [offsets, chars] = cudf::strings::detail::make_strings_children( - string_parse{ - str_tuples, static_cast(null_mask.data()), null_count_data, options}, - col_size, - stream, - mr); - - return make_strings_column(col_size, - std::move(offsets), - std::move(chars), - d_null_count.value(stream), - std::move(null_mask)); - } - - auto out_col = - make_fixed_width_column(col_type, col_size, std::move(null_mask), null_count, stream, mr); - auto output_dv_ptr = mutable_column_device_view::create(*out_col, stream); - - // use existing code (`ConvertFunctor`) to convert values - thrust::for_each_n( - rmm::exec_policy(stream), - thrust::make_counting_iterator(0), - col_size, - [str_tuples, col = *output_dv_ptr, options, col_type, null_count_data] __device__( - size_type row) { - if (col.is_null(row)) { return; } - auto const in = str_tuples[row]; - - auto const is_null_literal = - serialized_trie_contains(options.trie_na, {in.first, static_cast(in.second)}); - - if (is_null_literal) { - col.set_null(row); - atomicAdd(null_count_data, 1); - return; - } - - // If this is a string value, remove quotes - auto [in_begin, in_end] = trim_quotes(in.first, in.first + in.second, options.quotechar); - - auto const is_parsed = 
cudf::type_dispatcher(col_type, - ConvertFunctor{}, - in_begin, - in_end, - col.data(), - row, - col_type, - options, - false); - if (not is_parsed) { - col.set_null(row); - atomicAdd(null_count_data, 1); - } - }); - - out_col->set_null_count(d_null_count.value(stream)); - - return out_col; -} - -} // namespace cudf::io::json::detail diff --git a/cpp/src/io/json/json_column.cu b/cpp/src/io/json/json_column.cu index cabf904f020..5d7fb9d6b43 100644 --- a/cpp/src/io/json/json_column.cu +++ b/cpp/src/io/json/json_column.cu @@ -16,14 +16,13 @@ #include "nested_json.hpp" #include -#include +#include #include #include #include #include #include -#include #include #include #include @@ -331,23 +330,27 @@ std::vector copy_strings_to_host(device_span input, { CUDF_FUNC_RANGE(); auto const num_strings = node_range_begin.size(); - rmm::device_uvector> string_views(num_strings, stream); + rmm::device_uvector string_offsets(num_strings, stream); + rmm::device_uvector string_lengths(num_strings, stream); auto d_offset_pairs = thrust::make_zip_iterator(node_range_begin.begin(), node_range_end.begin()); thrust::transform(rmm::exec_policy(stream), d_offset_pairs, d_offset_pairs + num_strings, - string_views.begin(), - [data = input.data()] __device__(auto const& offsets) { + thrust::make_zip_iterator(string_offsets.begin(), string_lengths.begin()), + [] __device__(auto const& offsets) { // Note: first character for non-field columns - return thrust::make_pair( - data + thrust::get<0>(offsets), + return thrust::make_tuple( + static_cast(thrust::get<0>(offsets)), static_cast(thrust::get<1>(offsets) - thrust::get<0>(offsets))); }); cudf::io::parse_options_view options_view{}; options_view.quotechar = '\0'; // no quotes options_view.keepquotes = true; - auto d_column_names = parse_data(string_views.begin(), + auto d_offset_length_it = + thrust::make_zip_iterator(string_offsets.begin(), string_lengths.begin()); + auto d_column_names = parse_data(input.data(), + d_offset_length_it, num_strings, data_type{type_id::STRING}, rmm::device_buffer{}, @@ -355,7 +358,7 @@ std::vector copy_strings_to_host(device_span input, options_view, stream, rmm::mr::get_current_device_resource()); - auto to_host = [stream](auto const& col) { + auto to_host = [stream](auto const& col) { if (col.is_empty()) return std::vector{}; auto const scv = cudf::strings_column_view(col); auto const h_chars = cudf::detail::make_std_vector_sync( @@ -763,19 +766,6 @@ std::pair, std::vector> device_json_co // TODO how about directly storing pair in json_column? 
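// The zip of (string_offsets, string_lengths) below locates each string's bytes
// directly within d_input; the same iterator now feeds both type inference and
// parse_data, replacing the separate (offset, length) and (pointer, length)
// transform iterators that were built here before.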
auto offset_length_it = thrust::make_zip_iterator(json_col.string_offsets.begin(), json_col.string_lengths.begin()); - // Prepare iterator that returns (string_offset, string_length)-pairs needed by inference - auto string_ranges_it = - thrust::make_transform_iterator(offset_length_it, [] __device__(auto ip) { - return thrust::pair{ - thrust::get<0>(ip), static_cast(thrust::get<1>(ip))}; - }); - - // Prepare iterator that returns (string_ptr, string_length)-pairs needed by type conversion - auto string_spans_it = thrust::make_transform_iterator( - offset_length_it, [data = d_input.data()] __device__(auto ip) { - return thrust::pair{ - data + thrust::get<0>(ip), static_cast(thrust::get<1>(ip))}; - }); data_type target_type{}; @@ -790,12 +780,13 @@ std::pair, std::vector> device_json_co // Infer column type, if we don't have an explicit type for it else { target_type = cudf::io::detail::infer_data_type( - options.json_view(), d_input, string_ranges_it, col_size, stream); + options.json_view(), d_input, offset_length_it, col_size, stream); } auto [result_bitmask, null_count] = make_validity(json_col); // Convert strings to the inferred data type - auto col = parse_data(string_spans_it, + auto col = parse_data(d_input.data(), + offset_length_it, col_size, target_type, std::move(result_bitmask), diff --git a/cpp/src/io/json/nested_json_gpu.cu b/cpp/src/io/json/nested_json_gpu.cu index 0b49f97597d..06ac11485cb 100644 --- a/cpp/src/io/json/nested_json_gpu.cu +++ b/cpp/src/io/json/nested_json_gpu.cu @@ -19,14 +19,13 @@ #include #include #include -#include +#include #include #include #include #include #include -#include #include #include #include @@ -1949,20 +1948,6 @@ std::pair, std::vector> json_column_to auto offset_length_it = thrust::make_zip_iterator(d_string_offsets.begin(), d_string_lengths.begin()); - // Prepare iterator that returns (string_offset, string_length)-pairs needed by inference - auto string_ranges_it = - thrust::make_transform_iterator(offset_length_it, [] __device__(auto ip) { - return thrust::pair{ - thrust::get<0>(ip), static_cast(thrust::get<1>(ip))}; - }); - - // Prepare iterator that returns (string_ptr, string_length)-pairs needed by type conversion - auto string_spans_it = thrust::make_transform_iterator( - offset_length_it, [data = d_input.data()] __device__(auto ip) { - return thrust::pair{ - data + thrust::get<0>(ip), static_cast(thrust::get<1>(ip))}; - }); - data_type target_type{}; if (schema.has_value()) { @@ -1978,7 +1963,7 @@ std::pair, std::vector> json_column_to target_type = cudf::io::detail::infer_data_type(parsing_options(options, stream).json_view(), d_input, - string_ranges_it, + offset_length_it, col_size, stream); } @@ -1986,7 +1971,8 @@ std::pair, std::vector> json_column_to auto [result_bitmask, null_count] = make_validity(json_col); // Convert strings to the inferred data type - auto col = parse_data(string_spans_it, + auto col = parse_data(d_input.data(), + offset_length_it, col_size, target_type, std::move(result_bitmask), diff --git a/cpp/src/io/json/write_json.cu b/cpp/src/io/json/write_json.cu index 1e44522ed33..2d363c51fce 100644 --- a/cpp/src/io/json/write_json.cu +++ b/cpp/src/io/json/write_json.cu @@ -20,6 +20,7 @@ */ #include +#include #include #include @@ -27,9 +28,9 @@ #include #include #include +#include #include #include -#include #include #include #include diff --git a/cpp/src/io/utilities/data_casting.cu b/cpp/src/io/utilities/data_casting.cu new file mode 100644 index 00000000000..1772e5e43fa --- /dev/null +++ 
b/cpp/src/io/utilities/data_casting.cu @@ -0,0 +1,987 @@ +/* + * Copyright (c) 2022-2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include + +#include +#include + +namespace cudf::io::json::detail { + +constexpr auto SINGLE_THREAD_THRESHOLD = 128; +constexpr auto WARP_THRESHOLD = 128 * 128; // 16K + +// Unicode code point escape sequence +static constexpr char UNICODE_SEQ = 0x7F; + +// Invalid escape sequence +static constexpr char NON_ESCAPE_CHAR = 0x7E; + +// Unicode code point escape sequence prefix comprises '\' and 'u' characters +static constexpr size_type UNICODE_ESC_PREFIX = 2; + +// Unicode code point escape sequence comprises four hex characters +static constexpr size_type UNICODE_HEX_DIGIT_COUNT = 4; + +// A unicode code point escape sequence is \uXXXX +static auto constexpr NUM_UNICODE_ESC_SEQ_CHARS = UNICODE_ESC_PREFIX + UNICODE_HEX_DIGIT_COUNT; + +static constexpr auto UTF16_HIGH_SURROGATE_BEGIN = 0xD800; +static constexpr auto UTF16_HIGH_SURROGATE_END = 0xDC00; +static constexpr auto UTF16_LOW_SURROGATE_BEGIN = 0xDC00; +static constexpr auto UTF16_LOW_SURROGATE_END = 0xE000; + +/** + * @brief Describing whether data casting of a certain item succeed, the item was parsed to null, or + * whether type casting failed. + */ +enum class data_casting_result { PARSING_SUCCESS, PARSED_TO_NULL, PARSING_FAILURE }; + +/** + * @brief Providing additional information about the type casting result. + */ +struct data_casting_result_info { + // Number of bytes written to output + size_type bytes; + // Whether parsing succeeded, item was parsed to null, or failed + data_casting_result result; +}; + +/** + * @brief Returns the character to output for a given escaped character that's following a + * backslash. + * + * @param escaped_char The character following the backslash. + * @return The character to output for a given character that's following a backslash + */ +__device__ __forceinline__ char get_escape_char(char escaped_char) +{ + switch (escaped_char) { + case '"': return '"'; + case '\\': return '\\'; + case '/': return '/'; + case 'b': return '\b'; + case 'f': return '\f'; + case 'n': return '\n'; + case 'r': return '\r'; + case 't': return '\t'; + case 'u': return UNICODE_SEQ; + default: return NON_ESCAPE_CHAR; + } +} + +/** + * @brief Parses the hex value from the four hex digits of a unicode code point escape sequence + * \uXXXX. + * + * @param str Pointer to the first (most-significant) hex digit + * @return The parsed hex value if successful, -1 otherwise. 
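+ * For example, str = "0041" parses to 0x41 (latin capital letter A), while a
+ * non-hex character anywhere in the four digits, e.g. "00G1", returns -1.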
+ */ +__device__ __forceinline__ int32_t parse_unicode_hex(char const* str) +{ + // Prepare result + int32_t result = 0, base = 1; + constexpr int32_t hex_radix = 16; + + // Iterate over hex digits right-to-left + size_type index = UNICODE_HEX_DIGIT_COUNT; + while (index-- > 0) { + char const ch = str[index]; + if (ch >= '0' && ch <= '9') { + result += static_cast((ch - '0') + 0) * base; + base *= hex_radix; + } else if (ch >= 'A' && ch <= 'F') { + result += static_cast((ch - 'A') + 10) * base; + base *= hex_radix; + } else if (ch >= 'a' && ch <= 'f') { + result += static_cast((ch - 'a') + 10) * base; + base *= hex_radix; + } else { + return -1; + } + } + return result; +} + +/** + * @brief Writes the UTF-8 byte sequence to \p out_it and returns the number of bytes written to + * \p out_it + */ +constexpr size_type write_utf8_char(char_utf8 character, char*& out_it) +{ + auto const bytes = (out_it == nullptr) ? strings::detail::bytes_in_char_utf8(character) + : strings::detail::from_char_utf8(character, out_it); + if (out_it) out_it += bytes; + return bytes; +} + +/** + * @brief Processes a string, replaces escape sequences and optionally strips off the quote + * characters. + * + * @tparam in_iterator_t A bidirectional input iterator type whose value_type is convertible to + * char + * @param in_begin Iterator to the first item to process + * @param in_end Iterator to one past the last item to process + * @param d_buffer Output character buffer to the first item to write + * @param options Settings for controlling string processing behavior + * @return A struct of (num_bytes_written, parsing_success_result), where num_bytes_written is + * the number of bytes written to d_buffer, parsing_success_result is enum value indicating whether + * parsing succeeded, item was parsed to null, or failed. 
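+ *
+ * For example, with options.keepquotes == false the five-character input
+ * "A\n" (quote, A, backslash, n, quote) produces the two output bytes
+ * 'A' and '\n' and returns {2, data_casting_result::PARSING_SUCCESS}.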
+ */ +template +__device__ __forceinline__ data_casting_result_info +process_string(in_iterator_t in_begin, + in_iterator_t in_end, + char* d_buffer, + cudf::io::parse_options_view const& options) +{ + int32_t bytes = 0; + auto const num_in_chars = thrust::distance(in_begin, in_end); + // String values are indicated by keeping the quote character + bool const is_string_value = + num_in_chars >= 2LL && + (options.quotechar == '\0' || + (*in_begin == options.quotechar) && (*thrust::prev(in_end) == options.quotechar)); + + // Copy literal/numeric value + if (not is_string_value) { + bytes += (in_end - in_begin); + if (d_buffer) d_buffer = thrust::copy(thrust::seq, in_begin, in_end, d_buffer); + return {bytes, data_casting_result::PARSING_SUCCESS}; + } + char constexpr backslash_char = '\\'; + + // Escape-flag, set after encountering a backslash character + bool is_prev_char_escape = false; + + // Exclude beginning and ending quote chars from string range + if (!options.keepquotes) { + ++in_begin; + --in_end; + } + + // Iterate over the input + while (in_begin != in_end) { + // Copy single character to output + if (!is_prev_char_escape) { + is_prev_char_escape = (*in_begin == backslash_char); + if (!is_prev_char_escape) { + if (d_buffer) *d_buffer++ = *in_begin; + ++bytes; + } + ++in_begin; + continue; + } + + // Previous char indicated beginning of escape sequence + // Reset escape flag for next loop iteration + is_prev_char_escape = false; + + // Check the character that is supposed to be escaped + auto escaped_char = get_escape_char(*in_begin); + + // We escaped an invalid escape character -> "fail"/null for this item + if (escaped_char == NON_ESCAPE_CHAR) { return {bytes, data_casting_result::PARSING_FAILURE}; } + + // Regular, single-character escape + if (escaped_char != UNICODE_SEQ) { + if (d_buffer) *d_buffer++ = escaped_char; + ++bytes; + ++in_begin; + continue; + } + + // This is an escape sequence of a unicode code point: \uXXXX, + // where each X in XXXX represents a hex digit + // Skip over the 'u' char from \uXXXX to the first hex digit + ++in_begin; + + // Make sure that there's at least 4 characters left from the + // input, which are expected to be hex digits + if (thrust::distance(in_begin, in_end) < UNICODE_HEX_DIGIT_COUNT) { + return {bytes, data_casting_result::PARSING_FAILURE}; + } + + auto hex_val = parse_unicode_hex(in_begin); + + // Couldn't parse hex values from the four-character sequence -> "fail"/null for this item + if (hex_val < 0) { return {bytes, data_casting_result::PARSING_FAILURE}; } + + // Skip over the four hex digits + thrust::advance(in_begin, UNICODE_HEX_DIGIT_COUNT); + + // If this may be a UTF-16 encoded surrogate pair: + // we expect another \uXXXX sequence + int32_t hex_low_val = 0; + if (hex_val >= UTF16_HIGH_SURROGATE_BEGIN && hex_val < UTF16_HIGH_SURROGATE_END && + thrust::distance(in_begin, in_end) >= NUM_UNICODE_ESC_SEQ_CHARS && + *in_begin == backslash_char && *thrust::next(in_begin) == 'u') { + // Try to parse hex value following the '\' and 'u' characters from what may be a UTF16 low + // surrogate + hex_low_val = parse_unicode_hex(thrust::next(in_begin, 2)); + } + + // This is indeed a UTF16 surrogate pair + if (hex_val >= UTF16_HIGH_SURROGATE_BEGIN && hex_val < UTF16_HIGH_SURROGATE_END && + hex_low_val >= UTF16_LOW_SURROGATE_BEGIN && hex_low_val < UTF16_LOW_SURROGATE_END) { + // Skip over the second \uXXXX sequence + thrust::advance(in_begin, NUM_UNICODE_ESC_SEQ_CHARS); + + // Compute UTF16-encoded code point + uint32_t unicode_code_point 
= 0x10000 + ((hex_val - UTF16_HIGH_SURROGATE_BEGIN) << 10) + + (hex_low_val - UTF16_LOW_SURROGATE_BEGIN); + auto utf8_chars = strings::detail::codepoint_to_utf8(unicode_code_point); + bytes += write_utf8_char(utf8_chars, d_buffer); + } else { + // Just a single \uXXXX sequence + auto utf8_chars = strings::detail::codepoint_to_utf8(hex_val); + bytes += write_utf8_char(utf8_chars, d_buffer); + } + } + + // The last character of the input is a backslash -> "fail"/null for this item + if (is_prev_char_escape) { return {bytes, data_casting_result::PARSING_FAILURE}; } + return {bytes, data_casting_result::PARSING_SUCCESS}; +} + +/** + * @brief Data structure to hold 1 bit per thread with previous `UNICODE_LOOK_BACK` bits stored in a + * warp. + * + * @tparam num_warps number of warps in the block + */ +template +struct bitfield_warp { + static constexpr auto UNICODE_LOOK_BACK{5}; + // 5 because for skipping unicode hex chars, look back up to 5 chars are needed. + // 5+32 for each warp. + bool is_slash[num_warps][UNICODE_LOOK_BACK + cudf::detail::warp_size]; + + /// Sets all bits to 0 + __device__ void reset(unsigned warp_id) + { + if (threadIdx.x % cudf::detail::warp_size < UNICODE_LOOK_BACK) { + is_slash[warp_id][threadIdx.x % cudf::detail::warp_size] = 0; + } + is_slash[warp_id][threadIdx.x % cudf::detail::warp_size + UNICODE_LOOK_BACK] = 0; + } + + /// Shifts UNICODE_LOOK_BACK bits to the left to hold the previous UNICODE_LOOK_BACK bits + __device__ void shift(unsigned warp_id) + { + if (threadIdx.x % cudf::detail::warp_size < UNICODE_LOOK_BACK) + is_slash[warp_id][threadIdx.x % cudf::detail::warp_size] = + is_slash[warp_id][cudf::detail::warp_size + threadIdx.x % cudf::detail::warp_size]; + __syncwarp(); + } + + /// Each thread in a warp sets its own bit. + __device__ void set_bits(unsigned warp_id, bool is_escaping_backslash) + { + is_slash[warp_id][UNICODE_LOOK_BACK + threadIdx.x % cudf::detail::warp_size] = + is_escaping_backslash; + __syncwarp(); + } + + /// Each thread in a warp gets the requested bit. + __device__ bool get_bit(unsigned warp_id, int bit_index) + { + return is_slash[warp_id][UNICODE_LOOK_BACK + bit_index]; + } +}; + +/** + * @brief Data structure to hold 1 bit per thread with previous `UNICODE_LOOK_BACK` bits stored in a + * block. + * + * @tparam num_warps number of warps in the block + */ +template +struct bitfield_block { + static constexpr auto UNICODE_LOOK_BACK{5}; + // 5 because for skipping unicode hex chars, look back up to 5 chars are needed. + // 5 + num_warps*32 for entire block + bool is_slash[UNICODE_LOOK_BACK + num_warps * cudf::detail::warp_size]; + + /// Sets all bits to 0 + __device__ void reset(unsigned warp_id) + { + if (threadIdx.x < UNICODE_LOOK_BACK) { is_slash[threadIdx.x] = 0; } + is_slash[threadIdx.x + UNICODE_LOOK_BACK] = 0; + } + + /// Shifts UNICODE_LOOK_BACK bits to the left to hold the previous UNICODE_LOOK_BACK bits + __device__ void shift(unsigned warp_id) + { + if (threadIdx.x < UNICODE_LOOK_BACK) + is_slash[threadIdx.x] = is_slash[num_warps * cudf::detail::warp_size + threadIdx.x]; + __syncthreads(); + } + + /// Each thread in a block sets its own bit. + __device__ void set_bits(unsigned warp_id, bool is_escaping_backslash) + { + is_slash[UNICODE_LOOK_BACK + threadIdx.x] = is_escaping_backslash; + __syncthreads(); + } + + /// Each thread in a block gets the requested bit. 
+ __device__ bool get_bit(unsigned warp_id, int bit_index) + { + return is_slash[UNICODE_LOOK_BACK + bit_index]; + } +}; + +// Algorithm: warp/block parallel version of string_parse and process_string() +// Decoding character classes (u8, u16, \*, *): +// character count: input->output +// \uXXXX 6->2/3/4 +// \uXXXX\uXXXX 12->2/3/4 +// \" 2->1 +// * 1->1 +// +// ERROR conditions. (all collaborating threads quit) +// c=='\' & curr_idx == end_idx-1; +// [c-1]=='\' & get_escape[c]==NEC +// [c-1]=='\' & [c]=='u' & end_idx-curr_idx < UNICODE_HEX_DIGIT_COUNT +// [c-1]=='\' & [c]=='u' & end_idx-curr_idx >= UNICODE_HEX_DIGIT_COUNT && non-hex +// +// skip conditions. (current thread skips this char, no output) +// c=='\' skip. (Escaping char only) +// [c-2]=='\' && [c-1]=='u' for [2,1], [3,2] [4,5], [5, 6], skip. +// +// write conditions. (write to d_buffer) +// [c-1]!='\' & [c]!='\' write [c] +// [c-1]!='\' & [c]=='\' skip (already covered in skip conditions) +// [c-1]=='\' & [c]!=NEC && [c]!=UNICODE_SEQ, write [c] +// [c-1]=='\' & [c]=='u' & end_idx-curr_idx >= UNICODE_HEX_DIGIT_COUNT && hex, DECODE +// [c+1:4]=curr_hex_val +// // if [c+5]=='\' & [c+6]=='u' & end_idx-curr_idx >= UNICODE_HEX_DIGIT_COUNT && +// hex,DECODE [c+7:4]=next_hex_val +// // if [c-7]=='\' & [c-6]=='u' & end_idx-curr_idx >= UNICODE_HEX_DIGIT_COUNT && +// hex,DECODE [c-5:4]=prev_hex_val prev_hex_val, curr_hex_val, next_hex_val +// // if prev_hex_val in high, curr_hex_val in low, skip. +// // if curr_hex_val in high, next_hex_val in low, write [u16] +// if curr_hex_val not in high, write [u8] +// before writing, find num of output characters per threads, +// then do intra-warp/intra-block scan for out_idx +// propagate offset from next iteration to carry forward. +// Uses 1 warp per string or 1 block per string + +/** + * @brief Warp/Block parallel version of string_parse functor + * + * @tparam is_warp True if 1 warp per string, False if 1 block per string + * @tparam num_warps Number of warps per block + * @tparam str_tuple_it Iterator type for tuple with string pointer and its length + * @param str_tuples iterator of tuple with string pointer and its length + * @param total_out_strings Number of string rows to be processed + * @param str_counter Counter to keep track of processed number of strings + * @param null_mask Null mask + * @param null_count_data pointer to store null count + * @param options Settings for controlling string processing behavior + * @param d_offsets Offsets to identify where to store the results for each string + * @param d_chars Character array to store the characters of strings + */ +template +__global__ void parse_fn_string_parallel(str_tuple_it str_tuples, + size_type total_out_strings, + size_type* str_counter, + bitmask_type* null_mask, + size_type* null_count_data, + cudf::io::parse_options_view const options, + size_type* d_offsets, + char* d_chars) +{ + constexpr auto BLOCK_SIZE = + is_warp ? cudf::detail::warp_size : cudf::detail::warp_size * num_warps; + size_type lane = is_warp ? 
(threadIdx.x % BLOCK_SIZE) : threadIdx.x;
+
+  // get 1-string index per warp/block
+  auto get_next_string = [&]() {
+    if constexpr (is_warp) {
+      size_type istring;
+      if (lane == 0) { istring = atomicAdd(str_counter, 1); }
+      return __shfl_sync(0xffffffff, istring, 0);
+    } else {
+      // Ensure lane 0 doesn't update istring before all threads have read the previous iteration's
+      // istring value
+      __syncthreads();
+      __shared__ size_type istring;
+      if (lane == 0) { istring = atomicAdd(str_counter, 1); }
+      __syncthreads();
+      return istring;
+    }
+  };
+  // grid-stride loop.
+  for (size_type istring = get_next_string(); istring < total_out_strings;
+       istring = get_next_string()) {
+    // skip nulls
+    if (null_mask != nullptr && not bit_is_set(null_mask, istring)) {
+      if (!d_chars && lane == 0) d_offsets[istring] = 0;
+      continue;  // grid-stride return;
+    }
+
+    auto in_begin = str_tuples[istring].first;
+    auto in_end = in_begin + str_tuples[istring].second;
+    auto const num_in_chars = str_tuples[istring].second;
+    if constexpr (is_warp) {
+      if (num_in_chars <= SINGLE_THREAD_THRESHOLD or num_in_chars > WARP_THRESHOLD) continue;
+    } else {
+      if (num_in_chars <= WARP_THRESHOLD) continue;
+    }
+
+    // Check if the value corresponds to the null literal
+    if (!d_chars) {
+      auto const is_null_literal = serialized_trie_contains(
+        options.trie_na, {in_begin, static_cast(num_in_chars)});
+      if (is_null_literal && null_mask != nullptr) {
+        if (lane == 0) {
+          clear_bit(null_mask, istring);
+          atomicAdd(null_count_data, 1);
+          if (!d_chars) d_offsets[istring] = 0;
+        }
+        continue;  // grid-stride return;
+      }
+    }
+    // String values are indicated by keeping the quote character
+    bool const is_string_value =
+      num_in_chars >= 2LL &&
+      (options.quotechar == '\0' ||
+       (*in_begin == options.quotechar) && (*thrust::prev(in_end) == options.quotechar));
+    char* d_buffer = d_chars ? d_chars + d_offsets[istring] : nullptr;
+
+    // Copy literal/numeric value
+    if (not is_string_value) {
+      if (!d_chars) {
+        if (lane == 0) { d_offsets[istring] = in_end - in_begin; }
+      } else {
+        for (thread_index_type char_index = lane; char_index < (in_end - in_begin);
+             char_index += BLOCK_SIZE) {
+          d_buffer[char_index] = in_begin[char_index];
+        }
+      }
+      continue;  // grid-stride return;
+    }
+
+    // Exclude beginning and ending quote chars from string range
+    if (!options.keepquotes) {
+      ++in_begin;
+      --in_end;
+    }
+    // warp-parallelized or block-parallelized process_string()
+
+    auto is_hex = [](auto ch) {
+      return (ch >= '0' && ch <= '9') || (ch >= 'A' && ch <= 'F') || (ch >= 'a' && ch <= 'f');
+    };
+
+    // for backslash scan calculation: is_previous_escaping_backslash
+    [[maybe_unused]] auto warp_id = threadIdx.x / cudf::detail::warp_size;
+    bool init_state_reg;
+    __shared__ bool init_state_shared;
+    size_type last_offset_reg;
+    __shared__ size_type last_offset_shared;
+    bool& init_state(is_warp ? init_state_reg : init_state_shared);
+    size_type& last_offset(is_warp ? last_offset_reg : last_offset_shared);
+    if (is_warp || lane == 0) {
+      init_state = false;
+      last_offset = 0;
+    }
+    using bitfield =
+      std::conditional_t, bitfield_block>;
+    __shared__ bitfield is_slash;
+    is_slash.reset(warp_id);
+    __syncthreads();
+    // 0-31, 32-63, ... i*32-n.
+    // entire warp executes but with mask.
+    for (thread_index_type char_index = lane;
+         char_index < cudf::util::round_up_safe(in_end - in_begin, static_cast(BLOCK_SIZE));
+         char_index += BLOCK_SIZE) {
+      bool const is_within_bounds = char_index < (in_end - in_begin);
+      auto const MASK = is_warp ?
__ballot_sync(0xffffffff, is_within_bounds) : 0xffffffff; + auto const c = is_within_bounds ? in_begin[char_index] : '\0'; + auto const prev_c = (char_index > 0 and is_within_bounds) ? in_begin[char_index - 1] : '\0'; + auto const escaped_char = get_escape_char(c); + + bool is_escaping_backslash{false}; + [[maybe_unused]] bool is_prev_escaping_backslash{false}; + // To check current is backslash by checking if previous is backslash. + // curr = !prev & c=='\\' + // So, scan is required from beginning of string. + // State table approach (intra-warp FST) (intra-block FST) + // 2 states: Not-Slash(NS), Slash(S). + // prev / * + // NS S NS + // S NS NS + // After inclusive scan, all current S states translate to escaping backslash. + // All escaping backslash should be skipped. + + struct state_table { + // using bit fields instead of state[2] + bool state0 : 1; + bool state1 : 1; + bool inline __device__ get(bool init_state) const { return init_state ? state1 : state0; } + }; + state_table curr{is_within_bounds && c == '\\', false}; // state transition vector. + auto composite_op = [](state_table op1, state_table op2) { + // equivalent of state_table{op2.state[op1.state[0]], op2.state[op1.state[1]]}; + return state_table{op1.state0 ? op2.state1 : op2.state0, + op1.state1 ? op2.state1 : op2.state0}; + }; + state_table scanned; + // inclusive scan of escaping backslashes + if constexpr (is_warp) { + using SlashScan = cub::WarpScan; + __shared__ typename SlashScan::TempStorage temp_slash[num_warps]; + SlashScan(temp_slash[warp_id]).InclusiveScan(curr, scanned, composite_op); + is_escaping_backslash = scanned.get(init_state); + init_state = __shfl_sync(MASK, is_escaping_backslash, BLOCK_SIZE - 1); + __syncwarp(); + is_slash.shift(warp_id); + is_slash.set_bits(warp_id, is_escaping_backslash); + is_prev_escaping_backslash = is_slash.get_bit(warp_id, lane - 1); + } else { + using SlashScan = cub::BlockScan; + __shared__ typename SlashScan::TempStorage temp_slash; + SlashScan(temp_slash).InclusiveScan(curr, scanned, composite_op); + is_escaping_backslash = scanned.get(init_state); + __syncthreads(); + if (threadIdx.x == BLOCK_SIZE - 1) init_state = is_escaping_backslash; + __syncthreads(); + is_slash.shift(warp_id); + is_slash.set_bits(warp_id, is_escaping_backslash); + is_prev_escaping_backslash = is_slash.get_bit(warp_id, lane - 1); + // There is another __syncthreads() at the end of for-loop. + } + + // String with parsing errors are made as null + bool error = false; + if (is_within_bounds) { + // curr=='\' and end, or prev=='\' and curr=='u' and end-curr < UNICODE_HEX_DIGIT_COUNT + // or prev=='\' and curr=='u' and end-curr >= UNICODE_HEX_DIGIT_COUNT and any non-hex + error |= (is_escaping_backslash /*c == '\\'*/ && char_index == (in_end - in_begin) - 1); + error |= (is_prev_escaping_backslash && escaped_char == NON_ESCAPE_CHAR); + error |= (is_prev_escaping_backslash && c == 'u' && + ((in_begin + char_index + UNICODE_HEX_DIGIT_COUNT >= in_end) | + !is_hex(in_begin[char_index + 1]) | !is_hex(in_begin[char_index + 2]) | + !is_hex(in_begin[char_index + 3]) | !is_hex(in_begin[char_index + 4]))); + } + // Make sure all threads have no errors before continuing + if constexpr (is_warp) { + error = __any_sync(MASK, error); + } else { + using ErrorReduce = cub::BlockReduce; + __shared__ typename ErrorReduce::TempStorage temp_storage_error; + __shared__ bool error_reduced; + error_reduced = ErrorReduce(temp_storage_error).Sum(error); // TODO use cub::LogicalOR. 
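+        // (Summing the per-thread bool error flags yields a nonzero value
+        // iff any thread saw an error, so this Sum already acts as the
+        // logical OR mentioned in the TODO.)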
+ // only valid in thread0, so shared memory is used for broadcast. + __syncthreads(); + error = error_reduced; + } + // If any thread has an error, skip the rest of the string and make this string as null + if (error) { + if (!d_chars && lane == 0) { + if (null_mask != nullptr) { + clear_bit(null_mask, istring); + atomicAdd(null_count_data, 1); + } + last_offset = 0; + d_offsets[istring] = 0; + } + if constexpr (!is_warp) { __syncthreads(); } + break; // gride-stride return; + } + + // Skipping non-copied escaped characters + bool skip = !is_within_bounds; // false; + // skip \ for \" \\ \/ \b \f \n \r \t \uXXXX + skip |= is_escaping_backslash; + if (is_within_bounds) { + // skip X for each X in \uXXXX + skip |= + char_index >= 2 && is_slash.get_bit(warp_id, lane - 2) && in_begin[char_index - 1] == 'u'; + skip |= + char_index >= 3 && is_slash.get_bit(warp_id, lane - 3) && in_begin[char_index - 2] == 'u'; + skip |= + char_index >= 4 && is_slash.get_bit(warp_id, lane - 4) && in_begin[char_index - 3] == 'u'; + skip |= + char_index >= 5 && is_slash.get_bit(warp_id, lane - 5) && in_begin[char_index - 4] == 'u'; + } + int this_num_out = 0; + cudf::char_utf8 write_char{}; + + if (!skip) { + // 1. Unescaped character + if (!is_prev_escaping_backslash) { + this_num_out = 1; + // writes char directly for non-unicode + } else { + // 2. Escaped character + if (escaped_char != UNICODE_SEQ) { + this_num_out = 1; + // writes char directly for non-unicode + } else { + // 3. Unicode + // UTF8 \uXXXX + auto hex_val = parse_unicode_hex(in_begin + char_index + 1); + auto hex_low_val = 0; + // UTF16 \uXXXX\uXXXX + // Note: no need for scanned_backslash below because we already know that + // only '\u' check is enough. + if (hex_val >= UTF16_HIGH_SURROGATE_BEGIN && hex_val < UTF16_HIGH_SURROGATE_END && + (in_begin + char_index + UNICODE_HEX_DIGIT_COUNT + NUM_UNICODE_ESC_SEQ_CHARS) < + in_end && + in_begin[char_index + NUM_UNICODE_ESC_SEQ_CHARS - 1] == '\\' && + in_begin[char_index + NUM_UNICODE_ESC_SEQ_CHARS] == 'u') { + hex_low_val = parse_unicode_hex(in_begin + char_index + 1 + 6); + } + if (hex_val >= UTF16_HIGH_SURROGATE_BEGIN && hex_val < UTF16_HIGH_SURROGATE_END && + hex_low_val >= UTF16_LOW_SURROGATE_BEGIN && hex_low_val < UTF16_LOW_SURROGATE_END) { + // Compute UTF16-encoded code point + uint32_t unicode_code_point = 0x10000 + + ((hex_val - UTF16_HIGH_SURROGATE_BEGIN) << 10) + + (hex_low_val - UTF16_LOW_SURROGATE_BEGIN); + write_char = strings::detail::codepoint_to_utf8(unicode_code_point); + this_num_out = strings::detail::bytes_in_char_utf8(write_char); + } else { + // if hex_val is high surrogate, ideally it should be parsing failure. + // but skipping it as other parsers do this too. + if (hex_val >= UTF16_LOW_SURROGATE_BEGIN && hex_val < UTF16_LOW_SURROGATE_END) { + // Ideally this should be skipped if previous char is high surrogate. + skip = true; + this_num_out = 0; + write_char = 0; + } else { + // if UTF8 + write_char = strings::detail::codepoint_to_utf8(hex_val); + this_num_out = strings::detail::bytes_in_char_utf8(write_char); + } + } + } + } + } // !skip end. 
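+      // Example trace of the flags above for the 8-character value \\\u0041
+      // (an escaped backslash followed by a unicode escape): the scan marks
+      // positions 0 and 2 as escaping backslashes (skipped), position 1
+      // emits the literal '\', position 3 (the 'u') decodes the four hex
+      // digits and emits 'A', and positions 4-7 are skipped via the
+      // is_slash look-back, so the offset scan below yields the 2-byte
+      // output "\A".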
+ { + // compute offset to write output for each thread + size_type offset; + if constexpr (is_warp) { + using OffsetScan = cub::WarpScan; + __shared__ typename OffsetScan::TempStorage temp_storage[num_warps]; + OffsetScan(temp_storage[warp_id]).ExclusiveSum(this_num_out, offset); + } else { + using OffsetScan = cub::BlockScan; + __shared__ typename OffsetScan::TempStorage temp_storage; + OffsetScan(temp_storage).ExclusiveSum(this_num_out, offset); + __syncthreads(); + } + offset += last_offset; + // Write output + if (d_chars && !skip) { + auto const is_not_unicode = (!is_prev_escaping_backslash) || escaped_char != UNICODE_SEQ; + if (is_not_unicode) { + *(d_buffer + offset) = (!is_prev_escaping_backslash) ? c : escaped_char; + } else { + strings::detail::from_char_utf8(write_char, d_buffer + offset); + } + } + offset += this_num_out; + if constexpr (is_warp) { + last_offset = __shfl_sync(0xffffffff, offset, BLOCK_SIZE - 1); + } else { + __syncthreads(); + if (threadIdx.x == BLOCK_SIZE - 1) last_offset = offset; + __syncthreads(); + } + } + } // char for-loop + if (!d_chars && lane == 0) { d_offsets[istring] = last_offset; } + } // grid-stride for-loop +} + +template +struct string_parse { + str_tuple_it str_tuples; + bitmask_type* null_mask; + size_type* null_count_data; + cudf::io::parse_options_view const options; + size_type* d_offsets{}; + char* d_chars{}; + + __device__ void operator()(size_type idx) + { + if (null_mask != nullptr && not bit_is_set(null_mask, idx)) { + if (!d_chars) d_offsets[idx] = 0; + return; + } + auto const in_begin = str_tuples[idx].first; + auto const in_end = in_begin + str_tuples[idx].second; + auto const num_in_chars = str_tuples[idx].second; + + if (num_in_chars > SINGLE_THREAD_THRESHOLD) return; + + // Check if the value corresponds to the null literal + if (!d_chars) { + auto const is_null_literal = serialized_trie_contains( + options.trie_na, {in_begin, static_cast(num_in_chars)}); + if (is_null_literal && null_mask != nullptr) { + clear_bit(null_mask, idx); + atomicAdd(null_count_data, 1); + if (!d_chars) d_offsets[idx] = 0; + return; + } + } + + char* d_buffer = d_chars ? 
d_chars + d_offsets[idx] : nullptr; + auto str_process_info = process_string(in_begin, in_end, d_buffer, options); + if (str_process_info.result != data_casting_result::PARSING_SUCCESS) { + if (null_mask != nullptr) { + clear_bit(null_mask, idx); + atomicAdd(null_count_data, 1); + } + if (!d_chars) d_offsets[idx] = 0; + } else { + if (!d_chars) d_offsets[idx] = str_process_info.bytes; + } + } +}; + +template +struct to_string_view_pair { + SymbolT const* data; + to_string_view_pair(SymbolT const* _data) : data(_data) {} + __device__ auto operator()(thrust::tuple ip) + { + return thrust::pair{data + thrust::get<0>(ip), + static_cast(thrust::get<1>(ip))}; + } +}; + +template +static std::unique_ptr parse_string(string_view_pair_it str_tuples, + size_type col_size, + rmm::device_buffer&& null_mask, + rmm::device_scalar& d_null_count, + cudf::io::parse_options_view const& options, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + // CUDF_FUNC_RANGE(); + + auto const max_length = thrust::transform_reduce( + rmm::exec_policy(stream), + str_tuples, + str_tuples + col_size, + [] __device__(auto t) { return t.second; }, + size_type{0}, + thrust::maximum{}); + + auto offsets = cudf::make_numeric_column( + data_type{type_to_id()}, col_size + 1, cudf::mask_state::UNALLOCATED, stream, mr); + auto d_offsets = offsets->mutable_view().data(); + auto null_count_data = d_null_count.data(); + + auto single_thread_fn = string_parse{ + str_tuples, static_cast(null_mask.data()), null_count_data, options, d_offsets}; + thrust::for_each_n(rmm::exec_policy(stream), + thrust::make_counting_iterator(0), + col_size, + single_thread_fn); + + constexpr auto warps_per_block = 8; + constexpr int threads_per_block = cudf::detail::warp_size * warps_per_block; + auto num_blocks = cudf::util::div_rounding_up_safe(col_size, warps_per_block); + auto str_counter = cudf::numeric_scalar(size_type{0}, true, stream); + + // TODO run these independent kernels in parallel streams. 
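+  // Size-based dispatch: strings of at most SINGLE_THREAD_THRESHOLD (128)
+  // bytes were handled above with one thread per string; strings up to
+  // WARP_THRESHOLD (16K) bytes get one warp each, and longer strings one
+  // block each. Each kernel runs twice: first with d_chars == nullptr to
+  // compute per-string output sizes, then again to write the characters.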
+ if (max_length > SINGLE_THREAD_THRESHOLD) { + parse_fn_string_parallel + <<>>( + str_tuples, + col_size, + str_counter.data(), + static_cast(null_mask.data()), + null_count_data, + options, + d_offsets, + nullptr); + } + + if (max_length > WARP_THRESHOLD) { + // for strings longer than WARP_THRESHOLD, 1 block per string + str_counter.set_value(0, stream); + parse_fn_string_parallel + <<>>( + str_tuples, + col_size, + str_counter.data(), + static_cast(null_mask.data()), + null_count_data, + options, + d_offsets, + nullptr); + } + auto const bytes = + cudf::detail::sizes_to_offsets(d_offsets, d_offsets + col_size + 1, d_offsets, stream); + CUDF_EXPECTS(bytes <= std::numeric_limits::max(), + "Size of output exceeds the column size limit", + std::overflow_error); + + // CHARS column + std::unique_ptr chars = + strings::detail::create_chars_child_column(static_cast(bytes), stream, mr); + auto d_chars = chars->mutable_view().data(); + + single_thread_fn.d_chars = d_chars; + thrust::for_each_n(rmm::exec_policy(stream), + thrust::make_counting_iterator(0), + col_size, + single_thread_fn); + + if (max_length > SINGLE_THREAD_THRESHOLD) { + str_counter.set_value(0, stream); + parse_fn_string_parallel + <<>>( + str_tuples, + col_size, + str_counter.data(), + static_cast(null_mask.data()), + null_count_data, + options, + d_offsets, + d_chars); + } + + if (max_length > WARP_THRESHOLD) { + str_counter.set_value(0, stream); + // for strings longer than WARP_THRESHOLD, 1 block per string + parse_fn_string_parallel + <<>>( + str_tuples, + col_size, + str_counter.data(), + static_cast(null_mask.data()), + null_count_data, + options, + d_offsets, + d_chars); + } + + return make_strings_column(col_size, + std::move(offsets), + std::move(chars), + d_null_count.value(stream), + std::move(null_mask)); +} + +std::unique_ptr parse_data( + const char* data, + thrust::zip_iterator> offset_length_begin, + size_type col_size, + data_type col_type, + rmm::device_buffer&& null_mask, + size_type null_count, + cudf::io::parse_options_view const& options, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + CUDF_FUNC_RANGE(); + + if (col_size == 0) { return make_empty_column(col_type); } + auto d_null_count = rmm::device_scalar(null_count, stream); + auto null_count_data = d_null_count.data(); + + // Prepare iterator that returns (string_ptr, string_length)-pairs needed by type conversion + auto str_tuples = thrust::make_transform_iterator(offset_length_begin, to_string_view_pair{data}); + + if (col_type == cudf::data_type{cudf::type_id::STRING}) { + return parse_string(str_tuples, + col_size, + std::forward(null_mask), + d_null_count, + options, + stream, + mr); + } + + auto out_col = + make_fixed_width_column(col_type, col_size, std::move(null_mask), null_count, stream, mr); + auto output_dv_ptr = mutable_column_device_view::create(*out_col, stream); + + // use `ConvertFunctor` to convert non-string values + thrust::for_each_n( + rmm::exec_policy(stream), + thrust::make_counting_iterator(0), + col_size, + [str_tuples, col = *output_dv_ptr, options, col_type, null_count_data] __device__( + size_type row) { + if (col.is_null(row)) { return; } + auto const in = str_tuples[row]; + + auto const is_null_literal = + serialized_trie_contains(options.trie_na, {in.first, static_cast(in.second)}); + + if (is_null_literal) { + col.set_null(row); + atomicAdd(null_count_data, 1); + return; + } + + // If this is a string value, remove quotes + auto [in_begin, in_end] = trim_quotes(in.first, in.first + in.second, 
options.quotechar); + + auto const is_parsed = cudf::type_dispatcher(col_type, + ConvertFunctor{}, + in_begin, + in_end, + col.data(), + row, + col_type, + options, + false); + if (not is_parsed) { + col.set_null(row); + atomicAdd(null_count_data, 1); + } + }); + + out_col->set_null_count(d_null_count.value(stream)); + + return out_col; +} + +} // namespace cudf::io::json::detail diff --git a/cpp/src/io/utilities/parsing_utils.cuh b/cpp/src/io/utilities/parsing_utils.cuh index 5c3af588411..43d62fcd513 100644 --- a/cpp/src/io/utilities/parsing_utils.cuh +++ b/cpp/src/io/utilities/parsing_utils.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -116,6 +116,28 @@ struct parse_options { } }; +/** + * @brief Returns the escaped characters for a given character. + * + * @param escaped_char The character to escape. + * @return The escaped characters for a given character. + */ +__device__ __forceinline__ thrust::pair get_escaped_char(char escaped_char) +{ + switch (escaped_char) { + case '"': return {'\\', '"'}; + case '\\': return {'\\', '\\'}; + case '/': return {'\\', '/'}; + case '\b': return {'\\', 'b'}; + case '\f': return {'\\', 'f'}; + case '\n': return {'\\', 'n'}; + case '\r': return {'\\', 'r'}; + case '\t': return {'\\', 't'}; + // case 'u': return UNICODE_SEQ; + default: return {'\0', escaped_char}; + } +} + /** * @brief Returns the numeric value of an ASCII/UTF-8 character. * Handles hexadecimal digits, both uppercase and lowercase diff --git a/cpp/src/io/utilities/string_parsing.hpp b/cpp/src/io/utilities/string_parsing.hpp new file mode 100644 index 00000000000..12fc0a5b2e7 --- /dev/null +++ b/cpp/src/io/utilities/string_parsing.hpp @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#pragma once + +#include + +#include +#include + +#include + +#include +#include + +namespace cudf::io { +namespace detail { + +/** + * @brief Infers data type for a given JSON string input `data`. 
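+ *
+ * For example, the fields {"1", "2"} infer to INT64, {"1", "2.0"} to
+ * FLOAT64, and {"1", "abc"} to STRING.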
+ * + * @throw cudf::logic_error if input size is 0 + * @throw cudf::logic_error if date time is not inferred as string + * @throw cudf::logic_error if data type inference failed + * + * @param options View of inference options + * @param data JSON string input + * @param offset_length_begin The beginning of an offset-length tuple sequence + * @param size Size of the string input + * @param stream CUDA stream used for device memory operations and kernel launches + * @return The inferred data type + */ +cudf::data_type infer_data_type( + cudf::io::json_inference_options_view const& options, + device_span data, + thrust::zip_iterator> offset_length_begin, + std::size_t const size, + rmm::cuda_stream_view stream); +} // namespace detail + +namespace json::detail { + +/** + * @brief Parses the data from an iterator of string views, casting it to the given target data type + * + * @param data string input base pointer + * @param offset_length_begin The beginning of an offset-length tuple sequence + * @param col_size The total number of items of this column + * @param col_type The column's target data type + * @param null_mask A null mask that renders certain items from the input invalid + * @param options Settings for controlling the processing behavior + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr The resource to be used for device memory allocation + * @return The column that contains the parsed data + */ +std::unique_ptr parse_data( + const char* data, + thrust::zip_iterator> offset_length_begin, + size_type col_size, + data_type col_type, + rmm::device_buffer&& null_mask, + size_type null_count, + cudf::io::parse_options_view const& options, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr); +} // namespace json::detail +} // namespace cudf::io diff --git a/cpp/src/io/utilities/type_inference.cuh b/cpp/src/io/utilities/type_inference.cu similarity index 84% rename from cpp/src/io/utilities/type_inference.cuh rename to cpp/src/io/utilities/type_inference.cu index a9ccc80ca33..79a5c8f1c4c 100644 --- a/cpp/src/io/utilities/type_inference.cuh +++ b/cpp/src/io/utilities/type_inference.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, NVIDIA CORPORATION. + * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,23 +13,16 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -#pragma once #include -#include +#include #include #include -#include #include -#include -#include #include -#include -#include - #include #include @@ -114,14 +107,14 @@ __device__ __inline__ bool is_like_float(std::size_t len, * * @param[in] options View of inference options * @param[in] data JSON string input - * @param[in] column_strings_begin The beginning of an offset-length tuple sequence + * @param[in] offset_length_begin The beginning of an offset-length tuple sequence * @param[in] size Size of the string input * @param[out] column_info Histogram of column type counters */ template __global__ void infer_column_type_kernel(OptionsView options, device_span data, - ColumnStringIter column_strings_begin, + ColumnStringIter offset_length_begin, std::size_t size, cudf::io::column_type_histogram* column_info) { @@ -129,8 +122,8 @@ __global__ void infer_column_type_kernel(OptionsView options, for (auto idx = threadIdx.x + blockDim.x * blockIdx.x; idx < size; idx += gridDim.x * blockDim.x) { - auto const field_offset = thrust::get<0>(*(column_strings_begin + idx)); - auto const field_len = thrust::get<1>(*(column_strings_begin + idx)); + auto const field_offset = thrust::get<0>(*(offset_length_begin + idx)); + auto const field_len = thrust::get<1>(*(offset_length_begin + idx)); auto const field_begin = data.begin() + field_offset; if (cudf::detail::serialized_trie_contains( @@ -234,7 +227,7 @@ __global__ void infer_column_type_kernel(OptionsView options, * * @param options View of inference options * @param data JSON string input - * @param column_strings_begin The beginning of an offset-length tuple sequence + * @param offset_length_begin The beginning of an offset-length tuple sequence * @param size Size of the string input * @param stream CUDA stream used for device memory operations and kernel launches * @return A histogram containing column-specific type counters @@ -242,7 +235,7 @@ __global__ void infer_column_type_kernel(OptionsView options, template cudf::io::column_type_histogram infer_column_type(OptionsView const& options, cudf::device_span data, - ColumnStringIter column_strings_begin, + ColumnStringIter offset_length_begin, std::size_t const size, rmm::cuda_stream_view stream) { @@ -254,40 +247,22 @@ cudf::io::column_type_histogram infer_column_type(OptionsView const& options, d_column_info.data(), 0, sizeof(cudf::io::column_type_histogram), stream.value())); infer_column_type_kernel<<>>( - options, data, column_strings_begin, size, d_column_info.data()); + options, data, offset_length_begin, size, d_column_info.data()); return d_column_info.value(stream); } -/** - * @brief Infers data type for a given JSON string input `data`. 
- * - * @throw cudf::logic_error if input size is 0 - * @throw cudf::logic_error if date time is not inferred as string - * @throw cudf::logic_error if data type inference failed - * - * @tparam OptionsView Type of inference options view - * @tparam ColumnStringIter Iterator type whose `value_type` is convertible to - * `thrust::tuple` - * - * @param options View of inference options - * @param data JSON string input - * @param column_strings_begin The beginning of an offset-length tuple sequence - * @param size Size of the string input - * @param stream CUDA stream used for device memory operations and kernel launches - * @return The inferred data type - */ -template -cudf::data_type infer_data_type(OptionsView const& options, - device_span data, - ColumnStringIter column_strings_begin, - std::size_t const size, - rmm::cuda_stream_view stream) +cudf::data_type infer_data_type( + cudf::io::json_inference_options_view const& options, + device_span data, + thrust::zip_iterator> offset_length_begin, + std::size_t const size, + rmm::cuda_stream_view stream) { CUDF_FUNC_RANGE(); CUDF_EXPECTS(size != 0, "No data available for data type inference.\n"); - auto const h_column_info = infer_column_type(options, data, column_strings_begin, size, stream); + auto const h_column_info = infer_column_type(options, data, offset_length_begin, size, stream); auto get_type_id = [&](auto const& cinfo) { auto int_count_total = diff --git a/cpp/tests/io/json_test.cpp b/cpp/tests/io/json_test.cpp index 220f1a3391f..7c911ac2e04 100644 --- a/cpp/tests/io/json_test.cpp +++ b/cpp/tests/io/json_test.cpp @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -1370,6 +1371,124 @@ TEST_F(JsonReaderTest, JsonExperimentalLines) CUDF_TEST_EXPECT_TABLES_EQUAL(legacy_reader_table.tbl->view(), table.tbl->view()); } +TEST_F(JsonReaderTest, JsonLongString) +{ + // Unicode + // 0000-FFFF Basic Multilingual Plane + // 10000-10FFFF Supplementary Plane + cudf::test::strings_column_wrapper col1{ + { + "\"\\/\b\f\n\r\t", + "\"", + "\\", + "/", + "\b", + "\f\n", + "\r\t", + "$€", + "ராபிட்ஸ்", + "C𝞵𝓓𝒻", + "", // null + "", // null + "கார்த்தி", + "CႮ≪ㇳ䍏凹沦王辿龸ꁗ믜스폶ﴠ", // 0000-FFFF + "𐀀𑿪𒐦𓃰𔙆 𖦆𗿿𘳕𚿾[↳] 𜽆𝓚𞤁🄰", // 10000-1FFFF + "𠘨𡥌𢗉𣇊𤊩𥅽𦉱𧴱𨁲𩁹𪐢𫇭𬬭𭺷𮊦屮", // 20000-2FFFF + "𰾑𱔈𲍉", // 30000-3FFFF + R"("$€ \u0024\u20ac \\u0024\\u20ac \\\u0024\\\u20ac \\\\u0024\\\\u20ac)", + R"( \\\\\\\\\\\\\\\\)", + R"(\\\\\\\\\\\\\\\\)", + R"(\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\)", + R"( \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\)", + R"( \\abcd)", + R"( \\\\\\\\\\\\\\\\ \\\\\\\\\\\\\\\\)", + R"( \\\\\\\\\\\\\\\\ \\\\\\\\\\\\\\\\)", + }, + cudf::test::iterators::nulls_at({10, 11})}; + + cudf::test::fixed_width_column_wrapper repeat_times{ + {1, 2, 3, 4, 5, 6, 7, 8, 9, 13, 19, 37, 81, 161, 323, 631, 1279, 10, 1, 2, 1, 100, 1000, 1, 3}, + cudf::test::iterators::no_nulls()}; + auto d_col2 = cudf::strings::repeat_strings(cudf::strings_column_view{col1}, repeat_times); + auto col2 = d_col2->view(); + cudf::table_view const tbl_view{{col1, col2, repeat_times}}; + cudf::io::table_metadata mt{{{"col1"}, {"col2"}, {"int16"}}}; + + std::vector out_buffer; + auto destination = cudf::io::sink_info(&out_buffer); + auto options_builder = cudf::io::json_writer_options_builder(destination, tbl_view) + .include_nulls(true) + .metadata(mt) + .lines(true) + .na_rep("null"); + + cudf::io::write_json(options_builder.build(), rmm::mr::get_current_device_resource()); + + cudf::table_view const expected = tbl_view; + std::map types; + types["col1"] = data_type{type_id::STRING}; + 
types["col2"] = data_type{type_id::STRING}; + types["int16"] = data_type{type_id::INT16}; + + // Initialize parsing options (reading json lines) + cudf::io::json_reader_options json_lines_options = + cudf::io::json_reader_options::builder( + cudf::io::source_info{out_buffer.data(), out_buffer.size()}) + .lines(true) + .dtypes(types); + + // Read test data via nested JSON reader + auto const table = cudf::io::read_json(json_lines_options); + CUDF_TEST_EXPECT_TABLES_EQUAL(expected, table.tbl->view()); +} + +TEST_F(JsonReaderTest, ErrorStrings) +{ + // cases of invalid escape characters, invalid unicode encodings. + // Error strings will decode to nulls + auto const buffer = std::string{R"( + {"col0": "\"\a"} + {"col0": "\u"} + {"col0": "\u0"} + {"col0": "\u0b"} + {"col0": "\u00b"} + {"col0": "\u00bz"} + {"col0": "\t34567890123456\t9012345678901\ug0bc"} + {"col0": "\t34567890123456\t90123456789012\u0hbc"} + {"col0": "\t34567890123456\t90123456789012\u00ic"} + {"col0": "\u0b95\u0bbe\u0bb0\u0bcd\u0ba4\u0bcd\u0ba4\u0bbfகார்த்தி"} +)"}; + // Last one is not an error case, but shows that unicode in json is copied string column output. + + cudf::io::json_reader_options const in_opts = + cudf::io::json_reader_options::builder(cudf::io::source_info{buffer.c_str(), buffer.size()}) + .dtypes({data_type{cudf::type_id::STRING}}) + .lines(true) + .legacy(false); + + auto const result = cudf::io::read_json(in_opts); + auto const result_view = result.tbl->view().column(0); + + EXPECT_EQ(result.metadata.schema_info[0].name, "col0"); + EXPECT_EQ(result_view.null_count(), 9); + cudf::test::strings_column_wrapper expected{ + {"", + "", + "", + "", + "", + "", + "", + "", + "", + "கார்த்தி\xe0\xae\x95\xe0\xae\xbe\xe0\xae\xb0\xe0\xaf\x8d\xe0\xae\xa4\xe0\xaf\x8d\xe0\xae\xa4" + "\xe0\xae\xbf"}, + // unicode hex 0xe0 0xae 0x95 0xe0 0xae 0xbe 0xe0 0xae 0xb0 0xe0 0xaf 0x8d + // 0xe0 0xae 0xa4 0xe0 0xaf 0x8d 0xe0 0xae 0xa4 0xe0 0xae 0xbf + cudf::test::iterators::nulls_at({0, 1, 2, 3, 4, 5, 6, 7, 8})}; + CUDF_TEST_EXPECT_COLUMNS_EQUAL(result_view, expected); +} + TEST_F(JsonReaderTest, TokenAllocation) { std::array const json_inputs{ diff --git a/cpp/tests/io/json_type_cast_test.cu b/cpp/tests/io/json_type_cast_test.cu index 5c32131114d..9eb5e8f5230 100644 --- a/cpp/tests/io/json_type_cast_test.cu +++ b/cpp/tests/io/json_type_cast_test.cu @@ -21,15 +21,20 @@ #include #include +#include + #include #include #include -#include #include #include #include #include +#include + +#include +#include #include using namespace cudf::test::iterators; @@ -37,13 +42,27 @@ using namespace cudf::test::iterators; struct JSONTypeCastTest : public cudf::test::BaseFixture {}; namespace { -struct to_thrust_pair_fn { - __device__ thrust::pair operator()( - thrust::pair const& p) +struct offsets_to_length { + __device__ cudf::size_type operator()(thrust::tuple const& p) { - return {p.first.data(), p.first.size_bytes()}; + return thrust::get<1>(p) - thrust::get<0>(p); } }; + +/// Returns length of each string in the column +auto string_offset_to_length(cudf::strings_column_view const& column, rmm::cuda_stream_view stream) +{ + auto offsets_begin = column.offsets_begin(); + auto offsets_pair = + thrust::make_zip_iterator(thrust::make_tuple(offsets_begin, thrust::next(offsets_begin))); + rmm::device_uvector svs_length(column.size(), stream); + thrust::transform(rmm::exec_policy(cudf::get_default_stream()), + offsets_pair, + offsets_pair + column.size(), + svs_length.begin(), + offsets_to_length{}); + return svs_length; +} } // namespace auto 
default_json_options() @@ -67,26 +86,23 @@ TEST_F(JSONTypeCastTest, String) std::vector input_values{"this", "is", "null", "of", "", "strings", R"("null")"}; cudf::test::strings_column_wrapper input(input_values.begin(), input_values.end(), in_valids); - auto d_column = cudf::column_device_view::create(input); - rmm::device_uvector> svs(d_column->size(), stream); - thrust::transform(rmm::exec_policy(cudf::get_default_stream()), - d_column->pair_begin(), - d_column->pair_end(), - svs.begin(), - to_thrust_pair_fn{}); + auto column = cudf::strings_column_view(input); + rmm::device_uvector svs_length = string_offset_to_length(column, stream); auto null_mask_it = no_nulls(); auto null_mask = - std::get<0>(cudf::test::detail::make_null_mask(null_mask_it, null_mask_it + d_column->size())); - - auto str_col = cudf::io::json::detail::parse_data(svs.data(), - svs.size(), - type, - std::move(null_mask), - 0, - default_json_options().view(), - stream, - mr); + std::get<0>(cudf::test::detail::make_null_mask(null_mask_it, null_mask_it + column.size())); + + auto str_col = cudf::io::json::detail::parse_data( + column.chars().data(), + thrust::make_zip_iterator(thrust::make_tuple(column.offsets_begin(), svs_length.begin())), + column.size(), + type, + std::move(null_mask), + 0, + default_json_options().view(), + stream, + mr); auto out_valids = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i != 2 and i != 4; }); @@ -103,26 +119,23 @@ TEST_F(JSONTypeCastTest, Int) auto const type = cudf::data_type{cudf::type_id::INT64}; cudf::test::strings_column_wrapper data({"1", "null", "3", "true", "5", "false"}); - auto d_column = cudf::column_device_view::create(data); - rmm::device_uvector> svs(d_column->size(), stream); - thrust::transform(rmm::exec_policy(cudf::get_default_stream()), - d_column->pair_begin(), - d_column->pair_end(), - svs.begin(), - to_thrust_pair_fn{}); + auto column = cudf::strings_column_view(data); + rmm::device_uvector svs_length = string_offset_to_length(column, stream); auto null_mask_it = no_nulls(); auto null_mask = - std::get<0>(cudf::test::detail::make_null_mask(null_mask_it, null_mask_it + d_column->size())); - - auto col = cudf::io::json::detail::parse_data(svs.data(), - svs.size(), - type, - std::move(null_mask), - 0, - default_json_options().view(), - stream, - mr); + std::get<0>(cudf::test::detail::make_null_mask(null_mask_it, null_mask_it + column.size())); + + auto col = cudf::io::json::detail::parse_data( + column.chars().data(), + thrust::make_zip_iterator(thrust::make_tuple(column.offsets_begin(), svs_length.begin())), + column.size(), + type, + std::move(null_mask), + 0, + default_json_options().view(), + stream, + mr); auto expected = cudf::test::fixed_width_column_wrapper{{1, 2, 3, 1, 5, 0}, {1, 0, 1, 1, 1, 1}}; @@ -146,26 +159,23 @@ TEST_F(JSONTypeCastTest, StringEscapes) R"("escape with nothing to escape \")", R"("\"\\\/\b\f\n\r\t")", }); - auto d_column = cudf::column_device_view::create(data); - rmm::device_uvector> svs(d_column->size(), stream); - thrust::transform(rmm::exec_policy(cudf::get_default_stream()), - d_column->pair_begin(), - d_column->pair_end(), - svs.begin(), - to_thrust_pair_fn{}); + auto column = cudf::strings_column_view(data); + rmm::device_uvector svs_length = string_offset_to_length(column, stream); auto null_mask_it = no_nulls(); auto null_mask = - std::get<0>(cudf::test::detail::make_null_mask(null_mask_it, null_mask_it + d_column->size())); - - auto col = cudf::io::json::detail::parse_data(svs.data(), - svs.size(), - 
type, - std::move(null_mask), - 0, - default_json_options().view(), - stream, - mr); + std::get<0>(cudf::test::detail::make_null_mask(null_mask_it, null_mask_it + column.size())); + + auto col = cudf::io::json::detail::parse_data( + column.chars().data(), + thrust::make_zip_iterator(thrust::make_tuple(column.offsets_begin(), svs_length.begin())), + column.size(), + type, + std::move(null_mask), + 0, + default_json_options().view(), + stream, + mr); auto expected = cudf::test::strings_column_wrapper{ {"🚀", "A🚀AA", "", "", "", "\\", "➩", "", "\"\\/\b\f\n\r\t"}, @@ -173,4 +183,71 @@ TEST_F(JSONTypeCastTest, StringEscapes) CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(col->view(), expected); } +TEST_F(JSONTypeCastTest, ErrorNulls) +{ + auto const stream = cudf::get_default_stream(); + auto mr = rmm::mr::get_current_device_resource(); + auto const type = cudf::data_type{cudf::type_id::STRING}; + + // error in decoding + std::vector input_values{R"("\"\a")", + R"("\u")", + R"("\u0")", + R"("\u0b")", + R"("\u00b")", + R"("\u00bz")", + R"("\t34567890123456\t9012345678901\ug0bc")", + R"("\t34567890123456\t90123456789012\u0hbc")", + R"("\t34567890123456\t90123456789012\u00ic")", + R"("\t34567890123456\t9012345678901\")", + R"("\t34567890123456\t90123456789012\")", + R"(null)"}; + // Note: without quotes are copied without decoding + cudf::test::strings_column_wrapper input(input_values.begin(), input_values.end()); + + auto column = cudf::strings_column_view(input); + auto space_length = 128; + auto prepend_space = [&space_length](auto const& s) { + if (s[0] == '"') return "\"" + std::string(space_length, ' ') + std::string(s + 1); + return std::string(s); + }; + std::vector small_input; + std::transform( + input_values.begin(), input_values.end(), std::back_inserter(small_input), prepend_space); + cudf::test::strings_column_wrapper small_col(small_input.begin(), small_input.end()); + + std::vector large_input; + space_length = 128 * 128; + std::transform( + input_values.begin(), input_values.end(), std::back_inserter(large_input), prepend_space); + cudf::test::strings_column_wrapper large_col(large_input.begin(), large_input.end()); + + std::vector expected_values{"", "", "", "", "", "", "", "", "", "", "", ""}; + cudf::test::strings_column_wrapper expected( + expected_values.begin(), expected_values.end(), cudf::test::iterators::all_nulls()); + + // single threads, warp, block. + for (auto const& column : + {column, cudf::strings_column_view(small_col), cudf::strings_column_view(large_col)}) { + rmm::device_uvector svs_length = string_offset_to_length(column, stream); + + auto null_mask_it = no_nulls(); + auto null_mask = + std::get<0>(cudf::test::detail::make_null_mask(null_mask_it, null_mask_it + column.size())); + + auto str_col = cudf::io::json::detail::parse_data( + column.chars().data(), + thrust::make_zip_iterator(thrust::make_tuple(column.offsets_begin(), svs_length.begin())), + column.size(), + type, + std::move(null_mask), + 0, + default_json_options().view(), + stream, + mr); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL(str_col->view(), expected); + } +} + CUDF_TEST_PROGRAM_MAIN() diff --git a/cpp/tests/io/type_inference_test.cu b/cpp/tests/io/type_inference_test.cu index b2eb1b94f9c..a14e7ecf5b3 100644 --- a/cpp/tests/io/type_inference_test.cu +++ b/cpp/tests/io/type_inference_test.cu @@ -14,8 +14,8 @@ * limitations under the License. 
*/ +#include #include -#include #include #include @@ -50,8 +50,8 @@ TEST_F(TypeInference, Basic) auto d_data = cudf::make_string_scalar(data); auto& d_string_scalar = static_cast(*d_data); - auto const string_offset = std::vector{1, 4, 7}; - auto const string_length = std::vector{2, 2, 1}; + auto const string_offset = std::vector{1, 4, 7}; + auto const string_length = std::vector{2, 2, 1}; auto const d_string_offset = cudf::detail::make_device_uvector_async( string_offset, cudf::get_default_stream(), rmm::mr::get_current_device_resource()); auto const d_string_length = cudf::detail::make_device_uvector_async( @@ -83,8 +83,8 @@ TEST_F(TypeInference, Null) auto d_data = cudf::make_string_scalar(data); auto& d_string_scalar = static_cast(*d_data); - auto const string_offset = std::vector{1, 1, 4}; - auto const string_length = std::vector{0, 2, 1}; + auto const string_offset = std::vector{1, 1, 4}; + auto const string_length = std::vector{0, 2, 1}; auto const d_string_offset = cudf::detail::make_device_uvector_async( string_offset, cudf::get_default_stream(), rmm::mr::get_current_device_resource()); auto const d_string_length = cudf::detail::make_device_uvector_async( @@ -116,8 +116,8 @@ TEST_F(TypeInference, AllNull) auto d_data = cudf::make_string_scalar(data); auto& d_string_scalar = static_cast(*d_data); - auto const string_offset = std::vector{1, 1, 1}; - auto const string_length = std::vector{0, 0, 4}; + auto const string_offset = std::vector{1, 1, 1}; + auto const string_length = std::vector{0, 0, 4}; auto const d_string_offset = cudf::detail::make_device_uvector_async( string_offset, cudf::get_default_stream(), rmm::mr::get_current_device_resource()); auto const d_string_length = cudf::detail::make_device_uvector_async( @@ -149,8 +149,8 @@ TEST_F(TypeInference, String) auto d_data = cudf::make_string_scalar(data); auto& d_string_scalar = static_cast(*d_data); - auto const string_offset = std::vector{1, 8, 12}; - auto const string_length = std::vector{6, 3, 4}; + auto const string_offset = std::vector{1, 8, 12}; + auto const string_length = std::vector{6, 3, 4}; auto const d_string_offset = cudf::detail::make_device_uvector_async( string_offset, cudf::get_default_stream(), rmm::mr::get_current_device_resource()); auto const d_string_length = cudf::detail::make_device_uvector_async( @@ -182,8 +182,8 @@ TEST_F(TypeInference, Bool) auto d_data = cudf::make_string_scalar(data); auto& d_string_scalar = static_cast(*d_data); - auto const string_offset = std::vector{1, 6, 12}; - auto const string_length = std::vector{4, 5, 5}; + auto const string_offset = std::vector{1, 6, 12}; + auto const string_length = std::vector{4, 5, 5}; auto const d_string_offset = cudf::detail::make_device_uvector_async( string_offset, cudf::get_default_stream(), rmm::mr::get_current_device_resource()); auto const d_string_length = cudf::detail::make_device_uvector_async( @@ -215,8 +215,8 @@ TEST_F(TypeInference, Timestamp) auto d_data = cudf::make_string_scalar(data); auto& d_string_scalar = static_cast(*d_data); - auto const string_offset = std::vector{1, 10}; - auto const string_length = std::vector{8, 9}; + auto const string_offset = std::vector{1, 10}; + auto const string_length = std::vector{8, 9}; auto const d_string_offset = cudf::detail::make_device_uvector_async( string_offset, cudf::get_default_stream(), rmm::mr::get_current_device_resource()); auto const d_string_length = cudf::detail::make_device_uvector_async( @@ -249,8 +249,8 @@ TEST_F(TypeInference, InvalidInput) auto d_data = 
cudf::make_string_scalar(data); auto& d_string_scalar = static_cast(*d_data); - auto const string_offset = std::vector{1, 3, 5, 7, 9}; - auto const string_length = std::vector{1, 1, 1, 1, 1}; + auto const string_offset = std::vector{1, 3, 5, 7, 9}; + auto const string_length = std::vector{1, 1, 1, 1, 1}; auto const d_string_offset = cudf::detail::make_device_uvector_async( string_offset, cudf::get_default_stream(), rmm::mr::get_current_device_resource()); auto const d_string_length = cudf::detail::make_device_uvector_async( From 63d197fe029ff2b57f4e0c7ab975bb35f844fc25 Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Tue, 19 Sep 2023 19:27:10 -0700 Subject: [PATCH 079/150] Avoid circular cimports in _lib/cpp/reduce.pxd (#14125) This Cython modules contains some cimports from higher-level modules than it should, which introduces the possibility for circular import issues. Also it contains an unused import of DeviceScalar that can cause similar issues. Authors: - Vyas Ramasubramani (https://github.com/vyasr) Approvers: - Lawrence Mitchell (https://github.com/wence-) URL: https://github.com/rapidsai/cudf/pull/14125 --- python/cudf/cudf/_lib/cpp/reduce.pxd | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/python/cudf/cudf/_lib/cpp/reduce.pxd b/python/cudf/cudf/_lib/cpp/reduce.pxd index 7952c717916..997782dec6c 100644 --- a/python/cudf/cudf/_lib/cpp/reduce.pxd +++ b/python/cudf/cudf/_lib/cpp/reduce.pxd @@ -1,14 +1,13 @@ -# Copyright (c) 2020-2022, NVIDIA CORPORATION. +# Copyright (c) 2020-2023, NVIDIA CORPORATION. from libcpp.memory cimport unique_ptr from libcpp.utility cimport pair -from cudf._lib.aggregation cimport reduce_aggregation, scan_aggregation +from cudf._lib.cpp.aggregation cimport reduce_aggregation, scan_aggregation from cudf._lib.cpp.column.column cimport column from cudf._lib.cpp.column.column_view cimport column_view from cudf._lib.cpp.scalar.scalar cimport scalar from cudf._lib.cpp.types cimport data_type -from cudf._lib.scalar cimport DeviceScalar cdef extern from "cudf/reduction.hpp" namespace "cudf" nogil: From 2d4f22a9ab0709f808af9253097037e0eb5d00b1 Mon Sep 17 00:00:00 2001 From: Sam Turner <98767222+stmio@users.noreply.github.com> Date: Wed, 20 Sep 2023 13:57:26 +0100 Subject: [PATCH 080/150] Implement `GroupBy.value_counts` to match pandas API (#14114) This PR implements `GroupBy.value_counts`, matching the [pandas equivalent](https://pandas.pydata.org/docs/dev/reference/api/pandas.core.groupby.DataFrameGroupBy.value_counts.html) method. Tests currently ignore the returned Series/DataFrame's name, as this was [added to pandas in v2.0.0](https://github.com/pandas-dev/pandas/commit/bec92a43feb0057f06f4f9b9db26c1a09232b1c0). This can be removed if tests are against `pandas>=2.0.0`. 
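For example, a comparison that passes on both older and newer pandas has to
ignore the result's name, since only pandas>=2.0.0 names it "count" (or
"proportion" when normalizing). A rough sketch, assuming a cudf DataFrame
`df` with a "gender" column:

    from cudf.testing._utils import assert_eq

    actual = df.groupby("gender").value_counts()
    expected = df.to_pandas().groupby("gender").value_counts()
    assert_eq(actual, expected, check_names=False)  # ignore the result name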
Closes #12789 Authors: - Sam Turner (https://github.com/stmio) Approvers: - Bradley Dice (https://github.com/bdice) - GALI PREM SAGAR (https://github.com/galipremsagar) URL: https://github.com/rapidsai/cudf/pull/14114 --- python/cudf/cudf/core/groupby/groupby.py | 164 +++++++++++++++++++++++ python/cudf/cudf/tests/test_groupby.py | 67 +++++++++ 2 files changed, 231 insertions(+) diff --git a/python/cudf/cudf/core/groupby/groupby.py b/python/cudf/cudf/core/groupby/groupby.py index b300c55b537..e1740140b44 100644 --- a/python/cudf/cudf/core/groupby/groupby.py +++ b/python/cudf/cudf/core/groupby/groupby.py @@ -2336,6 +2336,170 @@ def pct_change( shifted = fill_grp.shift(periods=periods, freq=freq) return (filled / shifted) - 1 + def value_counts( + self, + subset=None, + normalize: bool = False, + sort: bool = True, + ascending: bool = False, + dropna: bool = True, + ) -> DataFrameOrSeries: + """ + Return a Series or DataFrame containing counts of unique rows. + + Parameters + ---------- + subset : list-like, optional + Columns to use when counting unique combinations. + normalize : bool, default False + Return proportions rather than frequencies. + sort : bool, default True + Sort by frequencies. + ascending : bool, default False + Sort in ascending order. + dropna : bool, default True + Don't include counts of rows that contain NA values. + + Returns + ------- + Series or DataFrame + Series if the groupby as_index is True, otherwise DataFrame. + + See Also + -------- + Series.value_counts: Equivalent method on Series. + DataFrame.value_counts: Equivalent method on DataFrame. + SeriesGroupBy.value_counts: Equivalent method on SeriesGroupBy. + + Notes + ----- + - If the groupby as_index is True then the returned Series will have a + MultiIndex with one level per input column. + - If the groupby as_index is False then the returned DataFrame will + have an additional column with the value_counts. The column is + labelled 'count' or 'proportion', depending on the ``normalize`` + parameter. + + By default, rows that contain any NA values are omitted from + the result. + + By default, the result will be in descending order so that the + first element of each group is the most frequently-occurring row. + + Examples + -------- + >>> import cudf + >>> df = cudf.DataFrame({ + ... 'gender': ['male', 'male', 'female', 'male', 'female', 'male'], + ... 'education': ['low', 'medium', 'high', 'low', 'high', 'low'], + ... 'country': ['US', 'FR', 'US', 'FR', 'FR', 'FR'] + ... 
}) + + >>> df + gender education country + 0 male low US + 1 male medium FR + 2 female high US + 3 male low FR + 4 female high FR + 5 male low FR + + >>> df.groupby('gender').value_counts() + gender education country + female high FR 1 + US 1 + male low FR 2 + US 1 + medium FR 1 + Name: count, dtype: int64 + + >>> df.groupby('gender').value_counts(ascending=True) + gender education country + female high FR 1 + US 1 + male low US 1 + medium FR 1 + low FR 2 + Name: count, dtype: int64 + + >>> df.groupby('gender').value_counts(normalize=True) + gender education country + female high FR 0.50 + US 0.50 + male low FR 0.50 + US 0.25 + medium FR 0.25 + Name: proportion, dtype: float64 + + >>> df.groupby('gender', as_index=False).value_counts() + gender education country count + 0 female high FR 1 + 1 female high US 1 + 2 male low FR 2 + 3 male low US 1 + 4 male medium FR 1 + + >>> df.groupby('gender', as_index=False).value_counts(normalize=True) + gender education country proportion + 0 female high FR 0.50 + 1 female high US 0.50 + 2 male low FR 0.50 + 3 male low US 0.25 + 4 male medium FR 0.25 + """ + + df = cudf.DataFrame.copy(self.obj) + groupings = self.grouping.names + name = "proportion" if normalize else "count" + + if subset is None: + subset = [i for i in df._column_names if i not in groupings] + # Check subset exists in dataframe + elif set(subset) - set(df._column_names): + raise ValueError( + f"Keys {set(subset) - set(df._column_names)} in subset " + f"do not exist in the DataFrame." + ) + # Catch case where groupby and subset share an element + elif set(subset) & set(groupings): + raise ValueError( + f"Keys {set(subset) & set(groupings)} in subset " + "cannot be in the groupby column keys." + ) + + df["__placeholder"] = 1 + result = ( + df.groupby(groupings + list(subset), dropna=dropna)[ + "__placeholder" + ] + .count() + .sort_index() + .astype(np.int64) + ) + + if normalize: + levels = list(range(len(groupings), result.index.nlevels)) + result /= result.groupby( + result.index.droplevel(levels), + ).transform("sum") + + if sort: + result = result.sort_values(ascending=ascending).sort_index( + level=range(len(groupings)), sort_remaining=False + ) + + if not self._as_index: + if name in df._column_names: + raise ValueError( + f"Column label '{name}' is duplicate of result column" + ) + result.name = name + result = result.to_frame().reset_index() + else: + result.name = name + + return result + def _mimic_pandas_order( self, result: DataFrameOrSeries ) -> DataFrameOrSeries: diff --git a/python/cudf/cudf/tests/test_groupby.py b/python/cudf/cudf/tests/test_groupby.py index 042f0e1aa38..376639d5226 100644 --- a/python/cudf/cudf/tests/test_groupby.py +++ b/python/cudf/cudf/tests/test_groupby.py @@ -3473,3 +3473,70 @@ def test_categorical_grouping_pandas_compatibility(): expected = pdf.groupby("key", sort=False).sum() assert_eq(actual, expected) + + +@pytest.mark.parametrize("normalize", [True, False]) +@pytest.mark.parametrize("sort", [True, False]) +@pytest.mark.parametrize("ascending", [True, False]) +@pytest.mark.parametrize("dropna", [True, False]) +@pytest.mark.parametrize("as_index", [True, False]) +def test_group_by_value_counts(normalize, sort, ascending, dropna, as_index): + # From Issue#12789 + df = cudf.DataFrame( + { + "gender": ["male", "male", "female", "male", "female", "male"], + "education": ["low", "medium", np.nan, "low", "high", "low"], + "country": ["US", "FR", "US", "FR", "FR", "FR"], + } + ) + pdf = df.to_pandas() + + actual = df.groupby("gender", 
as_index=as_index).value_counts( + normalize=normalize, sort=sort, ascending=ascending, dropna=dropna + ) + expected = pdf.groupby("gender", as_index=as_index).value_counts( + normalize=normalize, sort=sort, ascending=ascending, dropna=dropna + ) + + # TODO: Remove `check_names=False` once testing against `pandas>=2.0.0` + assert_groupby_results_equal( + actual, expected, check_names=False, check_index_type=False + ) + + +def test_group_by_value_counts_subset(): + # From Issue#12789 + df = cudf.DataFrame( + { + "gender": ["male", "male", "female", "male", "female", "male"], + "education": ["low", "medium", "high", "low", "high", "low"], + "country": ["US", "FR", "US", "FR", "FR", "FR"], + } + ) + pdf = df.to_pandas() + + actual = df.groupby("gender").value_counts(["education"]) + expected = pdf.groupby("gender").value_counts(["education"]) + + # TODO: Remove `check_names=False` once testing against `pandas>=2.0.0` + assert_groupby_results_equal( + actual, expected, check_names=False, check_index_type=False + ) + + +def test_group_by_value_counts_clash_with_subset(): + df = cudf.DataFrame({"a": [1, 5, 3], "b": [2, 5, 2]}) + with pytest.raises(ValueError): + df.groupby("a").value_counts(["a"]) + + +def test_group_by_value_counts_subset_not_exists(): + df = cudf.DataFrame({"a": [1, 5, 3], "b": [2, 5, 2]}) + with pytest.raises(ValueError): + df.groupby("a").value_counts(["c"]) + + +def test_group_by_value_counts_with_count_column(): + df = cudf.DataFrame({"a": [1, 5, 3], "count": [2, 5, 2]}) + with pytest.raises(ValueError): + df.groupby("a", as_index=False).value_counts() From 7b0693f6a5fd58e247a7669a813c6ffba850e4e0 Mon Sep 17 00:00:00 2001 From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> Date: Wed, 20 Sep 2023 04:46:35 -1000 Subject: [PATCH 081/150] Fix DataFrame.values with no columns but index (#14134) Fixes the following ```python In [32]: cudf.DataFrame(index=range(10)).values Out[32]: array([], shape=(0, 0), dtype=float64) ``` Authors: - Matthew Roeschke (https://github.com/mroeschke) Approvers: - GALI PREM SAGAR (https://github.com/galipremsagar) URL: https://github.com/rapidsai/cudf/pull/14134 --- python/cudf/cudf/core/frame.py | 2 +- python/cudf/cudf/tests/test_dataframe.py | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/python/cudf/cudf/core/frame.py b/python/cudf/cudf/core/frame.py index 6224793d6f1..1e6d177f8ca 100644 --- a/python/cudf/cudf/core/frame.py +++ b/python/cudf/cudf/core/frame.py @@ -437,7 +437,7 @@ def get_column_values_na(col): ncol = self._num_columns if ncol == 0: return make_empty_matrix( - shape=(0, 0), dtype=np.dtype("float64"), order="F" + shape=(len(self), ncol), dtype=np.dtype("float64"), order="F" ) if dtype is None: diff --git a/python/cudf/cudf/tests/test_dataframe.py b/python/cudf/cudf/tests/test_dataframe.py index cbef9bfa2d8..b69f22ade81 100644 --- a/python/cudf/cudf/tests/test_dataframe.py +++ b/python/cudf/cudf/tests/test_dataframe.py @@ -10374,3 +10374,9 @@ def test_dataframe_init_from_nested_dict(): pdf = pd.DataFrame(regular_dict) gdf = cudf.DataFrame(regular_dict) assert_eq(pdf, gdf) + + +def test_data_frame_values_no_cols_but_index(): + result = cudf.DataFrame(index=range(5)).values + expected = pd.DataFrame(index=range(5)).values + assert_eq(result, expected) From f7ca051145d41cf323cfb5a066068cb8b75d3fb3 Mon Sep 17 00:00:00 2001 From: GALI PREM SAGAR Date: Wed, 20 Sep 2023 10:49:06 -0500 Subject: [PATCH 082/150] Fix type of empty `Index` and raise warning in `Series` constructor 
(#14116) Fixes: #14091 This PR makes the dtype of an empty `Index` default to `str` instead of `float64`. It also adds a deprecation warning to the `Series` constructor to match pandas. Authors: - GALI PREM SAGAR (https://github.com/galipremsagar) Approvers: - Bradley Dice (https://github.com/bdice) URL: https://github.com/rapidsai/cudf/pull/14116 --- python/cudf/cudf/core/algorithms.py | 21 +++++++---- python/cudf/cudf/core/dataframe.py | 2 +- python/cudf/cudf/core/index.py | 12 ++++++- python/cudf/cudf/core/series.py | 32 +++++++++++++++-- python/cudf/cudf/testing/_utils.py | 21 +++++++++-- python/cudf/cudf/tests/test_dataframe.py | 19 +++++----- python/cudf/cudf/tests/test_dropna.py | 9 +++-- python/cudf/cudf/tests/test_duplicates.py | 4 +-- python/cudf/cudf/tests/test_index.py | 16 ++++++--- python/cudf/cudf/tests/test_rolling.py | 9 +++-- python/cudf/cudf/tests/test_series.py | 43 ++++++++++++++--------- python/cudf/cudf/tests/test_stats.py | 23 ++++++------ 12 files changed, 148 insertions(+), 63 deletions(-) diff --git a/python/cudf/cudf/core/algorithms.py b/python/cudf/cudf/core/algorithms.py index a472142ece0..25d58029d6b 100644 --- a/python/cudf/cudf/core/algorithms.py +++ b/python/cudf/cudf/core/algorithms.py @@ -4,12 +4,13 @@ import cupy as cp import numpy as np +from cudf.core.column import as_column from cudf.core.copy_types import BooleanMask -from cudf.core.index import Index, RangeIndex +from cudf.core.index import RangeIndex, as_index from cudf.core.indexed_frame import IndexedFrame from cudf.core.scalar import Scalar -from cudf.core.series import Series from cudf.options import get_option +from cudf.utils.dtypes import can_convert_to_column def factorize( @@ -95,7 +96,13 @@ def factorize( return_cupy_array = isinstance(values, cp.ndarray) - values = Series(values) + if not can_convert_to_column(values): + raise TypeError( + "'values' can only be a Series, Index, or CuPy array, " + f"got {type(values)}" + ) + + values = as_column(values) if na_sentinel is None: na_sentinel = ( @@ -128,22 +135,22 @@ def factorize( warnings.warn("size_hint is not applicable for cudf.factorize") if use_na_sentinel is None or use_na_sentinel: - cats = values._column.dropna() + cats = values.dropna() else: - cats = values._column + cats = values cats = cats.unique().astype(values.dtype) if sort: cats = cats.sort_values() - labels = values._column._label_encoding( + labels = values._label_encoding( cats=cats, na_sentinel=Scalar(na_sentinel), dtype="int64" if get_option("mode.pandas_compatible") else None, ).values - return labels, cats.values if return_cupy_array else Index(cats) + return labels, cats.values if return_cupy_array else as_index(cats) def _linear_interpolation(column, index=None): diff --git a/python/cudf/cudf/core/dataframe.py b/python/cudf/cudf/core/dataframe.py index 84c16b71997..6e664468644 100644 --- a/python/cudf/cudf/core/dataframe.py +++ b/python/cudf/cudf/core/dataframe.py @@ -5607,7 +5607,7 @@ def quantile( result.name = q return result - result.index = list(map(float, qs)) + result.index = cudf.Index(list(map(float, qs)), dtype="float64") return result @_cudf_nvtx_annotate diff --git a/python/cudf/cudf/core/index.py b/python/cudf/cudf/core/index.py index 56ec9ce0359..de8a5948033 100644 --- a/python/cudf/cudf/core/index.py +++ b/python/cudf/cudf/core/index.py @@ -13,6 +13,7 @@ List, MutableMapping, Optional, + Sequence, Tuple, Type, Union, @@ -3467,7 +3468,7 @@ def __new__( "tupleize_cols != True is not yet supported" ) - return 
as_index( + res = as_index( data, copy=copy, dtype=dtype, @@ -3475,6 +3476,15 @@ def __new__( nan_as_null=nan_as_null, **kwargs, ) + if ( + isinstance(data, Sequence) + and not isinstance(data, range) + and len(data) == 0 + and dtype is None + and getattr(data, "dtype", None) is None + ): + return res.astype("str") + return res @classmethod @_cudf_nvtx_annotate diff --git a/python/cudf/cudf/core/series.py b/python/cudf/cudf/core/series.py index 7692d3015f8..a195738af54 100644 --- a/python/cudf/cudf/core/series.py +++ b/python/cudf/cudf/core/series.py @@ -9,7 +9,16 @@ import warnings from collections import abc from shutil import get_terminal_size -from typing import Any, Dict, MutableMapping, Optional, Set, Tuple, Union +from typing import ( + Any, + Dict, + MutableMapping, + Optional, + Sequence, + Set, + Tuple, + Union, +) import cupy import numpy as np @@ -500,6 +509,18 @@ def __init__( copy=False, nan_as_null=True, ): + if ( + isinstance(data, Sequence) + and len(data) == 0 + and dtype is None + and getattr(data, "dtype", None) is None + ): + warnings.warn( + "The default dtype for empty Series will be 'object' instead " + "of 'float64' in a future version. Specify a dtype explicitly " + "to silence this warning.", + FutureWarning, + ) if isinstance(data, pd.Series): if name is None: name = data.name @@ -656,7 +677,10 @@ def from_pandas(cls, s, nan_as_null=None): 3 NaN dtype: float64 """ - return cls(s, nan_as_null=nan_as_null) + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + result = cls(s, nan_as_null=nan_as_null) + return result @property # type: ignore @_cudf_nvtx_annotate @@ -2642,7 +2666,9 @@ def mode(self, dropna=True): if len(val_counts) > 0: val_counts = val_counts[val_counts == val_counts.iloc[0]] - return Series(val_counts.index.sort_values(), name=self.name) + return Series._from_data( + {self.name: val_counts.index.sort_values()}, name=self.name + ) @_cudf_nvtx_annotate def round(self, decimals=0, how="half_even"): diff --git a/python/cudf/cudf/testing/_utils.py b/python/cudf/cudf/testing/_utils.py index e949f7d78e7..9182246826f 100644 --- a/python/cudf/cudf/testing/_utils.py +++ b/python/cudf/cudf/testing/_utils.py @@ -397,8 +397,12 @@ def assert_column_memory_ne( raise AssertionError("lhs and rhs holds the same memory.") -def _create_pandas_series(data=None, index=None, dtype=None, *args, **kwargs): - # Wrapper around pd.Series using a float64 default dtype for empty data. +def _create_pandas_series_float64_default( + data=None, index=None, dtype=None, *args, **kwargs +): + # Wrapper around pd.Series using a float64 + # default dtype for empty data to silence warnings. + # TODO: Remove this in pandas-2.0 upgrade if dtype is None and ( data is None or (not is_scalar(data) and len(data) == 0) ): @@ -406,6 +410,19 @@ def _create_pandas_series(data=None, index=None, dtype=None, *args, **kwargs): return pd.Series(data=data, index=index, dtype=dtype, *args, **kwargs) +def _create_cudf_series_float64_default( + data=None, index=None, dtype=None, *args, **kwargs +): + # Wrapper around cudf.Series using a float64 + # default dtype for empty data to silence warnings. 
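+ # Mirrors _create_pandas_series_float64_default above, but constructs a cudf.Series.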
+ # TODO: Remove this in pandas-2.0 upgrade + if dtype is None and ( + data is None or (not is_scalar(data) and len(data) == 0) + ): + dtype = "float64" + return cudf.Series(data=data, index=index, dtype=dtype, *args, **kwargs) + + parametrize_numeric_dtypes_pairwise = pytest.mark.parametrize( "left_dtype,right_dtype", list(itertools.combinations_with_replacement(NUMERIC_TYPES, 2)), diff --git a/python/cudf/cudf/tests/test_dataframe.py b/python/cudf/cudf/tests/test_dataframe.py index b69f22ade81..bc85987c612 100644 --- a/python/cudf/cudf/tests/test_dataframe.py +++ b/python/cudf/cudf/tests/test_dataframe.py @@ -30,6 +30,7 @@ ALL_TYPES, DATETIME_TYPES, NUMERIC_TYPES, + _create_cudf_series_float64_default, assert_eq, assert_exceptions_equal, assert_neq, @@ -2000,8 +2001,8 @@ def test_series_shape(): def test_series_shape_empty(): - ps = pd.Series(dtype="float64") - cs = cudf.Series([]) + ps = pd.Series([], dtype="float64") + cs = cudf.Series([], dtype="float64") assert ps.shape == cs.shape @@ -2840,7 +2841,7 @@ def test_series_all_null(num_elements, null_type): @pytest.mark.parametrize("num_elements", [0, 2, 10, 100]) def test_series_all_valid_nan(num_elements): data = [np.nan] * num_elements - sr = cudf.Series(data, nan_as_null=False) + sr = _create_cudf_series_float64_default(data, nan_as_null=False) np.testing.assert_equal(sr.null_count, 0) @@ -4073,28 +4074,28 @@ def test_empty_dataframe_describe(): def test_as_column_types(): - col = column.as_column(cudf.Series([])) + col = column.as_column(cudf.Series([], dtype="float64")) assert_eq(col.dtype, np.dtype("float64")) gds = cudf.Series(col) pds = pd.Series(pd.Series([], dtype="float64")) assert_eq(pds, gds) - col = column.as_column(cudf.Series([]), dtype="float32") + col = column.as_column(cudf.Series([], dtype="float64"), dtype="float32") assert_eq(col.dtype, np.dtype("float32")) gds = cudf.Series(col) pds = pd.Series(pd.Series([], dtype="float32")) assert_eq(pds, gds) - col = column.as_column(cudf.Series([]), dtype="str") + col = column.as_column(cudf.Series([], dtype="float64"), dtype="str") assert_eq(col.dtype, np.dtype("object")) gds = cudf.Series(col) pds = pd.Series(pd.Series([], dtype="str")) assert_eq(pds, gds) - col = column.as_column(cudf.Series([]), dtype="object") + col = column.as_column(cudf.Series([], dtype="float64"), dtype="object") assert_eq(col.dtype, np.dtype("object")) gds = cudf.Series(col) pds = pd.Series(pd.Series([], dtype="object")) @@ -4469,7 +4470,7 @@ def test_create_dataframe_column(): ) def test_series_values_host_property(data): pds = pd.Series(data=data, dtype=None if data else float) - gds = cudf.Series(data) + gds = _create_cudf_series_float64_default(data) np.testing.assert_array_equal(pds.values, gds.values_host) @@ -4492,7 +4493,7 @@ def test_series_values_host_property(data): ) def test_series_values_property(data): pds = pd.Series(data=data, dtype=None if data else float) - gds = cudf.Series(data) + gds = _create_cudf_series_float64_default(data) gds_vals = gds.values assert isinstance(gds_vals, cupy.ndarray) np.testing.assert_array_equal(gds_vals.get(), pds.values) diff --git a/python/cudf/cudf/tests/test_dropna.py b/python/cudf/cudf/tests/test_dropna.py index 3277e52edb3..1def6597706 100644 --- a/python/cudf/cudf/tests/test_dropna.py +++ b/python/cudf/cudf/tests/test_dropna.py @@ -1,11 +1,14 @@ -# Copyright (c) 2020-2022, NVIDIA CORPORATION. +# Copyright (c) 2020-2023, NVIDIA CORPORATION. 
import numpy as np import pandas as pd import pytest import cudf -from cudf.testing._utils import _create_pandas_series, assert_eq +from cudf.testing._utils import ( + _create_pandas_series_float64_default, + assert_eq, +) @pytest.mark.parametrize( @@ -22,7 +25,7 @@ @pytest.mark.parametrize("inplace", [True, False]) def test_dropna_series(data, nulls, inplace): - psr = _create_pandas_series(data) + psr = _create_pandas_series_float64_default(data) if len(data) > 0: if nulls == "one": diff --git a/python/cudf/cudf/tests/test_duplicates.py b/python/cudf/cudf/tests/test_duplicates.py index f77e7b4d775..ddbfdf5eee2 100644 --- a/python/cudf/cudf/tests/test_duplicates.py +++ b/python/cudf/cudf/tests/test_duplicates.py @@ -10,7 +10,7 @@ import cudf from cudf import concat from cudf.testing._utils import ( - _create_pandas_series, + _create_pandas_series_float64_default, assert_eq, assert_exceptions_equal, ) @@ -62,7 +62,7 @@ def test_duplicated_with_misspelled_column_name(subset): ], ) def test_drop_duplicates_series(data, keep): - pds = _create_pandas_series(data) + pds = _create_pandas_series_float64_default(data) gds = cudf.from_pandas(pds) assert_df(pds.drop_duplicates(keep=keep), gds.drop_duplicates(keep=keep)) diff --git a/python/cudf/cudf/tests/test_index.py b/python/cudf/cudf/tests/test_index.py index b3791cddce3..29232f63e90 100644 --- a/python/cudf/cudf/tests/test_index.py +++ b/python/cudf/cudf/tests/test_index.py @@ -30,7 +30,8 @@ SIGNED_INTEGER_TYPES, SIGNED_TYPES, UNSIGNED_TYPES, - _create_pandas_series, + _create_cudf_series_float64_default, + _create_pandas_series_float64_default, assert_column_memory_eq, assert_column_memory_ne, assert_eq, @@ -1006,8 +1007,8 @@ def test_index_equal_misc(data, other): actual = gd_data.equals(np.array(gd_other)) assert_eq(expected, actual) - expected = pd_data.equals(_create_pandas_series(pd_other)) - actual = gd_data.equals(cudf.Series(gd_other)) + expected = pd_data.equals(_create_pandas_series_float64_default(pd_other)) + actual = gd_data.equals(_create_cudf_series_float64_default(gd_other)) assert_eq(expected, actual) expected = pd_data.astype("category").equals(pd_other) @@ -2275,7 +2276,7 @@ def test_index_nan_as_null(data, nan_idx, NA_idx, nan_as_null): ], ) def test_isin_index(data, values): - psr = _create_pandas_series(data) + psr = _create_pandas_series_float64_default(data) gsr = cudf.Series.from_pandas(psr) got = gsr.index.isin(values) @@ -2780,6 +2781,13 @@ def test_index_empty_from_pandas(request, dtype): assert_eq(pidx, gidx) +def test_empty_index_init(): + pidx = pd.Index([]) + gidx = cudf.Index([]) + + assert_eq(pidx, gidx) + + @pytest.mark.parametrize( "data", [[1, 2, 3], ["ab", "cd", "e", None], range(0, 10)] ) diff --git a/python/cudf/cudf/tests/test_rolling.py b/python/cudf/cudf/tests/test_rolling.py index b4e0983a9e3..43fa83e1735 100644 --- a/python/cudf/cudf/tests/test_rolling.py +++ b/python/cudf/cudf/tests/test_rolling.py @@ -9,7 +9,10 @@ import cudf from cudf.core._compat import PANDAS_GE_150, PANDAS_LT_140 -from cudf.testing._utils import _create_pandas_series, assert_eq +from cudf.testing._utils import ( + _create_pandas_series_float64_default, + assert_eq, +) from cudf.testing.dataset_generator import rand_dataframe @@ -55,7 +58,7 @@ def test_rolling_series_basic(data, index, agg, nulls, center): elif nulls == "all": data = [np.nan] * len(data) - psr = _create_pandas_series(data, index=index) + psr = _create_pandas_series_float64_default(data, index=index) gsr = cudf.Series(psr) for window_size in range(1, len(data) + 
1): for min_periods in range(1, window_size + 1): @@ -313,7 +316,7 @@ def test_rolling_getitem_window(): @pytest.mark.parametrize("center", [True, False]) def test_rollling_series_numba_udf_basic(data, index, center): - psr = _create_pandas_series(data, index=index) + psr = _create_pandas_series_float64_default(data, index=index) gsr = cudf.from_pandas(psr) def some_func(A): diff --git a/python/cudf/cudf/tests/test_series.py b/python/cudf/cudf/tests/test_series.py index b1e991106ee..cfa571a0f54 100644 --- a/python/cudf/cudf/tests/test_series.py +++ b/python/cudf/cudf/tests/test_series.py @@ -19,7 +19,8 @@ NUMERIC_TYPES, SERIES_OR_INDEX_NAMES, TIMEDELTA_TYPES, - _create_pandas_series, + _create_cudf_series_float64_default, + _create_pandas_series_float64_default, assert_eq, assert_exceptions_equal, expect_warning_if, @@ -400,8 +401,8 @@ def test_series_tolist(data): [[], [None, None], ["a"], ["a", "b", "c"] * 500, [1.0, 2.0, 0.3] * 57], ) def test_series_size(data): - psr = _create_pandas_series(data) - gsr = cudf.Series(data) + psr = _create_pandas_series_float64_default(data) + gsr = _create_cudf_series_float64_default(data) assert_eq(psr.size, gsr.size) @@ -487,7 +488,7 @@ def test_series_describe_other_types(ps): ) @pytest.mark.parametrize("na_sentinel", [99999, 11, -1, 0]) def test_series_factorize(data, na_sentinel): - gsr = cudf.Series(data) + gsr = _create_cudf_series_float64_default(data) psr = gsr.to_pandas() with pytest.warns(FutureWarning): @@ -510,7 +511,7 @@ def test_series_factorize(data, na_sentinel): ) @pytest.mark.parametrize("use_na_sentinel", [True, False]) def test_series_factorize_use_na_sentinel(data, use_na_sentinel): - gsr = cudf.Series(data) + gsr = _create_cudf_series_float64_default(data) psr = gsr.to_pandas(nullable=True) expected_labels, expected_cats = psr.factorize( @@ -534,7 +535,7 @@ def test_series_factorize_use_na_sentinel(data, use_na_sentinel): ) @pytest.mark.parametrize("sort", [True, False]) def test_series_factorize_sort(data, sort): - gsr = cudf.Series(data) + gsr = _create_cudf_series_float64_default(data) psr = gsr.to_pandas(nullable=True) expected_labels, expected_cats = psr.factorize(sort=sort) @@ -734,7 +735,7 @@ def test_series_value_counts_optional_arguments(ascending, dropna, normalize): ], dtype="datetime64[ns]", ), - cudf.Series(name="empty series"), + cudf.Series(name="empty series", dtype="float64"), cudf.Series(["a", "b", "c", " ", "a", "b", "z"], dtype="category"), ], ) @@ -1415,7 +1416,7 @@ def test_series_hash_values_invalid_method(): def test_set_index_unequal_length(): - s = cudf.Series() + s = cudf.Series(dtype="float64") with pytest.raises(ValueError): s.index = [1, 2, 3] @@ -1682,7 +1683,7 @@ def test_series_nunique_index(data): ], ) def test_axes(data): - csr = cudf.Series(data) + csr = _create_cudf_series_float64_default(data) psr = csr.to_pandas() expected = psr.axes @@ -1760,7 +1761,7 @@ def test_series_truncate_datetimeindex(): ) def test_isin_numeric(data, values): index = np.random.randint(0, 100, len(data)) - psr = _create_pandas_series(data, index=index) + psr = _create_pandas_series_float64_default(data, index=index) gsr = cudf.Series.from_pandas(psr, nan_as_null=False) expected = psr.isin(values) @@ -1820,7 +1821,7 @@ def test_fill_new_category(): ], ) def test_isin_datetime(data, values): - psr = _create_pandas_series(data) + psr = _create_pandas_series_float64_default(data) gsr = cudf.Series.from_pandas(psr) got = gsr.isin(values) @@ -1849,7 +1850,7 @@ def test_isin_datetime(data, values): ], ) def 
test_isin_string(data, values): - psr = _create_pandas_series(data) + psr = _create_pandas_series_float64_default(data) gsr = cudf.Series.from_pandas(psr) got = gsr.isin(values) @@ -1878,7 +1879,7 @@ def test_isin_string(data, values): ], ) def test_isin_categorical(data, values): - psr = _create_pandas_series(data) + psr = _create_pandas_series_float64_default(data) gsr = cudf.Series.from_pandas(psr) got = gsr.isin(values) @@ -2099,7 +2100,7 @@ def test_series_to_dict(into): ], ) def test_series_hasnans(data): - gs = cudf.Series(data, nan_as_null=False) + gs = _create_cudf_series_float64_default(data, nan_as_null=False) ps = gs.to_pandas(nullable=True) assert_eq(gs.hasnans, ps.hasnans) @@ -2170,8 +2171,8 @@ def test_series_init_dict_with_index(data, index): "index", [None, ["b", "c"], ["d", "a", "c", "b"], ["a"]] ) def test_series_init_scalar_with_index(data, index): - pandas_series = _create_pandas_series(data, index=index) - cudf_series = cudf.Series(data, index=index) + pandas_series = _create_pandas_series_float64_default(data, index=index) + cudf_series = _create_cudf_series_float64_default(data, index=index) assert_eq( pandas_series, @@ -2313,7 +2314,15 @@ def test_series_round_builtin(data, digits): assert_eq(expected, actual) +def test_series_empty_warning(): + with pytest.warns(FutureWarning): + expected = pd.Series([]) + with pytest.warns(FutureWarning): + actual = cudf.Series([]) + assert_eq(expected, actual) + + def test_series_count_invalid_param(): - s = cudf.Series([]) + s = cudf.Series([], dtype="float64") with pytest.raises(TypeError): s.count(skipna=True) diff --git a/python/cudf/cudf/tests/test_stats.py b/python/cudf/cudf/tests/test_stats.py index 463cdb8a7f4..3ac605a1a4d 100644 --- a/python/cudf/cudf/tests/test_stats.py +++ b/python/cudf/cudf/tests/test_stats.py @@ -10,7 +10,8 @@ import cudf from cudf.datasets import randomdata from cudf.testing._utils import ( - _create_pandas_series, + _create_cudf_series_float64_default, + _create_pandas_series_float64_default, assert_eq, assert_exceptions_equal, expect_warning_if, @@ -222,8 +223,8 @@ def test_approx_quantiles_int(): ) def test_misc_quantiles(data, q): - pdf_series = _create_pandas_series(data) - gdf_series = cudf.Series(data) + pdf_series = _create_pandas_series_float64_default(data) + gdf_series = _create_cudf_series_float64_default(data) expected = pdf_series.quantile(q.get() if isinstance(q, cp.ndarray) else q) actual = gdf_series.quantile(q) @@ -242,7 +243,7 @@ def test_misc_quantiles(data, q): [5, 10, 53, None, np.nan, None, 12, 43, -423], nan_as_null=False ), cudf.Series([1.1032, 2.32, 43.4, 13, -312.0], index=[0, 4, 3, 19, 6]), - cudf.Series([]), + cudf.Series([], dtype="float64"), cudf.Series([-3]), ], ) @@ -292,7 +293,7 @@ def test_kurt_skew_error(op): [5, 10, 53, None, np.nan, None, 12, 43, -423], nan_as_null=False ), cudf.Series([1.1032, 2.32, 43.4, 13, -312.0], index=[0, 4, 3, 19, 6]), - cudf.Series([]), + cudf.Series([], dtype="float64"), cudf.Series([-3]), ], ) @@ -348,7 +349,7 @@ def test_series_median(dtype, num_na): np.zeros(100), np.array([1.123, 2.343, np.nan, 0.0]), np.array([-2, 3.75, 6, None, None, None, -8.5, None, 4.2]), - cudf.Series([]), + cudf.Series([], dtype="float64"), cudf.Series([-3]), ], ) @@ -376,7 +377,7 @@ def test_series_pct_change(data, periods, fill_method): np.array([1.123, 2.343, np.nan, 0.0]), cudf.Series([5, 10, 53, None, np.nan, None], nan_as_null=False), cudf.Series([1.1, 2.32, 43.4], index=[0, 4, 3]), - cudf.Series([]), + cudf.Series([], dtype="float64"), 
cudf.Series([-3]), ], ) @@ -420,7 +421,7 @@ def test_cov1d(data1, data2): np.array([1.123, 2.343, np.nan, 0.0]), cudf.Series([5, 10, 53, None, np.nan, None], nan_as_null=False), cudf.Series([1.1032, 2.32, 43.4], index=[0, 4, 3]), - cudf.Series([]), + cudf.Series([], dtype="float64"), cudf.Series([-3]), ], ) @@ -524,14 +525,14 @@ def test_df_corr(method): ) @pytest.mark.parametrize("skipna", [True, False]) def test_nans_stats(data, ops, skipna): - psr = _create_pandas_series(data) - gsr = cudf.Series(data, nan_as_null=False) + psr = _create_pandas_series_float64_default(data) + gsr = _create_cudf_series_float64_default(data, nan_as_null=False) assert_eq( getattr(psr, ops)(skipna=skipna), getattr(gsr, ops)(skipna=skipna) ) - gsr = cudf.Series(data, nan_as_null=False) + gsr = _create_cudf_series_float64_default(data, nan_as_null=False) # Since there is no concept of `nan_as_null` in pandas, # nulls will be returned in the operations. So only # testing for `skipna=True` when `nan_as_null=False` From eb6d134d169ed077000ee7d075d5363dec066578 Mon Sep 17 00:00:00 2001 From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> Date: Wed, 20 Sep 2023 06:49:14 -1000 Subject: [PATCH 083/150] Don't sort columns for DataFrame init from list of Series (#14136) closes #14132 This PR removes the re-sorting of dataframe columns when initialized by a series list. Authors: - Matthew Roeschke (https://github.com/mroeschke) Approvers: - GALI PREM SAGAR (https://github.com/galipremsagar) - Bradley Dice (https://github.com/bdice) URL: https://github.com/rapidsai/cudf/pull/14136 --- python/cudf/cudf/core/dataframe.py | 4 +--- python/cudf/cudf/tests/test_dataframe.py | 12 ++++++++++++ 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/python/cudf/cudf/core/dataframe.py b/python/cudf/cudf/core/dataframe.py index 6e664468644..1a780cc9e9f 100644 --- a/python/cudf/cudf/core/dataframe.py +++ b/python/cudf/cudf/core/dataframe.py @@ -7885,9 +7885,7 @@ def _get_union_of_indices(indexes): return indexes[0] else: merged_index = cudf.core.index.GenericIndex._concat(indexes) - merged_index = merged_index.drop_duplicates() - inds = merged_index._values.argsort() - return merged_index.take(inds) + return merged_index.drop_duplicates() def _get_union_of_series_names(series_list): diff --git a/python/cudf/cudf/tests/test_dataframe.py b/python/cudf/cudf/tests/test_dataframe.py index bc85987c612..6180162ecdd 100644 --- a/python/cudf/cudf/tests/test_dataframe.py +++ b/python/cudf/cudf/tests/test_dataframe.py @@ -221,6 +221,18 @@ def test_init_unaligned_with_index(): assert_eq(pdf, gdf, check_dtype=False) +def test_init_series_list_columns_unsort(): + pseries = [ + pd.Series(i, index=["b", "a", "c"], name=str(i)) for i in range(3) + ] + gseries = [ + cudf.Series(i, index=["b", "a", "c"], name=str(i)) for i in range(3) + ] + pdf = pd.DataFrame(pseries) + gdf = cudf.DataFrame(gseries) + assert_eq(pdf, gdf) + + def test_series_basic(): # Make series from buffer a1 = np.arange(10, dtype=np.float64) From 40d4cc5565f600864c3b16f30d3d26fd4904deaf Mon Sep 17 00:00:00 2001 From: Ed Seidl Date: Wed, 20 Sep 2023 11:03:44 -0700 Subject: [PATCH 084/150] Refactor parquet thrift reader (#14097) Refactors the current `CompactProtocolReader` used to parse parquet file metadata. The main goal of the refactor is to allow easier use of `std::optional` fields in the thrift structs to prevent situations as in #14024 where an optional field is an empty string. 
The writer cannot distinguish between present-but-empty and not-present, so chooses the latter when writing the field. This PR adds a `ParquetFieldOptional` functor that can wrap the other field functors, obviating the need to write a new optional functor for each type. Authors: - Ed Seidl (https://github.com/etseidl) Approvers: - Vukasin Milovanovic (https://github.com/vuule) - Yunsong Wang (https://github.com/PointKernel) URL: https://github.com/rapidsai/cudf/pull/14097 --- .../io/parquet/compact_protocol_reader.cpp | 691 +++++++++++++++--- .../io/parquet/compact_protocol_reader.hpp | 586 +-------------- .../io/parquet/compact_protocol_writer.cpp | 30 +- .../io/parquet/compact_protocol_writer.hpp | 3 + cpp/src/io/parquet/parquet.hpp | 18 +- cpp/src/io/parquet/parquet_common.hpp | 2 +- cpp/src/io/parquet/writer_impl.cu | 38 +- 7 files changed, 662 insertions(+), 706 deletions(-) diff --git a/cpp/src/io/parquet/compact_protocol_reader.cpp b/cpp/src/io/parquet/compact_protocol_reader.cpp index ae11af92f78..5c7b8ca3f8c 100644 --- a/cpp/src/io/parquet/compact_protocol_reader.cpp +++ b/cpp/src/io/parquet/compact_protocol_reader.cpp @@ -18,27 +18,474 @@ #include #include +#include #include namespace cudf { namespace io { namespace parquet { -uint8_t const CompactProtocolReader::g_list2struct[16] = {0, - 1, - 2, - ST_FLD_BYTE, - ST_FLD_DOUBLE, - 5, - ST_FLD_I16, - 7, - ST_FLD_I32, - 9, - ST_FLD_I64, - ST_FLD_BINARY, - ST_FLD_STRUCT, - ST_FLD_MAP, - ST_FLD_SET, - ST_FLD_LIST}; + +/** + * @brief Base class for parquet field functors. + * + * Holds the field value used by all of the specialized functors. + */ +class parquet_field { + private: + int _field_val; + + protected: + parquet_field(int f) : _field_val(f) {} + + public: + virtual ~parquet_field() = default; + int field() const { return _field_val; } +}; + +/** + * @brief Abstract base class for list functors. 
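+ * + * Reads the list header, verifies the element type, resizes the output vector, and delegates per-element parsing to the derived class's bound read function.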
+ */ +template +class parquet_field_list : public parquet_field { + private: + using read_func_type = std::function; + FieldType _expected_type; + read_func_type _read_value; + + protected: + std::vector& val; + + void bind_read_func(read_func_type fn) { _read_value = fn; } + + parquet_field_list(int f, std::vector& v, FieldType t) + : parquet_field(f), _expected_type(t), val(v) + { + } + + public: + inline bool operator()(CompactProtocolReader* cpr, int field_type) + { + if (field_type != ST_FLD_LIST) { return true; } + auto const [t, n] = cpr->get_listh(); + if (t != _expected_type) { return true; } + val.resize(n); + for (uint32_t i = 0; i < n; i++) { + if (_read_value(i, cpr)) { return true; } + } + return false; + } +}; + +/** + * @brief Functor to set value to bool read from CompactProtocolReader + * + * bool doesn't actually encode a value, we just use the field type to indicate true/false + * + * @return True if field type is not bool + */ +class parquet_field_bool : public parquet_field { + bool& val; + + public: + parquet_field_bool(int f, bool& v) : parquet_field(f), val(v) {} + + inline bool operator()(CompactProtocolReader* cpr, int field_type) + { + if (field_type != ST_FLD_TRUE && field_type != ST_FLD_FALSE) { return true; } + val = field_type == ST_FLD_TRUE; + return false; + } +}; + +/** + * @brief Functor to read a vector of booleans from CompactProtocolReader + * + * @return True if field types mismatch or if the process of reading a + * bool fails + */ +struct parquet_field_bool_list : public parquet_field_list { + parquet_field_bool_list(int f, std::vector& v) : parquet_field_list(f, v, ST_FLD_TRUE) + { + auto const read_value = [this](uint32_t i, CompactProtocolReader* cpr) { + auto const current_byte = cpr->getb(); + if (current_byte != ST_FLD_TRUE && current_byte != ST_FLD_FALSE) { return true; } + this->val[i] = current_byte == ST_FLD_TRUE; + return false; + }; + bind_read_func(read_value); + } +}; + +/** + * @brief Base type for a functor that reads an integer from CompactProtocolReader + * + * Assuming signed ints since the parquet spec does not use unsigned ints anywhere. 
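+ * Reads a single byte for int8 fields and a zigzag-encoded varint for wider integer types.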
+ * + * @return True if there is a type mismatch + */ +template +class parquet_field_int : public parquet_field { + static constexpr bool is_byte = std::is_same_v; + + T& val; + + public: + parquet_field_int(int f, T& v) : parquet_field(f), val(v) {} + + inline bool operator()(CompactProtocolReader* cpr, int field_type) + { + if constexpr (is_byte) { + val = cpr->getb(); + } else { + val = cpr->get_zigzag(); + } + return (field_type != EXPECTED_TYPE); + } +}; + +using parquet_field_int8 = parquet_field_int; +using parquet_field_int32 = parquet_field_int; +using parquet_field_int64 = parquet_field_int; + +/** + * @brief Functor to read a vector of integers from CompactProtocolReader + * + * @return True if field types mismatch or if the process of reading an + * integer fails + */ +template +struct parquet_field_int_list : public parquet_field_list { + parquet_field_int_list(int f, std::vector& v) : parquet_field_list(f, v, EXPECTED_TYPE) + { + auto const read_value = [this](uint32_t i, CompactProtocolReader* cpr) { + this->val[i] = cpr->get_zigzag(); + return false; + }; + this->bind_read_func(read_value); + } +}; + +using parquet_field_int64_list = parquet_field_int_list; + +/** + * @brief Functor to read a string from CompactProtocolReader + * + * @return True if field type mismatches or if size of string exceeds bounds + * of the CompactProtocolReader + */ +class parquet_field_string : public parquet_field { + std::string& val; + + public: + parquet_field_string(int f, std::string& v) : parquet_field(f), val(v) {} + + inline bool operator()(CompactProtocolReader* cpr, int field_type) + { + if (field_type != ST_FLD_BINARY) { return true; } + auto const n = cpr->get_u32(); + if (n < static_cast(cpr->m_end - cpr->m_cur)) { + val.assign(reinterpret_cast(cpr->m_cur), n); + cpr->m_cur += n; + return false; + } else { + return true; + } + } +}; + +/** + * @brief Functor to read a vector of strings from CompactProtocolReader + * + * @return True if field types mismatch or if the process of reading a + * string fails + */ +struct parquet_field_string_list : public parquet_field_list { + parquet_field_string_list(int f, std::vector& v) + : parquet_field_list(f, v, ST_FLD_BINARY) + { + auto const read_value = [this](uint32_t i, CompactProtocolReader* cpr) { + auto const l = cpr->get_u32(); + if (l < static_cast(cpr->m_end - cpr->m_cur)) { + this->val[i].assign(reinterpret_cast(cpr->m_cur), l); + cpr->m_cur += l; + } else { + return true; + } + return false; + }; + bind_read_func(read_value); + } +}; + +/** + * @brief Functor to set value to enum read from CompactProtocolReader + * + * @return True if field type is not int32 + */ +template +class parquet_field_enum : public parquet_field { + Enum& val; + + public: + parquet_field_enum(int f, Enum& v) : parquet_field(f), val(v) {} + inline bool operator()(CompactProtocolReader* cpr, int field_type) + { + val = static_cast(cpr->get_i32()); + return (field_type != ST_FLD_I32); + } +}; + +/** + * @brief Functor to read a vector of enums from CompactProtocolReader + * + * @return True if field types mismatch or if the process of reading an + * enum fails + */ +template +struct parquet_field_enum_list : public parquet_field_list { + parquet_field_enum_list(int f, std::vector& v) : parquet_field_list(f, v, ST_FLD_I32) + { + auto const read_value = [this](uint32_t i, CompactProtocolReader* cpr) { + this->val[i] = static_cast(cpr->get_i32()); + return false; + }; + this->bind_read_func(read_value); + } +}; + +/** + * @brief Functor to read a structure from 
CompactProtocolReader + * + * @return True if field types mismatch or if the process of reading a + * struct fails + */ +template +class parquet_field_struct : public parquet_field { + T& val; + + public: + parquet_field_struct(int f, T& v) : parquet_field(f), val(v) {} + + inline bool operator()(CompactProtocolReader* cpr, int field_type) + { + return (field_type != ST_FLD_STRUCT || !(cpr->read(&val))); + } +}; + +/** + * @brief Functor to read optional structures in unions + * + * @return True if field types mismatch + */ +template +class parquet_field_union_struct : public parquet_field { + E& enum_val; + thrust::optional& val; // union structs are always wrapped in std::optional + + public: + parquet_field_union_struct(int f, E& ev, thrust::optional& v) + : parquet_field(f), enum_val(ev), val(v) + { + } + + inline bool operator()(CompactProtocolReader* cpr, int field_type) + { + T v; + bool const res = parquet_field_struct(field(), v).operator()(cpr, field_type); + if (!res) { + val = v; + enum_val = static_cast(field()); + } + return res; + } +}; + +/** + * @brief Functor to read empty structures in unions + * + * Added to avoid having to define read() functions for empty structs contained in unions. + * + * @return True if field types mismatch + */ +template +class parquet_field_union_enumerator : public parquet_field { + E& val; + + public: + parquet_field_union_enumerator(int f, E& v) : parquet_field(f), val(v) {} + + inline bool operator()(CompactProtocolReader* cpr, int field_type) + { + if (field_type != ST_FLD_STRUCT) { return true; } + cpr->skip_struct_field(field_type); + val = static_cast(field()); + return false; + } +}; + +/** + * @brief Functor to read a vector of structures from CompactProtocolReader + * + * @return True if field types mismatch or if the process of reading a + * struct fails + */ +template +struct parquet_field_struct_list : public parquet_field_list { + parquet_field_struct_list(int f, std::vector& v) : parquet_field_list(f, v, ST_FLD_STRUCT) + { + auto const read_value = [this](uint32_t i, CompactProtocolReader* cpr) { + if (not cpr->read(&this->val[i])) { return true; } + return false; + }; + this->bind_read_func(read_value); + } +}; + +// TODO(ets): replace current union handling (which mirrors thrift) to use std::optional fields +// in a struct +/** + * @brief Functor to read a union member from CompactProtocolReader + * + * @tparam is_empty True if tparam `T` type is empty type, else false. 
+ * + * @return True if field types mismatch or if the process of reading a + * union member fails + */ +template +class ParquetFieldUnionFunctor : public parquet_field { + bool& is_set; + T& val; + + public: + ParquetFieldUnionFunctor(int f, bool& b, T& v) : parquet_field(f), is_set(b), val(v) {} + + inline bool operator()(CompactProtocolReader* cpr, int field_type) + { + if (field_type != ST_FLD_STRUCT) { + return true; + } else { + is_set = true; + return !cpr->read(&val); + } + } +}; + +template +class ParquetFieldUnionFunctor : public parquet_field { + bool& is_set; + T& val; + + public: + ParquetFieldUnionFunctor(int f, bool& b, T& v) : parquet_field(f), is_set(b), val(v) {} + + inline bool operator()(CompactProtocolReader* cpr, int field_type) + { + if (field_type != ST_FLD_STRUCT) { + return true; + } else { + is_set = true; + cpr->skip_struct_field(field_type); + return false; + } + } +}; + +template +ParquetFieldUnionFunctor> ParquetFieldUnion(int f, bool& b, T& v) +{ + return ParquetFieldUnionFunctor>(f, b, v); +} + +/** + * @brief Functor to read a binary from CompactProtocolReader + * + * @return True if field type mismatches or if size of binary exceeds bounds + * of the CompactProtocolReader + */ +class parquet_field_binary : public parquet_field { + std::vector& val; + + public: + parquet_field_binary(int f, std::vector& v) : parquet_field(f), val(v) {} + + inline bool operator()(CompactProtocolReader* cpr, int field_type) + { + if (field_type != ST_FLD_BINARY) { return true; } + auto const n = cpr->get_u32(); + if (n <= static_cast(cpr->m_end - cpr->m_cur)) { + val.resize(n); + val.assign(cpr->m_cur, cpr->m_cur + n); + cpr->m_cur += n; + return false; + } else { + return true; + } + } +}; + +/** + * @brief Functor to read a vector of binaries from CompactProtocolReader + * + * @return True if field types mismatch or if the process of reading a + * binary fails + */ +struct parquet_field_binary_list : public parquet_field_list> { + parquet_field_binary_list(int f, std::vector>& v) + : parquet_field_list(f, v, ST_FLD_BINARY) + { + auto const read_value = [this](uint32_t i, CompactProtocolReader* cpr) { + auto const l = cpr->get_u32(); + if (l <= static_cast(cpr->m_end - cpr->m_cur)) { + val[i].resize(l); + val[i].assign(cpr->m_cur, cpr->m_cur + l); + cpr->m_cur += l; + } else { + return true; + } + return false; + }; + bind_read_func(read_value); + } +}; + +/** + * @brief Functor to read a struct from CompactProtocolReader + * + * @return True if field type mismatches + */ +class parquet_field_struct_blob : public parquet_field { + std::vector& val; + + public: + parquet_field_struct_blob(int f, std::vector& v) : parquet_field(f), val(v) {} + inline bool operator()(CompactProtocolReader* cpr, int field_type) + { + if (field_type != ST_FLD_STRUCT) { return true; } + uint8_t const* const start = cpr->m_cur; + cpr->skip_struct_field(field_type); + if (cpr->m_cur > start) { val.assign(start, cpr->m_cur - 1); } + return false; + } +}; + +/** + * @brief functor to wrap functors for optional fields + */ +template +class parquet_field_optional : public parquet_field { + thrust::optional& val; + + public: + parquet_field_optional(int f, thrust::optional& v) : parquet_field(f), val(v) {} + + inline bool operator()(CompactProtocolReader* cpr, int field_type) + { + T v; + bool const res = FieldFunctor(field(), v).operator()(cpr, field_type); + if (!res) { val = v; } + return res; + } +}; /** * @brief Skips the number of bytes according to the specified struct type @@ -59,22 +506,21 @@ 
bool CompactProtocolReader::skip_struct_field(int t, int depth) case ST_FLD_BYTE: skip_bytes(1); break; case ST_FLD_DOUBLE: skip_bytes(8); break; case ST_FLD_BINARY: skip_bytes(get_u32()); break; - case ST_FLD_LIST: + case ST_FLD_LIST: [[fallthrough]]; case ST_FLD_SET: { - int c = getb(); - int n = c >> 4; - if (n == 0xf) n = get_i32(); - t = g_list2struct[c & 0xf]; - if (depth > 10) return false; - for (int32_t i = 0; i < n; i++) + auto const [t, n] = get_listh(); + if (depth > 10) { return false; } + for (uint32_t i = 0; i < n; i++) { skip_struct_field(t, depth + 1); + } } break; case ST_FLD_STRUCT: for (;;) { - int c = getb(); - t = c & 0xf; - if (!c) break; - if (depth > 10) return false; + int const c = getb(); + t = c & 0xf; + if (c == 0) { break; } // end of struct + if ((c & 0xf0) == 0) { get_i16(); } // field id is not a delta + if (depth > 10) { return false; } skip_struct_field(t, depth + 1); } break; @@ -125,11 +571,11 @@ inline bool function_builder(CompactProtocolReader* cpr, std::tuple int field = 0; while (true) { int const current_byte = cpr->getb(); - if (!current_byte) break; - int const field_delta = current_byte >> 4; - int const field_type = current_byte & 0xf; - field = field_delta ? field + field_delta : cpr->get_i16(); - bool exit_function = FunctionSwitchImpl::run(cpr, field_type, field, op); + if (!current_byte) { break; } + int const field_delta = current_byte >> 4; + int const field_type = current_byte & 0xf; + field = field_delta ? field + field_delta : cpr->get_i16(); + bool const exit_function = FunctionSwitchImpl::run(cpr, field_type, field, op); if (exit_function) { return false; } } return true; @@ -137,27 +583,30 @@ inline bool function_builder(CompactProtocolReader* cpr, std::tuple bool CompactProtocolReader::read(FileMetaData* f) { - auto op = std::make_tuple(ParquetFieldInt32(1, f->version), - ParquetFieldStructList(2, f->schema), - ParquetFieldInt64(3, f->num_rows), - ParquetFieldStructList(4, f->row_groups), - ParquetFieldStructList(5, f->key_value_metadata), - ParquetFieldString(6, f->created_by)); + using optional_list_column_order = + parquet_field_optional, parquet_field_struct_list>; + auto op = std::make_tuple(parquet_field_int32(1, f->version), + parquet_field_struct_list(2, f->schema), + parquet_field_int64(3, f->num_rows), + parquet_field_struct_list(4, f->row_groups), + parquet_field_struct_list(5, f->key_value_metadata), + parquet_field_string(6, f->created_by), + optional_list_column_order(7, f->column_orders)); return function_builder(this, op); } bool CompactProtocolReader::read(SchemaElement* s) { - auto op = std::make_tuple(ParquetFieldEnum(1, s->type), - ParquetFieldInt32(2, s->type_length), - ParquetFieldEnum(3, s->repetition_type), - ParquetFieldString(4, s->name), - ParquetFieldInt32(5, s->num_children), - ParquetFieldEnum(6, s->converted_type), - ParquetFieldInt32(7, s->decimal_scale), - ParquetFieldInt32(8, s->decimal_precision), - ParquetFieldOptionalInt32(9, s->field_id), - ParquetFieldStruct(10, s->logical_type)); + auto op = std::make_tuple(parquet_field_enum(1, s->type), + parquet_field_int32(2, s->type_length), + parquet_field_enum(3, s->repetition_type), + parquet_field_string(4, s->name), + parquet_field_int32(5, s->num_children), + parquet_field_enum(6, s->converted_type), + parquet_field_int32(7, s->decimal_scale), + parquet_field_int32(8, s->decimal_precision), + parquet_field_optional(9, s->field_id), + parquet_field_struct(10, s->logical_type)); return function_builder(this, op); } @@ -181,21 +630,21 @@ bool 
CompactProtocolReader::read(LogicalType* l) bool CompactProtocolReader::read(DecimalType* d) { - auto op = std::make_tuple(ParquetFieldInt32(1, d->scale), ParquetFieldInt32(2, d->precision)); + auto op = std::make_tuple(parquet_field_int32(1, d->scale), parquet_field_int32(2, d->precision)); return function_builder(this, op); } bool CompactProtocolReader::read(TimeType* t) { auto op = - std::make_tuple(ParquetFieldBool(1, t->isAdjustedToUTC), ParquetFieldStruct(2, t->unit)); + std::make_tuple(parquet_field_bool(1, t->isAdjustedToUTC), parquet_field_struct(2, t->unit)); return function_builder(this, op); } bool CompactProtocolReader::read(TimestampType* t) { auto op = - std::make_tuple(ParquetFieldBool(1, t->isAdjustedToUTC), ParquetFieldStruct(2, t->unit)); + std::make_tuple(parquet_field_bool(1, t->isAdjustedToUTC), parquet_field_struct(2, t->unit)); return function_builder(this, op); } @@ -209,123 +658,129 @@ bool CompactProtocolReader::read(TimeUnit* u) bool CompactProtocolReader::read(IntType* i) { - auto op = std::make_tuple(ParquetFieldInt8(1, i->bitWidth), ParquetFieldBool(2, i->isSigned)); + auto op = std::make_tuple(parquet_field_int8(1, i->bitWidth), parquet_field_bool(2, i->isSigned)); return function_builder(this, op); } bool CompactProtocolReader::read(RowGroup* r) { - auto op = std::make_tuple(ParquetFieldStructList(1, r->columns), - ParquetFieldInt64(2, r->total_byte_size), - ParquetFieldInt64(3, r->num_rows)); + auto op = std::make_tuple(parquet_field_struct_list(1, r->columns), + parquet_field_int64(2, r->total_byte_size), + parquet_field_int64(3, r->num_rows)); return function_builder(this, op); } bool CompactProtocolReader::read(ColumnChunk* c) { - auto op = std::make_tuple(ParquetFieldString(1, c->file_path), - ParquetFieldInt64(2, c->file_offset), - ParquetFieldStruct(3, c->meta_data), - ParquetFieldInt64(4, c->offset_index_offset), - ParquetFieldInt32(5, c->offset_index_length), - ParquetFieldInt64(6, c->column_index_offset), - ParquetFieldInt32(7, c->column_index_length)); + auto op = std::make_tuple(parquet_field_string(1, c->file_path), + parquet_field_int64(2, c->file_offset), + parquet_field_struct(3, c->meta_data), + parquet_field_int64(4, c->offset_index_offset), + parquet_field_int32(5, c->offset_index_length), + parquet_field_int64(6, c->column_index_offset), + parquet_field_int32(7, c->column_index_length)); return function_builder(this, op); } bool CompactProtocolReader::read(ColumnChunkMetaData* c) { - auto op = std::make_tuple(ParquetFieldEnum(1, c->type), - ParquetFieldEnumList(2, c->encodings), - ParquetFieldStringList(3, c->path_in_schema), - ParquetFieldEnum(4, c->codec), - ParquetFieldInt64(5, c->num_values), - ParquetFieldInt64(6, c->total_uncompressed_size), - ParquetFieldInt64(7, c->total_compressed_size), - ParquetFieldInt64(9, c->data_page_offset), - ParquetFieldInt64(10, c->index_page_offset), - ParquetFieldInt64(11, c->dictionary_page_offset), - ParquetFieldStruct(12, c->statistics)); + auto op = std::make_tuple(parquet_field_enum(1, c->type), + parquet_field_enum_list(2, c->encodings), + parquet_field_string_list(3, c->path_in_schema), + parquet_field_enum(4, c->codec), + parquet_field_int64(5, c->num_values), + parquet_field_int64(6, c->total_uncompressed_size), + parquet_field_int64(7, c->total_compressed_size), + parquet_field_int64(9, c->data_page_offset), + parquet_field_int64(10, c->index_page_offset), + parquet_field_int64(11, c->dictionary_page_offset), + parquet_field_struct(12, c->statistics)); return function_builder(this, op); } 
bool CompactProtocolReader::read(PageHeader* p) { - auto op = std::make_tuple(ParquetFieldEnum(1, p->type), - ParquetFieldInt32(2, p->uncompressed_page_size), - ParquetFieldInt32(3, p->compressed_page_size), - ParquetFieldStruct(5, p->data_page_header), - ParquetFieldStruct(7, p->dictionary_page_header), - ParquetFieldStruct(8, p->data_page_header_v2)); + auto op = std::make_tuple(parquet_field_enum(1, p->type), + parquet_field_int32(2, p->uncompressed_page_size), + parquet_field_int32(3, p->compressed_page_size), + parquet_field_struct(5, p->data_page_header), + parquet_field_struct(7, p->dictionary_page_header), + parquet_field_struct(8, p->data_page_header_v2)); return function_builder(this, op); } bool CompactProtocolReader::read(DataPageHeader* d) { - auto op = std::make_tuple(ParquetFieldInt32(1, d->num_values), - ParquetFieldEnum(2, d->encoding), - ParquetFieldEnum(3, d->definition_level_encoding), - ParquetFieldEnum(4, d->repetition_level_encoding)); + auto op = std::make_tuple(parquet_field_int32(1, d->num_values), + parquet_field_enum(2, d->encoding), + parquet_field_enum(3, d->definition_level_encoding), + parquet_field_enum(4, d->repetition_level_encoding)); return function_builder(this, op); } bool CompactProtocolReader::read(DictionaryPageHeader* d) { - auto op = std::make_tuple(ParquetFieldInt32(1, d->num_values), - ParquetFieldEnum(2, d->encoding)); + auto op = std::make_tuple(parquet_field_int32(1, d->num_values), + parquet_field_enum(2, d->encoding)); return function_builder(this, op); } bool CompactProtocolReader::read(DataPageHeaderV2* d) { - auto op = std::make_tuple(ParquetFieldInt32(1, d->num_values), - ParquetFieldInt32(2, d->num_nulls), - ParquetFieldInt32(3, d->num_rows), - ParquetFieldEnum(4, d->encoding), - ParquetFieldInt32(5, d->definition_levels_byte_length), - ParquetFieldInt32(6, d->repetition_levels_byte_length), - ParquetFieldBool(7, d->is_compressed)); + auto op = std::make_tuple(parquet_field_int32(1, d->num_values), + parquet_field_int32(2, d->num_nulls), + parquet_field_int32(3, d->num_rows), + parquet_field_enum(4, d->encoding), + parquet_field_int32(5, d->definition_levels_byte_length), + parquet_field_int32(6, d->repetition_levels_byte_length), + parquet_field_bool(7, d->is_compressed)); return function_builder(this, op); } bool CompactProtocolReader::read(KeyValue* k) { - auto op = std::make_tuple(ParquetFieldString(1, k->key), ParquetFieldString(2, k->value)); + auto op = std::make_tuple(parquet_field_string(1, k->key), parquet_field_string(2, k->value)); return function_builder(this, op); } bool CompactProtocolReader::read(PageLocation* p) { - auto op = std::make_tuple(ParquetFieldInt64(1, p->offset), - ParquetFieldInt32(2, p->compressed_page_size), - ParquetFieldInt64(3, p->first_row_index)); + auto op = std::make_tuple(parquet_field_int64(1, p->offset), + parquet_field_int32(2, p->compressed_page_size), + parquet_field_int64(3, p->first_row_index)); return function_builder(this, op); } bool CompactProtocolReader::read(OffsetIndex* o) { - auto op = std::make_tuple(ParquetFieldStructList(1, o->page_locations)); + auto op = std::make_tuple(parquet_field_struct_list(1, o->page_locations)); return function_builder(this, op); } bool CompactProtocolReader::read(ColumnIndex* c) { - auto op = std::make_tuple(ParquetFieldBoolList(1, c->null_pages), - ParquetFieldBinaryList(2, c->min_values), - ParquetFieldBinaryList(3, c->max_values), - ParquetFieldEnum(4, c->boundary_order), - ParquetFieldInt64List(5, c->null_counts)); + auto op = 
std::make_tuple(parquet_field_bool_list(1, c->null_pages), + parquet_field_binary_list(2, c->min_values), + parquet_field_binary_list(3, c->max_values), + parquet_field_enum(4, c->boundary_order), + parquet_field_int64_list(5, c->null_counts)); return function_builder(this, op); } bool CompactProtocolReader::read(Statistics* s) { - auto op = std::make_tuple(ParquetFieldBinary(1, s->max), - ParquetFieldBinary(2, s->min), - ParquetFieldInt64(3, s->null_count), - ParquetFieldInt64(4, s->distinct_count), - ParquetFieldBinary(5, s->max_value), - ParquetFieldBinary(6, s->min_value)); + auto op = std::make_tuple(parquet_field_binary(1, s->max), + parquet_field_binary(2, s->min), + parquet_field_int64(3, s->null_count), + parquet_field_int64(4, s->distinct_count), + parquet_field_binary(5, s->max_value), + parquet_field_binary(6, s->min_value)); + return function_builder(this, op); +} + +bool CompactProtocolReader::read(ColumnOrder* c) +{ + auto op = std::make_tuple(parquet_field_union_enumerator(1, c->type)); return function_builder(this, op); } @@ -338,7 +793,7 @@ bool CompactProtocolReader::read(Statistics* s) */ bool CompactProtocolReader::InitSchema(FileMetaData* md) { - if (static_cast(WalkSchema(md)) != md->schema.size()) return false; + if (static_cast(WalkSchema(md)) != md->schema.size()) { return false; } /* Inside FileMetaData, there is a std::vector of RowGroups and each RowGroup contains a * a std::vector of ColumnChunks. Each ColumnChunk has a member ColumnMetaData, which contains @@ -353,13 +808,15 @@ bool CompactProtocolReader::InitSchema(FileMetaData* md) for (auto const& path : column.meta_data.path_in_schema) { auto const it = [&] { // find_if starting at (current_schema_index + 1) and then wrapping - auto schema = [&](auto const& e) { return e.parent_idx == parent && e.name == path; }; - auto mid = md->schema.cbegin() + current_schema_index + 1; - auto it = std::find_if(mid, md->schema.cend(), schema); - if (it != md->schema.cend()) return it; + auto const schema = [&](auto const& e) { + return e.parent_idx == parent && e.name == path; + }; + auto const mid = md->schema.cbegin() + current_schema_index + 1; + auto const it = std::find_if(mid, md->schema.cend(), schema); + if (it != md->schema.cend()) { return it; } return std::find_if(md->schema.cbegin(), mid, schema); }(); - if (it == md->schema.cend()) return false; + if (it == md->schema.cend()) { return false; } current_schema_index = std::distance(md->schema.cbegin(), it); column.schema_idx = current_schema_index; parent = current_schema_index; @@ -401,9 +858,9 @@ int CompactProtocolReader::WalkSchema( if (e->num_children > 0) { for (int i = 0; i < e->num_children; i++) { e->children_idx.push_back(idx); - int idx_old = idx; - idx = WalkSchema(md, idx, parent_idx, max_def_level, max_rep_level); - if (idx <= idx_old) break; // Error + int const idx_old = idx; + idx = WalkSchema(md, idx, parent_idx, max_def_level, max_rep_level); + if (idx <= idx_old) { break; } // Error } } return idx; diff --git a/cpp/src/io/parquet/compact_protocol_reader.hpp b/cpp/src/io/parquet/compact_protocol_reader.hpp index 62ccacaac37..619815db503 100644 --- a/cpp/src/io/parquet/compact_protocol_reader.hpp +++ b/cpp/src/io/parquet/compact_protocol_reader.hpp @@ -22,6 +22,7 @@ #include #include #include +#include #include namespace cudf { @@ -40,9 +41,6 @@ namespace parquet { * compression codecs are supported yet. 
*/ class CompactProtocolReader { - protected: - static const uint8_t g_list2struct[16]; - public: explicit CompactProtocolReader(uint8_t const* base = nullptr, size_t len = 0) { init(base, len); } void init(uint8_t const* base, size_t len) @@ -57,45 +55,46 @@ class CompactProtocolReader { bytecnt = std::min(bytecnt, (size_t)(m_end - m_cur)); m_cur += bytecnt; } - uint32_t get_u32() noexcept + + // returns a varint encoded integer + template + T get_varint() noexcept { - uint32_t v = 0; + T v = 0; for (uint32_t l = 0;; l += 7) { - uint32_t c = getb(); + T c = getb(); v |= (c & 0x7f) << l; - if (c < 0x80) break; + if (c < 0x80) { break; } } return v; } - uint64_t get_u64() noexcept - { - uint64_t v = 0; - for (uint64_t l = 0;; l += 7) { - uint64_t c = getb(); - v |= (c & 0x7f) << l; - if (c < 0x80) break; - } - return v; - } - int32_t get_i16() noexcept { return get_i32(); } - int32_t get_i32() noexcept - { - uint32_t u = get_u32(); - return (int32_t)((u >> 1u) ^ -(int32_t)(u & 1)); - } - int64_t get_i64() noexcept + + // returns a zigzag encoded signed integer + template + T get_zigzag() noexcept { - uint64_t u = get_u64(); - return (int64_t)((u >> 1u) ^ -(int64_t)(u & 1)); + using U = std::make_unsigned_t; + U const u = get_varint(); + return static_cast((u >> 1u) ^ -static_cast(u & 1)); } - int32_t get_listh(uint8_t* el_type) noexcept + + // thrift spec says to use zigzag i32 for i16 types + int32_t get_i16() noexcept { return get_zigzag(); } + int32_t get_i32() noexcept { return get_zigzag(); } + int64_t get_i64() noexcept { return get_zigzag(); } + + uint32_t get_u32() noexcept { return get_varint(); } + uint64_t get_u64() noexcept { return get_varint(); } + + [[nodiscard]] std::pair get_listh() noexcept { - uint32_t c = getb(); - int32_t sz = c >> 4; - *el_type = c & 0xf; - if (sz == 0xf) sz = get_u32(); - return sz; + uint32_t const c = getb(); + uint32_t sz = c >> 4; + uint8_t t = c & 0xf; + if (sz == 0xf) { sz = get_u32(); } + return {t, sz}; } + bool skip_struct_field(int t, int depth = 0); public: @@ -120,6 +119,7 @@ class CompactProtocolReader { bool read(OffsetIndex* o); bool read(ColumnIndex* c); bool read(Statistics* s); + bool read(ColumnOrder* c); public: static int NumRequiredBits(uint32_t max_level) noexcept @@ -140,523 +140,11 @@ class CompactProtocolReader { uint8_t const* m_cur = nullptr; uint8_t const* m_end = nullptr; - friend class ParquetFieldBool; - friend class ParquetFieldBoolList; - friend class ParquetFieldInt8; - friend class ParquetFieldInt32; - friend class ParquetFieldOptionalInt32; - friend class ParquetFieldInt64; - friend class ParquetFieldInt64List; - template - friend class ParquetFieldStructListFunctor; - friend class ParquetFieldString; - template - friend class ParquetFieldStructFunctor; - template - friend class ParquetFieldUnionFunctor; - template - friend class ParquetFieldEnum; - template - friend class ParquetFieldEnumListFunctor; - friend class ParquetFieldStringList; - friend class ParquetFieldBinary; - friend class ParquetFieldBinaryList; - friend class ParquetFieldStructBlob; -}; - -/** - * @brief Functor to set value to bool read from CompactProtocolReader - * - * @return True if field type is not bool - */ -class ParquetFieldBool { - int field_val; - bool& val; - - public: - ParquetFieldBool(int f, bool& v) : field_val(f), val(v) {} - - inline bool operator()(CompactProtocolReader* cpr, int field_type) - { - return (field_type != ST_FLD_TRUE && field_type != ST_FLD_FALSE) || - !(val = (field_type == ST_FLD_TRUE), true); - } - - int 
field() { return field_val; } -}; - -/** - * @brief Functor to read a vector of booleans from CompactProtocolReader - * - * @return True if field types mismatch or if the process of reading a - * bool fails - */ -class ParquetFieldBoolList { - int field_val; - std::vector& val; - - public: - ParquetFieldBoolList(int f, std::vector& v) : field_val(f), val(v) {} - inline bool operator()(CompactProtocolReader* cpr, int field_type) - { - if (field_type != ST_FLD_LIST) return true; - uint8_t t; - int32_t n = cpr->get_listh(&t); - if (t != ST_FLD_TRUE) return true; - val.resize(n); - for (int32_t i = 0; i < n; i++) { - unsigned int current_byte = cpr->getb(); - if (current_byte != ST_FLD_TRUE && current_byte != ST_FLD_FALSE) return true; - val[i] = current_byte == ST_FLD_TRUE; - } - return false; - } - - int field() { return field_val; } -}; - -/** - * @brief Functor to set value to 8 bit integer read from CompactProtocolReader - * - * @return True if field type is not int8 - */ -class ParquetFieldInt8 { - int field_val; - int8_t& val; - - public: - ParquetFieldInt8(int f, int8_t& v) : field_val(f), val(v) {} - - inline bool operator()(CompactProtocolReader* cpr, int field_type) - { - val = cpr->getb(); - return (field_type != ST_FLD_BYTE); - } - - int field() { return field_val; } -}; - -/** - * @brief Functor to set value to 32 bit integer read from CompactProtocolReader - * - * @return True if field type is not int32 - */ -class ParquetFieldInt32 { - int field_val; - int32_t& val; - - public: - ParquetFieldInt32(int f, int32_t& v) : field_val(f), val(v) {} - - inline bool operator()(CompactProtocolReader* cpr, int field_type) - { - val = cpr->get_i32(); - return (field_type != ST_FLD_I32); - } - - int field() { return field_val; } -}; - -/** - * @brief Functor to set value to optional 32 bit integer read from CompactProtocolReader - * - * @return True if field type is not int32 - */ -class ParquetFieldOptionalInt32 { - int field_val; - std::optional& val; - - public: - ParquetFieldOptionalInt32(int f, std::optional& v) : field_val(f), val(v) {} - - inline bool operator()(CompactProtocolReader* cpr, int field_type) - { - val = cpr->get_i32(); - return (field_type != ST_FLD_I32); - } - - int field() { return field_val; } -}; - -/** - * @brief Functor to set value to 64 bit integer read from CompactProtocolReader - * - * @return True if field type is not int32 or int64 - */ -class ParquetFieldInt64 { - int field_val; - int64_t& val; - - public: - ParquetFieldInt64(int f, int64_t& v) : field_val(f), val(v) {} - - inline bool operator()(CompactProtocolReader* cpr, int field_type) - { - val = cpr->get_i64(); - return (field_type < ST_FLD_I16 || field_type > ST_FLD_I64); - } - - int field() { return field_val; } -}; - -/** - * @brief Functor to read a vector of 64-bit integers from CompactProtocolReader - * - * @return True if field types mismatch or if the process of reading an - * int64 fails - */ -class ParquetFieldInt64List { - int field_val; - std::vector& val; - - public: - ParquetFieldInt64List(int f, std::vector& v) : field_val(f), val(v) {} - inline bool operator()(CompactProtocolReader* cpr, int field_type) - { - if (field_type != ST_FLD_LIST) return true; - uint8_t t; - int32_t n = cpr->get_listh(&t); - if (t != ST_FLD_I64) return true; - val.resize(n); - for (int32_t i = 0; i < n; i++) { - val[i] = cpr->get_i64(); - } - return false; - } - - int field() { return field_val; } -}; - -/** - * @brief Functor to read a vector of structures from CompactProtocolReader - * - * @return True if 
field types mismatch or if the process of reading a - * struct fails - */ -template -class ParquetFieldStructListFunctor { - int field_val; - std::vector& val; - - public: - ParquetFieldStructListFunctor(int f, std::vector& v) : field_val(f), val(v) {} - - inline bool operator()(CompactProtocolReader* cpr, int field_type) - { - if (field_type != ST_FLD_LIST) return true; - - int current_byte = cpr->getb(); - if ((current_byte & 0xf) != ST_FLD_STRUCT) return true; - int n = current_byte >> 4; - if (n == 0xf) n = cpr->get_u32(); - val.resize(n); - for (int32_t i = 0; i < n; i++) { - if (!(cpr->read(&val[i]))) { return true; } - } - - return false; - } - - int field() { return field_val; } -}; - -template -ParquetFieldStructListFunctor ParquetFieldStructList(int f, std::vector& v) -{ - return ParquetFieldStructListFunctor(f, v); -} - -/** - * @brief Functor to read a string from CompactProtocolReader - * - * @return True if field type mismatches or if size of string exceeds bounds - * of the CompactProtocolReader - */ -class ParquetFieldString { - int field_val; - std::string& val; - - public: - ParquetFieldString(int f, std::string& v) : field_val(f), val(v) {} - - inline bool operator()(CompactProtocolReader* cpr, int field_type) - { - if (field_type != ST_FLD_BINARY) return true; - uint32_t n = cpr->get_u32(); - if (n < (size_t)(cpr->m_end - cpr->m_cur)) { - val.assign((char const*)cpr->m_cur, n); - cpr->m_cur += n; - return false; - } else { - return true; - } - } - - int field() { return field_val; } -}; - -/** - * @brief Functor to read a structure from CompactProtocolReader - * - * @return True if field types mismatch or if the process of reading a - * struct fails - */ -template -class ParquetFieldStructFunctor { - int field_val; - T& val; - - public: - ParquetFieldStructFunctor(int f, T& v) : field_val(f), val(v) {} - - inline bool operator()(CompactProtocolReader* cpr, int field_type) - { - return (field_type != ST_FLD_STRUCT || !(cpr->read(&val))); - } - - int field() { return field_val; } -}; - -template -ParquetFieldStructFunctor ParquetFieldStruct(int f, T& v) -{ - return ParquetFieldStructFunctor(f, v); -} - -/** - * @brief Functor to read a union member from CompactProtocolReader - * - * @tparam is_empty True if tparam `T` type is empty type, else false. 
- * - * @return True if field types mismatch or if the process of reading a - * union member fails - */ -template -class ParquetFieldUnionFunctor { - int field_val; - bool& is_set; - T& val; - - public: - ParquetFieldUnionFunctor(int f, bool& b, T& v) : field_val(f), is_set(b), val(v) {} - - inline bool operator()(CompactProtocolReader* cpr, int field_type) - { - if (field_type != ST_FLD_STRUCT) { - return true; - } else { - is_set = true; - return !cpr->read(&val); - } - } - - int field() { return field_val; } -}; - -template -struct ParquetFieldUnionFunctor { - int field_val; - bool& is_set; - T& val; - - public: - ParquetFieldUnionFunctor(int f, bool& b, T& v) : field_val(f), is_set(b), val(v) {} - - inline bool operator()(CompactProtocolReader* cpr, int field_type) - { - if (field_type != ST_FLD_STRUCT) { - return true; - } else { - is_set = true; - cpr->skip_struct_field(field_type); - return false; - } - } - - int field() { return field_val; } -}; - -template -ParquetFieldUnionFunctor> ParquetFieldUnion(int f, bool& b, T& v) -{ - return ParquetFieldUnionFunctor>(f, b, v); -} - -/** - * @brief Functor to set value to enum read from CompactProtocolReader - * - * @return True if field type is not int32 - */ -template -class ParquetFieldEnum { - int field_val; - Enum& val; - - public: - ParquetFieldEnum(int f, Enum& v) : field_val(f), val(v) {} - inline bool operator()(CompactProtocolReader* cpr, int field_type) - { - val = static_cast(cpr->get_i32()); - return (field_type != ST_FLD_I32); - } - - int field() { return field_val; } -}; - -/** - * @brief Functor to read a vector of enums from CompactProtocolReader - * - * @return True if field types mismatch or if the process of reading an - * enum fails - */ -template -class ParquetFieldEnumListFunctor { - int field_val; - std::vector& val; - - public: - ParquetFieldEnumListFunctor(int f, std::vector& v) : field_val(f), val(v) {} - inline bool operator()(CompactProtocolReader* cpr, int field_type) - { - if (field_type != ST_FLD_LIST) return true; - int current_byte = cpr->getb(); - if ((current_byte & 0xf) != ST_FLD_I32) return true; - int n = current_byte >> 4; - if (n == 0xf) n = cpr->get_u32(); - val.resize(n); - for (int32_t i = 0; i < n; i++) { - val[i] = static_cast(cpr->get_i32()); - } - return false; - } - - int field() { return field_val; } -}; - -template -ParquetFieldEnumListFunctor ParquetFieldEnumList(int field, std::vector& v) -{ - return ParquetFieldEnumListFunctor(field, v); -} - -/** - * @brief Functor to read a vector of strings from CompactProtocolReader - * - * @return True if field types mismatch or if the process of reading a - * string fails - */ -class ParquetFieldStringList { - int field_val; - std::vector& val; - - public: - ParquetFieldStringList(int f, std::vector& v) : field_val(f), val(v) {} - inline bool operator()(CompactProtocolReader* cpr, int field_type) - { - if (field_type != ST_FLD_LIST) return true; - uint8_t t; - int32_t n = cpr->get_listh(&t); - if (t != ST_FLD_BINARY) return true; - val.resize(n); - for (int32_t i = 0; i < n; i++) { - uint32_t l = cpr->get_u32(); - if (l < (size_t)(cpr->m_end - cpr->m_cur)) { - val[i].assign((char const*)cpr->m_cur, l); - cpr->m_cur += l; - } else - return true; - } - return false; - } - - int field() { return field_val; } -}; - -/** - * @brief Functor to read a binary from CompactProtocolReader - * - * @return True if field type mismatches or if size of binary exceeds bounds - * of the CompactProtocolReader - */ -class ParquetFieldBinary { - int field_val; - 
std::vector& val; - - public: - ParquetFieldBinary(int f, std::vector& v) : field_val(f), val(v) {} - - inline bool operator()(CompactProtocolReader* cpr, int field_type) - { - if (field_type != ST_FLD_BINARY) return true; - uint32_t n = cpr->get_u32(); - if (n <= (size_t)(cpr->m_end - cpr->m_cur)) { - val.resize(n); - val.assign(cpr->m_cur, cpr->m_cur + n); - cpr->m_cur += n; - return false; - } else { - return true; - } - } - - int field() { return field_val; } -}; - -/** - * @brief Functor to read a vector of binaries from CompactProtocolReader - * - * @return True if field types mismatch or if the process of reading a - * binary fails - */ -class ParquetFieldBinaryList { - int field_val; - std::vector>& val; - - public: - ParquetFieldBinaryList(int f, std::vector>& v) : field_val(f), val(v) {} - inline bool operator()(CompactProtocolReader* cpr, int field_type) - { - if (field_type != ST_FLD_LIST) return true; - uint8_t t; - int32_t n = cpr->get_listh(&t); - if (t != ST_FLD_BINARY) return true; - val.resize(n); - for (int32_t i = 0; i < n; i++) { - uint32_t l = cpr->get_u32(); - if (l <= (size_t)(cpr->m_end - cpr->m_cur)) { - val[i].resize(l); - val[i].assign(cpr->m_cur, cpr->m_cur + l); - cpr->m_cur += l; - } else - return true; - } - return false; - } - - int field() { return field_val; } -}; - -/** - * @brief Functor to read a struct from CompactProtocolReader - * - * @return True if field type mismatches - */ -class ParquetFieldStructBlob { - int field_val; - std::vector& val; - - public: - ParquetFieldStructBlob(int f, std::vector& v) : field_val(f), val(v) {} - inline bool operator()(CompactProtocolReader* cpr, int field_type) - { - if (field_type != ST_FLD_STRUCT) return true; - uint8_t const* start = cpr->m_cur; - cpr->skip_struct_field(field_type); - if (cpr->m_cur > start) { val.assign(start, cpr->m_cur - 1); } - return false; - } - - int field() { return field_val; } + friend class parquet_field_string; + friend class parquet_field_string_list; + friend class parquet_field_binary; + friend class parquet_field_binary_list; + friend class parquet_field_struct_blob; }; } // namespace parquet diff --git a/cpp/src/io/parquet/compact_protocol_writer.cpp b/cpp/src/io/parquet/compact_protocol_writer.cpp index b2c0c97c52d..60bc8984d81 100644 --- a/cpp/src/io/parquet/compact_protocol_writer.cpp +++ b/cpp/src/io/parquet/compact_protocol_writer.cpp @@ -33,18 +33,7 @@ size_t CompactProtocolWriter::write(FileMetaData const& f) c.field_struct_list(4, f.row_groups); if (not f.key_value_metadata.empty()) { c.field_struct_list(5, f.key_value_metadata); } if (not f.created_by.empty()) { c.field_string(6, f.created_by); } - if (f.column_order_listsize != 0) { - // Dummy list of struct containing an empty field1 struct - c.put_field_header(7, c.current_field(), ST_FLD_LIST); - c.put_byte((uint8_t)((std::min(f.column_order_listsize, 0xfu) << 4) | ST_FLD_STRUCT)); - if (f.column_order_listsize >= 0xf) c.put_uint(f.column_order_listsize); - for (uint32_t i = 0; i < f.column_order_listsize; i++) { - c.put_field_header(1, 0, ST_FLD_STRUCT); - c.put_byte(0); // ColumnOrder.field1 struct end - c.put_byte(0); // ColumnOrder struct end - } - c.set_current_field(7); - } + if (f.column_orders.has_value()) { c.field_struct_list(7, f.column_orders.value()); } return c.value(); } @@ -233,6 +222,16 @@ size_t CompactProtocolWriter::write(OffsetIndex const& s) return c.value(); } +size_t CompactProtocolWriter::write(ColumnOrder const& co) +{ + CompactProtocolFieldWriter c(*this); + switch (co) { + case 
ColumnOrder::TYPE_ORDER: c.field_empty_struct(1); break; + default: break; + } + return c.value(); +} + void CompactProtocolFieldWriter::put_byte(uint8_t v) { writer.m_buf.push_back(v); } void CompactProtocolFieldWriter::put_byte(uint8_t const* raw, uint32_t len) @@ -320,6 +319,13 @@ inline void CompactProtocolFieldWriter::field_struct(int field, T const& val) current_field_value = field; } +inline void CompactProtocolFieldWriter::field_empty_struct(int field) +{ + put_field_header(field, current_field_value, ST_FLD_STRUCT); + put_byte(0); // add a stop field + current_field_value = field; +} + template inline void CompactProtocolFieldWriter::field_struct_list(int field, std::vector const& val) { diff --git a/cpp/src/io/parquet/compact_protocol_writer.hpp b/cpp/src/io/parquet/compact_protocol_writer.hpp index 8d7b0961934..26d66527aa5 100644 --- a/cpp/src/io/parquet/compact_protocol_writer.hpp +++ b/cpp/src/io/parquet/compact_protocol_writer.hpp @@ -53,6 +53,7 @@ class CompactProtocolWriter { size_t write(Statistics const&); size_t write(PageLocation const&); size_t write(OffsetIndex const&); + size_t write(ColumnOrder const&); protected: std::vector& m_buf; @@ -94,6 +95,8 @@ class CompactProtocolFieldWriter { template inline void field_struct(int field, T const& val); + inline void field_empty_struct(int field); + template inline void field_struct_list(int field, std::vector const& val); diff --git a/cpp/src/io/parquet/parquet.hpp b/cpp/src/io/parquet/parquet.hpp index f7318bb9935..c2affc774c2 100644 --- a/cpp/src/io/parquet/parquet.hpp +++ b/cpp/src/io/parquet/parquet.hpp @@ -18,6 +18,8 @@ #include "parquet_common.hpp" +#include + #include #include #include @@ -118,6 +120,16 @@ struct LogicalType { BsonType BSON; }; +/** + * Union to specify the order used for the min_value and max_value fields for a column. + */ +struct ColumnOrder { + enum Type { UNDEFINED, TYPE_ORDER }; + Type type; + + operator Type() const { return type; } +}; + /** * @brief Struct for describing an element/field in the Parquet format schema * @@ -135,7 +147,7 @@ struct SchemaElement { int32_t num_children = 0; int32_t decimal_scale = 0; int32_t decimal_precision = 0; - std::optional field_id = std::nullopt; + thrust::optional field_id = thrust::nullopt; bool output_as_byte_array = false; // The following fields are filled in later during schema initialization @@ -284,8 +296,8 @@ struct FileMetaData { int64_t num_rows = 0; std::vector row_groups; std::vector key_value_metadata; - std::string created_by = ""; - uint32_t column_order_listsize = 0; + std::string created_by = ""; + thrust::optional> column_orders; }; /** diff --git a/cpp/src/io/parquet/parquet_common.hpp b/cpp/src/io/parquet/parquet_common.hpp index 5f8f1617cb9..5a1716bb547 100644 --- a/cpp/src/io/parquet/parquet_common.hpp +++ b/cpp/src/io/parquet/parquet_common.hpp @@ -141,7 +141,7 @@ enum BoundaryOrder { /** * @brief Thrift compact protocol struct field types */ -enum { +enum FieldType { ST_FLD_TRUE = 1, ST_FLD_FALSE = 2, ST_FLD_BYTE = 3, diff --git a/cpp/src/io/parquet/writer_impl.cu b/cpp/src/io/parquet/writer_impl.cu index d2976a3f5d9..a124f352ee4 100644 --- a/cpp/src/io/parquet/writer_impl.cu +++ b/cpp/src/io/parquet/writer_impl.cu @@ -74,8 +74,11 @@ struct aggregate_writer_metadata { for (size_t i = 0; i < partitions.size(); ++i) { this->files[i].num_rows = partitions[i].num_rows; } - this->column_order_listsize = - (stats_granularity != statistics_freq::STATISTICS_NONE) ? 
num_columns : 0; + + if (stats_granularity != statistics_freq::STATISTICS_NONE) { + ColumnOrder default_order = {ColumnOrder::TYPE_ORDER}; + this->column_orders = std::vector(num_columns, default_order); + } for (size_t p = 0; p < kv_md.size(); ++p) { std::transform(kv_md[p].begin(), @@ -102,13 +105,13 @@ struct aggregate_writer_metadata { { CUDF_EXPECTS(part < files.size(), "Invalid part index queried"); FileMetaData meta{}; - meta.version = this->version; - meta.schema = this->schema; - meta.num_rows = this->files[part].num_rows; - meta.row_groups = this->files[part].row_groups; - meta.key_value_metadata = this->files[part].key_value_metadata; - meta.created_by = this->created_by; - meta.column_order_listsize = this->column_order_listsize; + meta.version = this->version; + meta.schema = this->schema; + meta.num_rows = this->files[part].num_rows; + meta.row_groups = this->files[part].row_groups; + meta.key_value_metadata = this->files[part].key_value_metadata; + meta.created_by = this->created_by; + meta.column_orders = this->column_orders; return meta; } @@ -170,8 +173,8 @@ struct aggregate_writer_metadata { std::vector> column_indexes; }; std::vector files; - std::string created_by = ""; - uint32_t column_order_listsize = 0; + std::string created_by = ""; + thrust::optional> column_orders = thrust::nullopt; }; namespace { @@ -2373,20 +2376,7 @@ std::unique_ptr> writer::merge_row_group_metadata( md.num_rows += tmp.num_rows; } } - // Reader doesn't currently populate column_order, so infer it here - if (not md.row_groups.empty()) { - auto const is_valid_stats = [](auto const& stats) { - return not stats.max.empty() || not stats.min.empty() || stats.null_count != -1 || - stats.distinct_count != -1 || not stats.max_value.empty() || - not stats.min_value.empty(); - }; - uint32_t num_columns = static_cast(md.row_groups[0].columns.size()); - md.column_order_listsize = - (num_columns > 0 && is_valid_stats(md.row_groups[0].columns[0].meta_data.statistics)) - ? num_columns - : 0; - } // Thrift-encode the resulting output file_header_s fhdr; file_ender_s fendr; From e87d2fc1df6105d802b300bad19a9937f8155613 Mon Sep 17 00:00:00 2001 From: Lawrence Mitchell Date: Wed, 20 Sep 2023 21:18:31 +0100 Subject: [PATCH 085/150] Reduce memory usage of as_categorical_column (#14138) The main culprit is in the way the codes returned from _label_encoding were being ordered. We were generating an int64 column for the order, gathering through the left gather map, and then argsorting, before using that ordering as a gather map for the codes. We note that gather(y, with=argsort(x)) is equivalent to sort_by_key(y, with=x) so use that instead (avoiding an unnecessary gather). Furthermore we also note that gather([0..n), with=x) is just equivalent to x, so we can avoid a gather too. This reduces the peak memory footprint of categorifying a random column of 500_000_000 int32 values where there are 100 unique values from 24.75 GiB to 11.67 GiB. 
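Both identities are easy to check on the host. Below is a minimal sketch in plain C++ that stands in for the device-side primitives; it assumes `gather` and `sort_by_key` follow their usual definitions, and none of these names are the cudf API:

```cpp
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <numeric>
#include <utility>
#include <vector>

int main()
{
  std::vector<int> x{2, 0, 1};     // keys (a valid permutation, so both identities apply)
  std::vector<int> y{30, 10, 20};  // values

  // argsort(x): the permutation that sorts x
  std::vector<int> order(x.size());
  std::iota(order.begin(), order.end(), 0);
  std::stable_sort(order.begin(), order.end(), [&](int a, int b) { return x[a] < x[b]; });

  // gather(y, with=argsort(x))
  std::vector<int> gathered;
  for (int i : order) { gathered.push_back(y[i]); }

  // sort_by_key(y, with=x): sort (key, value) pairs on the key, keep the values
  std::vector<std::pair<int, int>> pairs;
  for (std::size_t i = 0; i < x.size(); ++i) { pairs.emplace_back(x[i], y[i]); }
  std::stable_sort(pairs.begin(), pairs.end(),
                   [](auto const& a, auto const& b) { return a.first < b.first; });
  std::vector<int> by_key;
  for (auto const& p : pairs) { by_key.push_back(p.second); }

  assert(gathered == by_key);  // gather(y, with=argsort(x)) == sort_by_key(y, with=x)

  // gather([0..n), with=x) == x: indexing the identity sequence through x returns x
  std::vector<int> identity(x.size());
  std::iota(identity.begin(), identity.end(), 0);
  std::vector<int> identity_gather;
  for (int i : x) { identity_gather.push_back(identity[i]); }
  assert(identity_gather == x);
}
```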
### Test code ```python import cudf import cupy as cp K = 100 N = 500_000_000 rng = cp.random._generator.RandomState() column = cudf.core.column.as_column(rng.choice(cp.arange(K, dtype="int32"), size=(N,), replace=True)) column = column.astype("category", ordered=False) ``` ### Before ![Screenshot from 2023-09-20 14-49-27](https://github.com/rapidsai/cudf/assets/1126981/08782501-c233-4efd-b4d6-a378cea82a82) ### After ![Screenshot from 2023-09-20 14-49-42](https://github.com/rapidsai/cudf/assets/1126981/93193bfb-a93e-45bf-8e5a-24289efc77c4) Authors: - Lawrence Mitchell (https://github.com/wence-) Approvers: - GALI PREM SAGAR (https://github.com/galipremsagar) - Bradley Dice (https://github.com/bdice) - Ashwin Srinath (https://github.com/shwina) URL: https://github.com/rapidsai/cudf/pull/14138 --- python/cudf/cudf/core/column/column.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/python/cudf/cudf/core/column/column.py b/python/cudf/cudf/core/column/column.py index d2e2f11a12e..0bc50a521e2 100644 --- a/python/cudf/cudf/core/column/column.py +++ b/python/cudf/cudf/core/column/column.py @@ -1390,20 +1390,19 @@ def _return_sentinel_column(): except ValueError: return _return_sentinel_column() - codes = arange(len(cats), dtype=dtype) left_gather_map, right_gather_map = cpp_join( [self], [cats], how="left" ) - codes = codes.take( - right_gather_map, nullify=True, check_bounds=False - ).fillna(na_sentinel.value) - + codes = libcudf.copying.gather( + [arange(len(cats), dtype=dtype)], right_gather_map, nullify=True + ) + del right_gather_map # reorder `codes` so that its values correspond to the # values of `self`: - order = arange(len(self)) - order = order.take(left_gather_map, check_bounds=False).argsort() - codes = codes.take(order) - return codes + (codes,) = libcudf.sort.sort_by_key( + codes, [left_gather_map], [True], ["last"], stable=True + ) + return codes.fillna(na_sentinel.value) def column_empty_like( From fe99e4baa3a7cd0f87658bf1ea77b17ec61fd7dc Mon Sep 17 00:00:00 2001 From: David Wendt <45795991+davidwendt@users.noreply.github.com> Date: Thu, 21 Sep 2023 10:42:32 -0400 Subject: [PATCH 086/150] Expose stream parameter in public strings find APIs (#14060) Add stream parameter to public APIs: - `cudf::strings::find()` - `cudf::strings::rfind()` - `cudf::strings::contains()` - `cudf::strings::starts_with()` - `cudf::strings::ends_with()` - `cudf::strings::findall()` - `cudf::strings::find_multiple()` Also cleaned up some of the doxygen comments. 
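For callers, the practical effect is that work can now be ordered on a caller-owned stream rather than the library default. A minimal usage sketch against the new signatures (the wrapper function and the include set are illustrative, not part of this change):

```cpp
#include <cudf/column/column.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/find.hpp>
#include <cudf/strings/strings_column_view.hpp>

#include <rmm/cuda_stream.hpp>

#include <memory>

std::unique_ptr<cudf::column> find_on_stream(cudf::strings_column_view const& input)
{
  rmm::cuda_stream stream;  // caller-owned, non-default stream
  // build the target on the same stream, then search each whole string (start=0, stop=-1)
  auto const target = cudf::string_scalar("é", true, stream.view());
  return cudf::strings::find(input, target, 0, -1, stream.view());
}
```

Passing `stream.view()` to both the scalar constructor and the API keeps all of the work ordered on one stream, which is the same pattern the new test below exercises with `cudf::test::get_default_stream()`.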
Reference #13744 Authors: - David Wendt (https://github.com/davidwendt) Approvers: - Vyas Ramasubramani (https://github.com/vyasr) - Vukasin Milovanovic (https://github.com/vuule) URL: https://github.com/rapidsai/cudf/pull/14060 --- cpp/include/cudf/strings/find.hpp | 102 ++++++++++++--------- cpp/include/cudf/strings/find_multiple.hpp | 12 ++- cpp/include/cudf/strings/findall.hpp | 2 + cpp/src/strings/search/find.cu | 24 +++-- cpp/src/strings/search/find_multiple.cu | 7 +- cpp/src/strings/search/findall.cu | 3 +- cpp/tests/CMakeLists.txt | 5 +- cpp/tests/streams/strings/find_test.cpp | 49 ++++++++++ 8 files changed, 143 insertions(+), 61 deletions(-) create mode 100644 cpp/tests/streams/strings/find_test.cpp diff --git a/cpp/include/cudf/strings/find.hpp b/cpp/include/cudf/strings/find.hpp index 2fed36862b9..c1aa8b294b3 100644 --- a/cpp/include/cudf/strings/find.hpp +++ b/cpp/include/cudf/strings/find.hpp @@ -43,19 +43,21 @@ namespace strings { * * @throw cudf::logic_error if start position is greater than stop position. * - * @param strings Strings instance for this operation. - * @param target UTF-8 encoded string to search for in each string. - * @param start First character position to include in the search. + * @param input Strings instance for this operation + * @param target UTF-8 encoded string to search for in each string + * @param start First character position to include in the search * @param stop Last position (exclusive) to include in the search. * Default of -1 will search to the end of the string. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New integer column with character position values. + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New integer column with character position values */ std::unique_ptr find( - strings_column_view const& strings, + strings_column_view const& input, string_scalar const& target, size_type start = 0, size_type stop = -1, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -72,19 +74,21 @@ std::unique_ptr find( * * @throw cudf::logic_error if start position is greater than stop position. * - * @param strings Strings instance for this operation. - * @param target UTF-8 encoded string to search for in each string. - * @param start First position to include in the search. + * @param input Strings instance for this operation + * @param target UTF-8 encoded string to search for in each string + * @param start First position to include in the search * @param stop Last position (exclusive) to include in the search. * Default of -1 will search starting at the end of the string. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New integer column with character position values. 
+ * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New integer column with character position values */ std::unique_ptr rfind( - strings_column_view const& strings, + strings_column_view const& input, string_scalar const& target, size_type start = 0, size_type stop = -1, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -123,37 +127,41 @@ std::unique_ptr find( * * Any null string entries return corresponding null entries in the output columns. * - * @param strings Strings instance for this operation. - * @param target UTF-8 encoded string to search for in each string. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New type_id::BOOL8 column. + * @param input Strings instance for this operation + * @param target UTF-8 encoded string to search for in each string + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New BOOL8 column */ std::unique_ptr contains( - strings_column_view const& strings, + strings_column_view const& input, string_scalar const& target, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** * @brief Returns a column of boolean values for each string where true indicates * the corresponding target string was found within that string in the provided column. * - * The 'output[i] = true` if string `targets[i]` is found inside `strings[i]` otherwise + * The 'output[i] = true` if string `targets[i]` is found inside `input[i]` otherwise * `output[i] = false`. * If `target[i]` is an empty string, true is returned for `output[i]`. * If `target[i]` is null, false is returned for `output[i]`. * - * Any null `strings[i]` row results in a null `output[i]` row. + * Any null string entries return corresponding null entries in the output columns. * * @throw cudf::logic_error if `strings.size() != targets.size()`. * - * @param strings Strings instance for this operation. - * @param targets Strings column of targets to check row-wise in `strings`. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New type_id::BOOL8 column. + * @param input Strings instance for this operation + * @param targets Strings column of targets to check row-wise in `strings` + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New BOOL8 column */ std::unique_ptr contains( - strings_column_view const& strings, + strings_column_view const& input, strings_column_view const& targets, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -166,14 +174,16 @@ std::unique_ptr contains( * * Any null string entries return corresponding null entries in the output columns. * - * @param strings Strings instance for this operation. - * @param target UTF-8 encoded string to search for in each string. - * @param mr Device memory resource used to allocate the returned column's device memory. 
+ * @param input Strings instance for this operation + * @param target UTF-8 encoded string to search for in each string + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory * @return New type_id::BOOL8 column. */ std::unique_ptr starts_with( - strings_column_view const& strings, + strings_column_view const& input, string_scalar const& target, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -190,14 +200,16 @@ std::unique_ptr starts_with( * * @throw cudf::logic_error if `strings.size() != targets.size()`. * - * @param strings Strings instance for this operation. - * @param targets Strings instance for this operation. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New type_id::BOOL8 column. + * @param input Strings instance for this operation + * @param targets Strings instance for this operation + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New BOOL8 column */ std::unique_ptr starts_with( - strings_column_view const& strings, + strings_column_view const& input, strings_column_view const& targets, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -210,14 +222,16 @@ std::unique_ptr starts_with( * * Any null string entries return corresponding null entries in the output columns. * - * @param strings Strings instance for this operation. - * @param target UTF-8 encoded string to search for in each string. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New type_id::BOOL8 column. + * @param input Strings instance for this operation + * @param target UTF-8 encoded string to search for in each string + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New BOOL8 column */ std::unique_ptr ends_with( - strings_column_view const& strings, + strings_column_view const& input, string_scalar const& target, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -234,14 +248,16 @@ std::unique_ptr ends_with( * * @throw cudf::logic_error if `strings.size() != targets.size()`. * - * @param strings Strings instance for this operation. - * @param targets Strings instance for this operation. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New type_id::BOOL8 column. 
+ * @param input Strings instance for this operation + * @param targets Strings instance for this operation + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New BOOL8 column */ std::unique_ptr ends_with( - strings_column_view const& strings, + strings_column_view const& input, strings_column_view const& targets, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of doxygen group } // namespace strings diff --git a/cpp/include/cudf/strings/find_multiple.hpp b/cpp/include/cudf/strings/find_multiple.hpp index 21cfdb15146..06b851c5012 100644 --- a/cpp/include/cudf/strings/find_multiple.hpp +++ b/cpp/include/cudf/strings/find_multiple.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022, NVIDIA CORPORATION. + * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -48,14 +48,16 @@ namespace strings { * * @throw cudf::logic_error if `targets` is empty or contains nulls * - * @param input Strings instance for this operation. - * @param targets Strings to search for in each string. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return Lists column with character position values. + * @param input Strings instance for this operation + * @param targets Strings to search for in each string + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return Lists column with character position values */ std::unique_ptr find_multiple( strings_column_view const& input, strings_column_view const& targets, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of doxygen group diff --git a/cpp/include/cudf/strings/findall.hpp b/cpp/include/cudf/strings/findall.hpp index 745f0fc19ff..379b9624dc6 100644 --- a/cpp/include/cudf/strings/findall.hpp +++ b/cpp/include/cudf/strings/findall.hpp @@ -57,12 +57,14 @@ struct regex_program; * * @param input Strings instance for this operation * @param prog Regex program instance + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return New lists column of strings */ std::unique_ptr findall( strings_column_view const& input, regex_program const& prog, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of doxygen group diff --git a/cpp/src/strings/search/find.cu b/cpp/src/strings/search/find.cu index 3de9dd34d83..1299e552565 100644 --- a/cpp/src/strings/search/find.cu +++ b/cpp/src/strings/search/find.cu @@ -305,20 +305,22 @@ std::unique_ptr find(strings_column_view const& strings, string_scalar const& target, size_type start, size_type stop, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::find(strings, target, start, stop, cudf::get_default_stream(), mr); + return detail::find(strings, target, start, stop, stream, mr); } std::unique_ptr 
rfind(strings_column_view const& strings, string_scalar const& target, size_type start, size_type stop, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::rfind(strings, target, start, stop, cudf::get_default_stream(), mr); + return detail::rfind(strings, target, start, stop, stream, mr); } std::unique_ptr find(strings_column_view const& input, @@ -618,50 +620,56 @@ std::unique_ptr ends_with(strings_column_view const& strings, std::unique_ptr contains(strings_column_view const& strings, string_scalar const& target, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::contains(strings, target, cudf::get_default_stream(), mr); + return detail::contains(strings, target, stream, mr); } std::unique_ptr contains(strings_column_view const& strings, strings_column_view const& targets, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::contains(strings, targets, cudf::get_default_stream(), mr); + return detail::contains(strings, targets, stream, mr); } std::unique_ptr starts_with(strings_column_view const& strings, string_scalar const& target, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::starts_with(strings, target, cudf::get_default_stream(), mr); + return detail::starts_with(strings, target, stream, mr); } std::unique_ptr starts_with(strings_column_view const& strings, strings_column_view const& targets, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::starts_with(strings, targets, cudf::get_default_stream(), mr); + return detail::starts_with(strings, targets, stream, mr); } std::unique_ptr ends_with(strings_column_view const& strings, string_scalar const& target, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::ends_with(strings, target, cudf::get_default_stream(), mr); + return detail::ends_with(strings, target, stream, mr); } std::unique_ptr ends_with(strings_column_view const& strings, strings_column_view const& targets, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::ends_with(strings, targets, cudf::get_default_stream(), mr); + return detail::ends_with(strings, targets, stream, mr); } } // namespace strings diff --git a/cpp/src/strings/search/find_multiple.cu b/cpp/src/strings/search/find_multiple.cu index 4a823ad1dcb..fcaec835f4d 100644 --- a/cpp/src/strings/search/find_multiple.cu +++ b/cpp/src/strings/search/find_multiple.cu @@ -70,8 +70,8 @@ std::unique_ptr find_multiple(strings_column_view const& input, results->set_null_count(0); auto offsets = cudf::detail::sequence(strings_count + 1, - numeric_scalar(0), - numeric_scalar(targets_count), + numeric_scalar(0, true, stream), + numeric_scalar(targets_count, true, stream), stream, mr); return make_lists_column(strings_count, @@ -88,10 +88,11 @@ std::unique_ptr find_multiple(strings_column_view const& input, // external API std::unique_ptr find_multiple(strings_column_view const& input, strings_column_view const& targets, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::find_multiple(input, targets, cudf::get_default_stream(), mr); + return detail::find_multiple(input, targets, stream, mr); } } // namespace strings diff --git a/cpp/src/strings/search/findall.cu 
b/cpp/src/strings/search/findall.cu index 2df64c6a0a7..acea4ff1c51 100644 --- a/cpp/src/strings/search/findall.cu +++ b/cpp/src/strings/search/findall.cu @@ -134,10 +134,11 @@ std::unique_ptr findall(strings_column_view const& input, std::unique_ptr findall(strings_column_view const& input, regex_program const& prog, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::findall(input, prog, cudf::get_default_stream(), mr); + return detail::findall(input, prog, stream, mr); } } // namespace strings diff --git a/cpp/tests/CMakeLists.txt b/cpp/tests/CMakeLists.txt index 4923ef5c903..6414962903e 100644 --- a/cpp/tests/CMakeLists.txt +++ b/cpp/tests/CMakeLists.txt @@ -627,7 +627,10 @@ ConfigureTest(STREAM_CONCATENATE_TEST streams/concatenate_test.cpp STREAM_MODE t ConfigureTest(STREAM_FILLING_TEST streams/filling_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_REPLACE_TEST streams/replace_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_SEARCH_TEST streams/search_test.cpp STREAM_MODE testing) -ConfigureTest(STREAM_STRINGS_TEST streams/strings/case_test.cpp STREAM_MODE testing) +ConfigureTest( + STREAM_STRINGS_TEST streams/strings/case_test.cpp streams/strings/find_test.cpp STREAM_MODE + testing +) # ################################################################################################## # Install tests #################################################################################### diff --git a/cpp/tests/streams/strings/find_test.cpp b/cpp/tests/streams/strings/find_test.cpp new file mode 100644 index 00000000000..b734a1738cc --- /dev/null +++ b/cpp/tests/streams/strings/find_test.cpp @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#include
+
+class StringsFindTest : public cudf::test::BaseFixture {};
+
+TEST_F(StringsFindTest, Find)
+{
+ auto input = cudf::test::strings_column_wrapper({"Héllo", "thesé", "tést strings", ""});
+ auto view = cudf::strings_column_view(input);
+
+ auto const target = cudf::string_scalar("é", true, cudf::test::get_default_stream());
+ cudf::strings::find(view, target, 0, -1, cudf::test::get_default_stream());
+ cudf::strings::rfind(view, target, 0, -1, cudf::test::get_default_stream());
+ cudf::strings::find(view, view, 0, cudf::test::get_default_stream());
+ cudf::strings::find_multiple(view, view, cudf::test::get_default_stream());
+ cudf::strings::contains(view, target, cudf::test::get_default_stream());
+ cudf::strings::starts_with(view, target, cudf::test::get_default_stream());
+ cudf::strings::starts_with(view, view, cudf::test::get_default_stream());
+ cudf::strings::ends_with(view, target, cudf::test::get_default_stream());
+ cudf::strings::ends_with(view, view, cudf::test::get_default_stream());
+
+ auto const pattern = std::string("[a-z]");
+ auto const prog = cudf::strings::regex_program::create(pattern);
+ cudf::strings::findall(view, *prog, cudf::test::get_default_stream());
+}

From 05ee2604d8f4e7c6525d12926100e2b11b6d6cb0 Mon Sep 17 00:00:00 2001
From: David Wendt <45795991+davidwendt@users.noreply.github.com>
Date: Thu, 21 Sep 2023 10:45:11 -0400
Subject: [PATCH 087/150] Fix kernel launch error for cudf::io::orc::gpu::rowgroup_char_counts_kernel (#14139)

Fixes a memcheck error found during the nightly builds, in gtest `OrcWriterNumericTypeTest/0.SingleColumn`:
```
# compute-sanitizer --tool memcheck gtests/ORC_TEST --gtest_filter=OrcWriterNumericTypeTest/0.SingleColumn --rmm_mode=cuda
========= COMPUTE-SANITIZER
Note: Google Test filter = OrcWriterNumericTypeTest/0.SingleColumn
[==========] Running 1 test from 1 test suite.
[----------] Global test environment set-up.
[----------] 1 test from OrcWriterNumericTypeTest/0, where TypeParam = signed char
[ RUN ] OrcWriterNumericTypeTest/0.SingleColumn
========= Program hit cudaErrorInvalidConfiguration (error 9) due to "invalid configuration argument" on CUDA API call to cudaLaunchKernel.
========= Saved host backtrace up to driver entry point at error
========= Host Frame: [0x480aa6]
========= in /usr/lib/x86_64-linux-gnu/libcuda.so.1
========= Host Frame:cudaLaunchKernel [0x6c358]
========= in /conda/envs/rapids/lib/libcudart.so.11.0
========= Host Frame:__device_stub__ZN4cudf2io3orc3gpu27rowgroup_char_counts_kernelENS_6detail11base_2dspanIiNS_11device_spanEEENS5_IKNS1_22orc_column_device_viewELm18446744073709551615EEENS4_IKNS1_13rowgroup_rowsES5_EENS5_IKjLm18446744073709551615EEE(cudf::detail::base_2dspan&, cudf::device_span&, cudf::detail::base_2dspan&, cudf::device_span&) [0x14fccb4]
```
Adds a check to avoid the kernel launch if the number of string columns is zero.
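The failure mode is generic CUDA behaviour: a kernel launch with a zero-sized grid fails with `cudaErrorInvalidConfiguration` rather than degenerating to a no-op, so empty inputs need an early return before the launch. A standalone sketch of the guard (the kernel and wrapper here are hypothetical stand-ins, not the cudf code):

```cuda
#include <cuda_runtime.h>

// stand-in kernel; the guard in the host wrapper, not the kernel body, is the point
__global__ void char_counts_kernel(int* counts, int n)
{
  int const i = static_cast<int>(blockIdx.x * blockDim.x + threadIdx.x);
  if (i < n) { counts[i] = 0; }
}

void launch_char_counts(int* counts, int num_str_cols, cudaStream_t stream)
{
  if (num_str_cols == 0) { return; }  // a grid of 0 blocks is an invalid configuration
  int const block = 256;
  int const grid  = (num_str_cols + block - 1) / block;
  char_counts_kernel<<<grid, block, 0, stream>>>(counts, num_str_cols);
}
```

The one-line fix below has the same shape: return before computing the launch configuration when `num_str_cols` is zero.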
Authors: - David Wendt (https://github.com/davidwendt) Approvers: - Nghia Truong (https://github.com/ttnghia) - Bradley Dice (https://github.com/bdice) URL: https://github.com/rapidsai/cudf/pull/14139 --- cpp/src/io/orc/dict_enc.cu | 1 + 1 file changed, 1 insertion(+) diff --git a/cpp/src/io/orc/dict_enc.cu b/cpp/src/io/orc/dict_enc.cu index 0007530a5af..1d2262a1ccc 100644 --- a/cpp/src/io/orc/dict_enc.cu +++ b/cpp/src/io/orc/dict_enc.cu @@ -60,6 +60,7 @@ void rowgroup_char_counts(device_2dspan counts, auto const num_rowgroups = rowgroup_bounds.size().first; auto const num_str_cols = str_col_indexes.size(); + if (num_str_cols == 0) { return; } int block_size = 0; // suggested thread count to use int min_grid_size = 0; // minimum block count required From ec744de69d88ada46d744c5121e137c817cb2709 Mon Sep 17 00:00:00 2001 From: MithunR Date: Thu, 21 Sep 2023 11:45:13 -0700 Subject: [PATCH 088/150] Support negative preceding/following for ROW window functions (#14093) This commit adds support for "offset" ROW windows, where the preceding and following window bounds are allowed to have negative values. This allows window definitions to exclude the current row entirely. Prior to this change, ROW-based windows *had* to include the current row, causing `preceding` and `following` to support only non-negative values. Additionally, the inclusion of the current row would count against the `min_periods` check. The following is an example of the new "negative" semantics. Consider the input: ```c++ auto const row = ints_column{1, 2, 3, 4}; ``` If the window bounds are specified as (preceding=3, following=-1), then the window for the third row (`3`) is `{1, 2}`. `following=-1` indicates a "following" row *before* the current row. A negative value for `preceding` follows the existing convention of including the current row. This makes it slightly more involved: 1. `preceding=2` indicates *one* row before the current row. 2. `preceding=1` indicates the current row. 3. `preceding=0` indicates one row past (i.e. after) the current row. 4. `preceding=-1` indicates two rows after the current row. Et cetera. `min_periods` checks continue to be honoured as before, but the requirement for positive `min_periods` is dropped. `min_periods` only need be non-negative. Authors: - MithunR (https://github.com/mythrocks) Approvers: - Divye Gala (https://github.com/divyegala) - Robert Maynard (https://github.com/robertmaynard) URL: https://github.com/rapidsai/cudf/pull/14093 --- cpp/include/cudf/rolling.hpp | 24 +- cpp/src/rolling/detail/rolling.cuh | 33 +- .../rolling/detail/rolling_fixed_window.cu | 30 +- cpp/src/rolling/grouped_rolling.cu | 188 +++++++--- cpp/src/rolling/rolling.cu | 4 +- cpp/tests/CMakeLists.txt | 1 + cpp/tests/rolling/grouped_rolling_test.cpp | 5 +- cpp/tests/rolling/offset_row_window_test.cpp | 343 ++++++++++++++++++ cpp/tests/rolling/rolling_test.cpp | 23 +- 9 files changed, 552 insertions(+), 99 deletions(-) create mode 100644 cpp/tests/rolling/offset_row_window_test.cpp diff --git a/cpp/include/cudf/rolling.hpp b/cpp/include/cudf/rolling.hpp index efdb85691bd..ec93c709163 100644 --- a/cpp/include/cudf/rolling.hpp +++ b/cpp/include/cudf/rolling.hpp @@ -199,10 +199,30 @@ struct window_bounds { * column of the same type as the input. Therefore it is suggested to convert integer column types * (especially low-precision integers) to `FLOAT32` or `FLOAT64` before doing a rolling `MEAN`. 
* + * Note: `preceding_window` and `following_window` could well have negative values. This yields + * windows where the current row might not be included at all. For instance, consider a window + * defined as (preceding=3, following=-1). This produces a window from 2 (i.e. 3-1) rows preceding + * the current row, and 1 row *preceding* the current row. For the example above, the window for + * row#3 is: + * + * [ 10, 20, 10, 50, 60, 20, 30, 80, 40 ] + * <--window--> ^ + * | + * current_row + * + * Similarly, `preceding` could have a negative value, indicating that the window begins at a + * position after the current row. It differs slightly from the semantics for `following`, because + * `preceding` includes the current row. Therefore: + * 1. preceding=1 => Window starts at the current row. + * 2. preceding=0 => Window starts at 1 past the current row. + * 3. preceding=-1 => Window starts at 2 past the current row. Etc. + * * @param[in] group_keys The (pre-sorted) grouping columns * @param[in] input The input column (to be aggregated) - * @param[in] preceding_window The static rolling window size in the backward direction - * @param[in] following_window The static rolling window size in the forward direction + * @param[in] preceding_window The static rolling window size in the backward direction (for + * positive values), or forward direction (for negative values) + * @param[in] following_window The static rolling window size in the forward direction (for positive + * values), or backward direction (for negative values) * @param[in] min_periods Minimum number of observations in window required to have a value, * otherwise element `i` is null. * @param[in] aggr The rolling window aggregation type (SUM, MAX, MIN, etc.) diff --git a/cpp/src/rolling/detail/rolling.cuh b/cpp/src/rolling/detail/rolling.cuh index 3b6d53f43c4..0648ef3d30f 100644 --- a/cpp/src/rolling/detail/rolling.cuh +++ b/cpp/src/rolling/detail/rolling.cuh @@ -70,7 +70,22 @@ namespace cudf { namespace detail { -namespace { // anonymous +/// Helper function to materialize preceding/following offsets. +template +std::unique_ptr expand_to_column(Calculator const& calc, + size_type const& num_rows, + rmm::cuda_stream_view stream) +{ + auto window_column = cudf::make_numeric_column( + cudf::data_type{type_to_id()}, num_rows, cudf::mask_state::UNALLOCATED, stream); + + auto begin = cudf::detail::make_counting_transform_iterator(0, calc); + + thrust::copy_n( + rmm::exec_policy(stream), begin, num_rows, window_column->mutable_view().data()); + + return window_column; +} /** * @brief Operator for applying a generic (non-specialized) rolling aggregation on a single window. 
@@ -91,14 +106,14 @@ struct DeviceRolling { // operations we do support template - DeviceRolling(size_type _min_periods, std::enable_if_t()>* = nullptr) + explicit DeviceRolling(size_type _min_periods, std::enable_if_t()>* = nullptr) : min_periods(_min_periods) { } // operations we don't support template - DeviceRolling(size_type _min_periods, std::enable_if_t()>* = nullptr) + explicit DeviceRolling(size_type _min_periods, std::enable_if_t()>* = nullptr) : min_periods(_min_periods) { CUDF_FAIL("Invalid aggregation/type pair"); @@ -111,7 +126,7 @@ struct DeviceRolling { mutable_column_device_view& output, size_type start_index, size_type end_index, - size_type current_index) + size_type current_index) const { using AggOp = typename corresponding_operator::type; AggOp agg_op; @@ -144,7 +159,7 @@ struct DeviceRolling { template struct DeviceRollingArgMinMaxBase { size_type min_periods; - DeviceRollingArgMinMaxBase(size_type _min_periods) : min_periods(_min_periods) {} + explicit DeviceRollingArgMinMaxBase(size_type _min_periods) : min_periods(_min_periods) {} static constexpr bool is_supported() { @@ -162,7 +177,7 @@ struct DeviceRollingArgMinMaxBase { */ template struct DeviceRollingArgMinMaxString : DeviceRollingArgMinMaxBase { - DeviceRollingArgMinMaxString(size_type _min_periods) + explicit DeviceRollingArgMinMaxString(size_type _min_periods) : DeviceRollingArgMinMaxBase(_min_periods) { } @@ -461,8 +476,8 @@ struct agg_specific_empty_output { } }; -std::unique_ptr empty_output_for_rolling_aggregation(column_view const& input, - rolling_aggregation const& agg) +static std::unique_ptr empty_output_for_rolling_aggregation(column_view const& input, + rolling_aggregation const& agg) { // TODO: // Ideally, for UDF aggregations, the returned column would match @@ -1215,8 +1230,6 @@ struct dispatch_rolling { } }; -} // namespace - // Applies a user-defined rolling window function to the values in a column. template std::unique_ptr rolling_window_udf(column_view const& input, diff --git a/cpp/src/rolling/detail/rolling_fixed_window.cu b/cpp/src/rolling/detail/rolling_fixed_window.cu index fb7b1b5f590..e951db955e5 100644 --- a/cpp/src/rolling/detail/rolling_fixed_window.cu +++ b/cpp/src/rolling/detail/rolling_fixed_window.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, NVIDIA CORPORATION. + * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,8 +19,9 @@ #include #include +#include + #include -#include namespace cudf::detail { @@ -43,6 +44,9 @@ std::unique_ptr rolling_window(column_view const& input, CUDF_EXPECTS((default_outputs.is_empty() || default_outputs.size() == input.size()), "Defaults column must be either empty or have as many rows as the input column."); + CUDF_EXPECTS(-(preceding_window - 1) <= following_window, + "Preceding window bounds must precede the following window bounds."); + if (agg.kind == aggregation::CUDA || agg.kind == aggregation::PTX) { // TODO: In future, might need to clamp preceding/following to column boundaries. return cudf::detail::rolling_window_udf(input, @@ -58,18 +62,22 @@ std::unique_ptr rolling_window(column_view const& input, // Clamp preceding/following to column boundaries. // E.g. 
If preceding_window == 2, then for a column of 5 elements, preceding_window will be:
   // [1, 2, 2, 2, 2]
-  auto const preceding_window_begin = cudf::detail::make_counting_transform_iterator(
-    0,
-    [preceding_window] __device__(size_type i) { return thrust::min(i + 1, preceding_window); });
-  auto const following_window_begin = cudf::detail::make_counting_transform_iterator(
-    0, [col_size = input.size(), following_window] __device__(size_type i) {
-      return thrust::min(col_size - i - 1, following_window);
-    });
+  auto const preceding_calc = [preceding_window] __device__(size_type i) {
+    return thrust::min(i + 1, preceding_window);
+  };
+
+  auto const following_calc = [col_size = input.size(),
+                               following_window] __device__(size_type i) {
+    return thrust::min(col_size - i - 1, following_window);
+  };
+
+  auto const preceding_column = expand_to_column(preceding_calc, input.size(), stream);
+  auto const following_column = expand_to_column(following_calc, input.size(), stream);
 
   return cudf::detail::rolling_window(input,
                                       default_outputs,
-                                      preceding_window_begin,
-                                      following_window_begin,
+                                      preceding_column->view().begin<size_type>(),
+                                      following_column->view().begin<size_type>(),
                                       min_periods,
                                       agg,
                                       stream,

diff --git a/cpp/src/rolling/grouped_rolling.cu b/cpp/src/rolling/grouped_rolling.cu
index ca5c04d1c4f..6e69b5157c2 100644
--- a/cpp/src/rolling/grouped_rolling.cu
+++ b/cpp/src/rolling/grouped_rolling.cu
@@ -30,7 +30,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
@@ -94,6 +93,109 @@ std::unique_ptr<column> grouped_rolling_window(table_view const& group_keys,
 
 namespace detail {
 
+/// Preceding window calculation functor.
+template <bool preceding_less_than_1>
+struct row_based_preceding_calc {
+  cudf::size_type const* _group_offsets_begin;
+  cudf::size_type const* _group_labels_begin;
+  cudf::size_type const _preceding_window;
+
+  row_based_preceding_calc(rmm::device_uvector<cudf::size_type> const& group_offsets,
+                           rmm::device_uvector<cudf::size_type> const& group_labels,
+                           cudf::size_type const& preceding_window)
+    : _group_offsets_begin(group_offsets.data()),
+      _group_labels_begin(group_labels.data()),
+      _preceding_window(preceding_window)
+  {
+  }
+
+  __device__ cudf::size_type operator()(cudf::size_type const& idx) const
+  {
+    auto group_label = _group_labels_begin[idx];
+    if constexpr (preceding_less_than_1) {  // where 1 indicates only the current row.
+      auto group_end = _group_offsets_begin[group_label + 1];
+      return thrust::maximum{}(_preceding_window, -(group_end - 1 - idx));
+    } else {
+      auto group_start = _group_offsets_begin[group_label];
+      return thrust::minimum{}(_preceding_window,
+                               idx - group_start + 1);  // Preceding includes current row.
+    }
+  }
+};
+
+/// Helper to materialize preceding-window column, corrected to respect group boundaries.
+/// E.g. If preceding window == 5, then,
+/// 1. For the first row in the group, the preceding is set to 1,
+/// 2. For the next row in the group, preceding is set to 2, etc.
+std::unique_ptr<cudf::column> make_preceding_column(
+  rmm::device_uvector<cudf::size_type> const& group_offsets,
+  rmm::device_uvector<cudf::size_type> const& group_labels,
+  cudf::size_type const& preceding_window,
+  cudf::size_type const& num_rows,
+  rmm::cuda_stream_view stream)
+{
+  if (preceding_window < 1) {
+    auto const calc = row_based_preceding_calc<true>(group_offsets, group_labels, preceding_window);
+    return cudf::detail::expand_to_column(calc, num_rows, stream);
+  } else {
+    auto const calc =
+      row_based_preceding_calc<false>(group_offsets, group_labels, preceding_window);
+    return cudf::detail::expand_to_column(calc, num_rows, stream);
+  }
+}
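As a quick host-side sanity check of the clamp arithmetic in
`row_based_preceding_calc` (again an editorial sketch, not patch code;
`clamped_preceding` is an invented name):

```c++
// Mirrors the two branches above, for one group spanning rows
// [group_start, group_end), under the convention that preceding == 1
// means "just the current row".
#include <algorithm>
#include <cstdint>

int32_t clamped_preceding(int32_t idx, int32_t group_start, int32_t group_end, int32_t preceding)
{
  if (preceding < 1) {
    // Window begins after the current row: don't let it begin past the group's end.
    return std::max(preceding, -(group_end - 1 - idx));
  }
  // Window includes the current row: don't let it begin before the group's start.
  return std::min(preceding, idx - group_start + 1);
}

// E.g. clamped_preceding(9, 0, 10, -5) == 0: at the last row of a ten-row group,
// a window that would begin beyond the group is clamped to an empty window.
```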
+/// Following window calculation functor.
+template <bool following_less_than_0>
+struct row_based_following_calc {
+  cudf::size_type const* _group_offsets_begin;
+  cudf::size_type const* _group_labels_begin;
+  cudf::size_type const _following_window;
+
+  row_based_following_calc(rmm::device_uvector<cudf::size_type> const& group_offsets,
+                           rmm::device_uvector<cudf::size_type> const& group_labels,
+                           cudf::size_type const& following_window)
+    : _group_offsets_begin(group_offsets.data()),
+      _group_labels_begin(group_labels.data()),
+      _following_window(following_window)
+  {
+  }
+
+  __device__ cudf::size_type operator()(cudf::size_type const& idx) const
+  {
+    auto group_label = _group_labels_begin[idx];
+    if constexpr (following_less_than_0) {
+      auto group_start = _group_offsets_begin[group_label];
+      return thrust::maximum{}(_following_window, -(idx - group_start) - 1);
+    } else {
+      auto group_end =
+        _group_offsets_begin[group_label + 1];  // Cannot fall off the end, since offsets
+                                                // is capped with `input.size()`.
+      return thrust::minimum{}(_following_window, (group_end - 1) - idx);
+    }
+  }
+};
+
+/// Helper to materialize following-window column, corrected to respect group boundaries.
+/// i.e. If following window == 5, then:
+/// 1. For the last row in the group, the following is set to 0.
+/// 2. For the second last row in the group, following is set to 1, etc.
+std::unique_ptr<cudf::column> make_following_column(
+  rmm::device_uvector<cudf::size_type> const& group_offsets,
+  rmm::device_uvector<cudf::size_type> const& group_labels,
+  cudf::size_type const& following_window,
+  cudf::size_type const& num_rows,
+  rmm::cuda_stream_view stream)
+{
+  if (following_window < 0) {
+    auto const calc = row_based_following_calc<true>(group_offsets, group_labels, following_window);
+    return cudf::detail::expand_to_column(calc, num_rows, stream);
+  } else {
+    auto const calc =
+      row_based_following_calc<false>(group_offsets, group_labels, following_window);
+    return cudf::detail::expand_to_column(calc, num_rows, stream);
+  }
+}
+
 std::unique_ptr<column> grouped_rolling_window(table_view const& group_keys,
                                                column_view const& input,
                                                column_view const& default_outputs,
@@ -111,7 +213,7 @@ std::unique_ptr<column> grouped_rolling_window(table_view const& group_keys,
   CUDF_EXPECTS((group_keys.num_columns() == 0 || group_keys.num_rows() == input.size()),
                "Size mismatch between group_keys and input vector.");
 
-  CUDF_EXPECTS((min_periods > 0), "min_periods must be positive");
+  CUDF_EXPECTS((min_periods >= 0), "min_periods must be non-negative");
 
   CUDF_EXPECTS((default_outputs.is_empty() || default_outputs.size() == input.size()),
                "Defaults column must be either empty or have as many rows as the input column.");
@@ -127,6 +229,9 @@ std::unique_ptr<column> grouped_rolling_window(table_view const& group_keys,
   auto const preceding_window = preceding_window_bounds.value();
   auto const following_window = following_window_bounds.value();
 
+  CUDF_EXPECTS(-(preceding_window - 1) <= following_window,
+               "Preceding window bounds must precede the following window bounds.");
+
   if (group_keys.num_columns() == 0) {
     // No Groupby columns specified. Treat as one big group.
return rolling_window( @@ -157,24 +262,6 @@ std::unique_ptr grouped_rolling_window(table_view const& group_keys, group_offsets.element(group_offsets.size() - 1, stream) == input.size() && "Must have at least one group."); - auto preceding_calculator = [d_group_offsets = group_offsets.data(), - d_group_labels = group_labels.data(), - preceding_window] __device__(size_type idx) { - auto group_label = d_group_labels[idx]; - auto group_start = d_group_offsets[group_label]; - return thrust::minimum{}(preceding_window, - idx - group_start + 1); // Preceding includes current row. - }; - - auto following_calculator = [d_group_offsets = group_offsets.data(), - d_group_labels = group_labels.data(), - following_window] __device__(size_type idx) { - auto group_label = d_group_labels[idx]; - auto group_end = d_group_offsets[group_label + 1]; // Cannot fall off the end, since offsets - // is capped with `input.size()`. - return thrust::minimum{}(following_window, (group_end - 1) - idx); - }; - if (aggr.kind == aggregation::CUDA || aggr.kind == aggregation::PTX) { cudf::detail::preceding_window_wrapper grouped_preceding_window{ group_offsets.data(), group_labels.data(), preceding_window}; @@ -192,15 +279,18 @@ std::unique_ptr grouped_rolling_window(table_view const& group_keys, stream, mr); } else { - return cudf::detail::rolling_window( - input, - default_outputs, - cudf::detail::make_counting_transform_iterator(0, preceding_calculator), - cudf::detail::make_counting_transform_iterator(0, following_calculator), - min_periods, - aggr, - stream, - mr); + auto const preceding_column = + make_preceding_column(group_offsets, group_labels, preceding_window, input.size(), stream); + auto const following_column = + make_following_column(group_offsets, group_labels, following_window, input.size(), stream); + return cudf::detail::rolling_window(input, + default_outputs, + preceding_column->view().begin(), + following_column->view().begin(), + min_periods, + aggr, + stream, + mr); } } @@ -321,22 +411,6 @@ std::tuple get_null_bounds_for_orderby_column( : std::make_tuple(num_rows - num_nulls, num_rows); } -template -std::unique_ptr expand_to_column(Calculator const& calc, - size_type const& num_rows, - rmm::cuda_stream_view stream) -{ - auto window_column = cudf::make_numeric_column( - cudf::data_type{type_to_id()}, num_rows, cudf::mask_state::UNALLOCATED, stream); - - auto begin = cudf::detail::make_counting_transform_iterator(0, calc); - - thrust::copy_n( - rmm::exec_policy(stream), begin, num_rows, window_column->mutable_view().data()); - - return window_column; -} - /// Range window computation, with /// 1. no grouping keys specified /// 2. rows in ASCENDING order. @@ -390,7 +464,8 @@ std::unique_ptr range_window_ASC(column_view const& input, 1; // Add 1, for `preceding` to account for current row. 
}; - auto const preceding_column = expand_to_column(preceding_calculator, input.size(), stream); + auto const preceding_column = + cudf::detail::expand_to_column(preceding_calculator, input.size(), stream); auto const following_calculator = [nulls_begin_idx = h_nulls_begin_idx, @@ -425,7 +500,8 @@ std::unique_ptr range_window_ASC(column_view const& input, 1; }; - auto const following_column = expand_to_column(following_calculator, input.size(), stream); + auto const following_column = + cudf::detail::expand_to_column(following_calculator, input.size(), stream); return cudf::detail::rolling_window( input, preceding_column->view(), following_column->view(), min_periods, aggr, stream, mr); @@ -570,7 +646,8 @@ std::unique_ptr range_window_ASC(column_view const& input, 1; // Add 1, for `preceding` to account for current row. }; - auto const preceding_column = expand_to_column(preceding_calculator, input.size(), stream); + auto const preceding_column = + cudf::detail::expand_to_column(preceding_calculator, input.size(), stream); auto const following_calculator = [d_group_offsets = group_offsets.data(), @@ -616,7 +693,8 @@ std::unique_ptr range_window_ASC(column_view const& input, 1; }; - auto const following_column = expand_to_column(following_calculator, input.size(), stream); + auto const following_column = + cudf::detail::expand_to_column(following_calculator, input.size(), stream); return cudf::detail::rolling_window( input, preceding_column->view(), following_column->view(), min_periods, aggr, stream, mr); @@ -675,7 +753,8 @@ std::unique_ptr range_window_DESC(column_view const& input, 1; // Add 1, for `preceding` to account for current row. }; - auto const preceding_column = expand_to_column(preceding_calculator, input.size(), stream); + auto const preceding_column = + cudf::detail::expand_to_column(preceding_calculator, input.size(), stream); auto const following_calculator = [nulls_begin_idx = h_nulls_begin_idx, @@ -710,7 +789,8 @@ std::unique_ptr range_window_DESC(column_view const& input, 1; }; - auto const following_column = expand_to_column(following_calculator, input.size(), stream); + auto const following_column = + cudf::detail::expand_to_column(following_calculator, input.size(), stream); return cudf::detail::rolling_window( input, preceding_column->view(), following_column->view(), min_periods, aggr, stream, mr); @@ -774,7 +854,8 @@ std::unique_ptr range_window_DESC(column_view const& input, 1; // Add 1, for `preceding` to account for current row. }; - auto const preceding_column = expand_to_column(preceding_calculator, input.size(), stream); + auto const preceding_column = + cudf::detail::expand_to_column(preceding_calculator, input.size(), stream); auto const following_calculator = [d_group_offsets = group_offsets.data(), @@ -817,7 +898,8 @@ std::unique_ptr range_window_DESC(column_view const& input, 1; }; - auto const following_column = expand_to_column(following_calculator, input.size(), stream); + auto const following_column = + cudf::detail::expand_to_column(following_calculator, input.size(), stream); if (aggr.kind == aggregation::CUDA || aggr.kind == aggregation::PTX) { CUDF_FAIL("Ranged rolling window does NOT (yet) support UDF."); diff --git a/cpp/src/rolling/rolling.cu b/cpp/src/rolling/rolling.cu index d699d7bea85..5c78cc4382d 100644 --- a/cpp/src/rolling/rolling.cu +++ b/cpp/src/rolling/rolling.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022, NVIDIA CORPORATION. + * Copyright (c) 2019-2023, NVIDIA CORPORATION. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,8 +20,6 @@ #include #include -#include - namespace cudf { // Applies a fixed-size rolling window function to the values in a column, with default output diff --git a/cpp/tests/CMakeLists.txt b/cpp/tests/CMakeLists.txt index 6414962903e..d1e50442058 100644 --- a/cpp/tests/CMakeLists.txt +++ b/cpp/tests/CMakeLists.txt @@ -453,6 +453,7 @@ ConfigureTest( rolling/grouped_rolling_test.cpp rolling/lead_lag_test.cpp rolling/nth_element_test.cpp + rolling/offset_row_window_test.cpp rolling/range_comparator_test.cu rolling/range_rolling_window_test.cpp rolling/range_window_bounds_test.cpp diff --git a/cpp/tests/rolling/grouped_rolling_test.cpp b/cpp/tests/rolling/grouped_rolling_test.cpp index 774f2f7fc40..7dd72ace53c 100644 --- a/cpp/tests/rolling/grouped_rolling_test.cpp +++ b/cpp/tests/rolling/grouped_rolling_test.cpp @@ -33,9 +33,6 @@ #include #include -#include -#include - const std::string cuda_func{ R"***( template @@ -637,7 +634,7 @@ TYPED_TEST(GroupedRollingTest, ZeroWindow) key_1_vec.end()); const cudf::table_view grouping_keys{std::vector{key_0, key_1}}; - cudf::size_type preceding_window = 0; + cudf::size_type preceding_window = 1; cudf::size_type following_window = 0; std::vector expected_group_offsets{0, 4, 8, DATA_SIZE}; diff --git a/cpp/tests/rolling/offset_row_window_test.cpp b/cpp/tests/rolling/offset_row_window_test.cpp new file mode 100644 index 00000000000..ec726878b34 --- /dev/null +++ b/cpp/tests/rolling/offset_row_window_test.cpp @@ -0,0 +1,343 @@ +/* + * Copyright (c) 2021-2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +template +using fwcw = cudf::test::fixed_width_column_wrapper; +template +using decimals_column = cudf::test::fixed_point_column_wrapper; +using ints_column = fwcw; +using bigints_column = fwcw; +using strings_column = cudf::test::strings_column_wrapper; +using lists_column = cudf::test::lists_column_wrapper; +using column_ptr = std::unique_ptr; +using cudf::test::iterators::all_nulls; +using cudf::test::iterators::no_nulls; +using cudf::test::iterators::nulls_at; + +auto constexpr null = int32_t{0}; // NULL representation for int32_t; + +struct OffsetRowWindowTest : public cudf::test::BaseFixture { + static ints_column const _keys; // {0, 0, 0, 0, 0, 0, 1, 1, 1, 1}; + static ints_column const _values; // {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; + + struct rolling_runner { + cudf::window_bounds _preceding, _following; + cudf::size_type _min_periods; + bool _grouped = true; + + rolling_runner(cudf::window_bounds const& preceding, + cudf::window_bounds const& following, + cudf::size_type min_periods_ = 1) + : _preceding{preceding}, _following{following}, _min_periods{min_periods_} + { + } + + rolling_runner& min_periods(cudf::size_type min_periods_) + { + _min_periods = min_periods_; + return *this; + } + + rolling_runner& grouped(bool grouped_) + { + _grouped = grouped_; + return *this; + } + + std::unique_ptr operator()(cudf::rolling_aggregation const& agg) const + { + auto const grouping_keys = + _grouped ? std::vector{_keys} : std::vector{}; + return cudf::grouped_rolling_window( + cudf::table_view{grouping_keys}, _values, _preceding, _following, _min_periods, agg); + } + }; +}; + +ints_column const OffsetRowWindowTest::_keys{0, 0, 0, 0, 0, 0, 1, 1, 1, 1}; +ints_column const OffsetRowWindowTest::_values{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; + +auto const AGG_COUNT_NON_NULL = + cudf::make_count_aggregation(cudf::null_policy::EXCLUDE); +auto const AGG_COUNT_ALL = + cudf::make_count_aggregation(cudf::null_policy::INCLUDE); +auto const AGG_MIN = cudf::make_min_aggregation(); +auto const AGG_MAX = cudf::make_max_aggregation(); +auto const AGG_SUM = cudf::make_sum_aggregation(); +auto const AGG_COLLECT_LIST = cudf::make_collect_list_aggregation(); + +TEST_F(OffsetRowWindowTest, OffsetRowWindow_Grouped_3_to_Minus_1) +{ + auto const preceding = cudf::window_bounds::get(3); + auto const following = cudf::window_bounds::get(-1); + auto run_rolling = rolling_runner{preceding, following}.min_periods(1).grouped(true); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL(*run_rolling(*AGG_COUNT_NON_NULL), + ints_column{{0, 1, 2, 2, 2, 2, 0, 1, 2, 2}, nulls_at({0, 6})}); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL(*run_rolling(*AGG_COUNT_ALL), + ints_column{{0, 1, 2, 2, 2, 2, 0, 1, 2, 2}, nulls_at({0, 6})}); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL( + *run_rolling(*AGG_MIN), ints_column{{null, 0, 0, 1, 2, 3, null, 6, 6, 7}, nulls_at({0, 6})}); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL( + *run_rolling(*AGG_MAX), ints_column{{null, 0, 1, 2, 3, 4, null, 6, 7, 8}, nulls_at({0, 6})}); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL( + *run_rolling(*AGG_SUM), + bigints_column{{null, 0, 1, 3, 5, 7, null, 6, 13, 15}, nulls_at({0, 6})}); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL( + *run_rolling(*AGG_COLLECT_LIST), + lists_column{{{}, {0}, {0, 1}, {1, 2}, {2, 3}, {3, 4}, {}, {6}, {6, 7}, {7, 8}}, + nulls_at({0, 6})}); + + run_rolling.min_periods(0); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL(*run_rolling(*AGG_COUNT_NON_NULL), + ints_column{{0, 1, 2, 2, 2, 2, 0, 1, 2, 2}, no_nulls()}); 
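+  // Editorial note (not in the original patch): with min_periods(0), rows whose
+  // windows are empty (the first row of each group here) yield a valid count of 0
+  // rather than null, which is why these expectations use no_nulls().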
+ + CUDF_TEST_EXPECT_COLUMNS_EQUAL(*run_rolling(*AGG_COUNT_ALL), + ints_column{{0, 1, 2, 2, 2, 2, 0, 1, 2, 2}, no_nulls()}); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL( + *run_rolling(*AGG_COLLECT_LIST), + lists_column{{{}, {0}, {0, 1}, {1, 2}, {2, 3}, {3, 4}, {}, {6}, {6, 7}, {7, 8}}, no_nulls()}); +} + +TEST_F(OffsetRowWindowTest, OffsetRowWindow_Ungrouped_3_to_Minus_1) +{ + auto const preceding = cudf::window_bounds::get(3); + auto const following = cudf::window_bounds::get(-1); + auto run_rolling = rolling_runner{preceding, following}.min_periods(1).grouped(false); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL(*run_rolling(*AGG_COUNT_NON_NULL), + ints_column{{0, 1, 2, 2, 2, 2, 2, 2, 2, 2}, nulls_at({0})}); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL(*run_rolling(*AGG_COUNT_ALL), + ints_column{{0, 1, 2, 2, 2, 2, 2, 2, 2, 2}, nulls_at({0})}); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL(*run_rolling(*AGG_MIN), + ints_column{{null, 0, 0, 1, 2, 3, 4, 5, 6, 7}, nulls_at({0})}); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL(*run_rolling(*AGG_MAX), + ints_column{{null, 0, 1, 2, 3, 4, 5, 6, 7, 8}, nulls_at({0})}); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL( + *run_rolling(*AGG_SUM), bigints_column{{null, 0, 1, 3, 5, 7, 9, 11, 13, 15}, nulls_at({0})}); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL( + *run_rolling(*AGG_COLLECT_LIST), + lists_column{{{}, {0}, {0, 1}, {1, 2}, {2, 3}, {3, 4}, {4, 5}, {5, 6}, {6, 7}, {7, 8}}, + nulls_at({0})}); + + run_rolling.min_periods(0); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL(*run_rolling(*AGG_COUNT_NON_NULL), + ints_column{{0, 1, 2, 2, 2, 2, 2, 2, 2, 2}, no_nulls()}); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL(*run_rolling(*AGG_COUNT_ALL), + ints_column{{0, 1, 2, 2, 2, 2, 2, 2, 2, 2}, no_nulls()}); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL( + *run_rolling(*AGG_COLLECT_LIST), + lists_column{{{}, {0}, {0, 1}, {1, 2}, {2, 3}, {3, 4}, {4, 5}, {5, 6}, {6, 7}, {7, 8}}, + no_nulls()}); +} + +TEST_F(OffsetRowWindowTest, OffsetRowWindow_Grouped_0_to_2) +{ + auto const preceding = cudf::window_bounds::get(0); + auto const following = cudf::window_bounds::get(2); + auto run_rolling = rolling_runner{preceding, following}.min_periods(1).grouped(true); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL( + *run_rolling(*AGG_COUNT_NON_NULL), + ints_column{{2, 2, 2, 2, 1, null, 2, 2, 1, null}, nulls_at({5, 9})}); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL( + *run_rolling(*AGG_COUNT_ALL), + ints_column{{2, 2, 2, 2, 1, null, 2, 2, 1, null}, nulls_at({5, 9})}); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL( + *run_rolling(*AGG_MIN), ints_column{{1, 2, 3, 4, 5, null, 7, 8, 9, null}, nulls_at({5, 9})}); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL( + *run_rolling(*AGG_MAX), ints_column{{2, 3, 4, 5, 5, null, 8, 9, 9, null}, nulls_at({5, 9})}); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL( + *run_rolling(*AGG_SUM), + bigints_column{{3, 5, 7, 9, 5, null, 15, 17, 9, null}, nulls_at({5, 9})}); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL( + *run_rolling(*AGG_COLLECT_LIST), + lists_column{{{1, 2}, {2, 3}, {3, 4}, {4, 5}, {5}, {}, {7, 8}, {8, 9}, {9}, {}}, + nulls_at({5, 9})}); + + run_rolling.min_periods(0); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL(*run_rolling(*AGG_COUNT_NON_NULL), + ints_column{{2, 2, 2, 2, 1, 0, 2, 2, 1, 0}, no_nulls()}); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL(*run_rolling(*AGG_COUNT_ALL), + ints_column{{2, 2, 2, 2, 1, 0, 2, 2, 1, 0}, no_nulls()}); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL( + *run_rolling(*AGG_COLLECT_LIST), + lists_column{{{1, 2}, {2, 3}, {3, 4}, {4, 5}, {5}, {}, {7, 8}, {8, 9}, {9}, {}}, no_nulls}); +} + +TEST_F(OffsetRowWindowTest, OffsetRowWindow_Ungrouped_0_to_2) +{ + auto const preceding = 
cudf::window_bounds::get(0); + auto const following = cudf::window_bounds::get(2); + auto run_rolling = rolling_runner{preceding, following}.min_periods(1).grouped(false); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL(*run_rolling(*AGG_COUNT_NON_NULL), + ints_column{{2, 2, 2, 2, 2, 2, 2, 2, 1, null}, nulls_at({9})}); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL(*run_rolling(*AGG_COUNT_ALL), + ints_column{{2, 2, 2, 2, 2, 2, 2, 2, 1, null}, nulls_at({9})}); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL(*run_rolling(*AGG_MIN), + ints_column{{1, 2, 3, 4, 5, 6, 7, 8, 9, null}, nulls_at({9})}); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL(*run_rolling(*AGG_MAX), + ints_column{{2, 3, 4, 5, 6, 7, 8, 9, 9, null}, nulls_at({9})}); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL( + *run_rolling(*AGG_SUM), bigints_column{{3, 5, 7, 9, 11, 13, 15, 17, 9, null}, nulls_at({9})}); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL( + *run_rolling(*AGG_COLLECT_LIST), + lists_column{{{1, 2}, {2, 3}, {3, 4}, {4, 5}, {5, 6}, {6, 7}, {7, 8}, {8, 9}, {9}, {}}, + nulls_at({9})}); + + run_rolling.min_periods(0); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL(*run_rolling(*AGG_COUNT_NON_NULL), + ints_column{{2, 2, 2, 2, 2, 2, 2, 2, 1, 0}, no_nulls()}); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL(*run_rolling(*AGG_COUNT_ALL), + ints_column{{2, 2, 2, 2, 2, 2, 2, 2, 1, 0}, no_nulls()}); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL( + *run_rolling(*AGG_COLLECT_LIST), + lists_column{{{1, 2}, {2, 3}, {3, 4}, {4, 5}, {5, 6}, {6, 7}, {7, 8}, {8, 9}, {9}, {}}, + no_nulls}); +} + +// To test that preceding bounds are clamped correctly at group boundaries. +TEST_F(OffsetRowWindowTest, TestNegativeBoundsClamp) +{ + auto const grp_iter = + thrust::make_transform_iterator(thrust::make_counting_iterator(0), [](auto const& i) { + return i / 10; // 0-9 in the first group, 10-19 in the second, etc. + }); + auto const agg_iter = thrust::make_constant_iterator(1); + + auto const grp = ints_column(grp_iter, grp_iter + 30); + auto const agg = ints_column(agg_iter, agg_iter + 30); + + auto const min_periods = 0; + auto const rolling_sum = [&](auto const preceding, auto const following) { + return cudf::grouped_rolling_window( + cudf::table_view{{grp}}, agg, preceding, following, min_periods, *AGG_SUM); + }; + + // Testing negative preceding. + for (auto const preceding : {0, -1, -2, -5, -10, -20, -50}) { + auto const results = rolling_sum(preceding, 100); + auto const expected_fun = [&](auto const& i) { + assert(preceding < 1); + auto const index_in_group = i % 10; + auto const start = std::min(-(preceding - 1) + index_in_group, 10); + return int64_t{10 - start}; + }; + auto const expected_iter = + thrust::make_transform_iterator(thrust::make_counting_iterator(0), expected_fun); + auto const expected = bigints_column(expected_iter, expected_iter + 30, no_nulls()); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected); + } + + // Testing negative following. 
+ for (auto const following : {-1, -2, -5, -10, -20, -50}) { + auto const results = rolling_sum(100, following); + auto const expected_fun = [&](auto const& i) { + assert(following < 0); + auto const index_in_group = i % 10; + auto const end = std::max(index_in_group + following, -1); + return int64_t{end + 1}; + }; + auto const expected_iter = + thrust::make_transform_iterator(thrust::make_counting_iterator(0), expected_fun); + auto const expected = bigints_column(expected_iter, expected_iter + 30, no_nulls()); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected); + } +} + +TEST_F(OffsetRowWindowTest, CheckGroupBoundaries) +{ + auto grp_iter = + thrust::make_transform_iterator(thrust::make_counting_iterator(0), [](auto const& i) { + if (i < 10) return 1; + if (i < 20) return 2; + return 3; + }); + auto const grp = ints_column(grp_iter, grp_iter + 30); + auto const agg = ints_column(grp_iter, grp_iter + 30); + { + auto const results = + cudf::grouped_rolling_window(cudf::table_view{{grp}}, + agg, + -80, + 100, + 1, + *cudf::make_max_aggregation()); + auto const null_iter = thrust::make_constant_iterator(null); + auto const expected = ints_column(null_iter, null_iter + 30, all_nulls()); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL(results->view(), expected); + } + { + auto const results = + cudf::grouped_rolling_window(cudf::table_view{{grp}}, + agg, + -1, + 4, + 1, + *cudf::make_min_aggregation()); + auto const expected = + ints_column{{1, 1, 1, 1, 1, 1, 1, 1, null, null, 2, 2, 2, 2, 2, + 2, 2, 2, null, null, 3, 3, 3, 3, 3, 3, 3, 3, null, null}, + nulls_at({8, 9, 18, 19, 28, 29})}; + + CUDF_TEST_EXPECT_COLUMNS_EQUAL(results->view(), expected); + } +} diff --git a/cpp/tests/rolling/rolling_test.cpp b/cpp/tests/rolling/rolling_test.cpp index e410e2488b3..d0181974479 100644 --- a/cpp/tests/rolling/rolling_test.cpp +++ b/cpp/tests/rolling/rolling_test.cpp @@ -148,20 +148,6 @@ TEST_F(RollingStringTest, MinPeriods) CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected_count_all, got_count_all->view()); } -TEST_F(RollingStringTest, ZeroWindowSize) -{ - cudf::test::strings_column_wrapper input( - {"This", "is", "rolling", "test", "being", "operated", "on", "string", "column"}, - {1, 0, 0, 1, 0, 1, 1, 1, 0}); - cudf::test::fixed_width_column_wrapper expected_count( - {0, 0, 0, 0, 0, 0, 0, 0, 0}, {1, 1, 1, 1, 1, 1, 1, 1, 1, 1}); - - auto got_count = cudf::rolling_window( - input, 0, 0, 0, *cudf::make_count_aggregation()); - - CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected_count, got_count->view()); -} - // ========================================================================================= class RollingStructTest : public cudf::test::BaseFixture {}; @@ -970,6 +956,7 @@ TEST_F(RollingtVarStdTestUntyped, SimpleStaticVarianceStdInfNaN) #undef XXX } +/* // negative sizes TYPED_TEST(RollingTest, NegativeWindowSizes) { @@ -980,10 +967,12 @@ TYPED_TEST(RollingTest, NegativeWindowSizes) std::vector window{3}; std::vector negative_window{-2}; + this->run_test_col_agg(input, negative_window, window, 1); this->run_test_col_agg(input, window, negative_window, 1); this->run_test_col_agg(input, negative_window, negative_window, 1); } + */ // simple example from Pandas docs: TYPED_TEST(RollingTest, SimpleDynamic) @@ -1033,6 +1022,7 @@ TYPED_TEST(RollingTest, AllInvalid) } // window = following_window = 0 +// Note: Preceding includes current row, so its value is set to 1. 
TYPED_TEST(RollingTest, ZeroWindow) { cudf::size_type num_rows = 1000; @@ -1042,10 +1032,11 @@ TYPED_TEST(RollingTest, ZeroWindow) cudf::test::fixed_width_column_wrapper input( col_data.begin(), col_data.end(), col_mask.begin()); - std::vector window({0}); + std::vector preceding({0}); + std::vector following({1}); cudf::size_type periods = num_rows; - this->run_test_col_agg(input, window, window, periods); + this->run_test_col_agg(input, preceding, following, periods); } // min_periods = 0 From dcac6cc6a719e2caf1c461be32acd2f7e78308e2 Mon Sep 17 00:00:00 2001 From: Jake Awe <50372925+AyodeAwe@users.noreply.github.com> Date: Thu, 21 Sep 2023 14:04:09 -0500 Subject: [PATCH 089/150] Update image names (#14145) PR updates `rapidsai/ci` references to `rapidsai/ci-conda` Authors: - Jake Awe (https://github.com/AyodeAwe) Approvers: - AJ Schmidt (https://github.com/ajschmidt8) URL: https://github.com/rapidsai/cudf/pull/14145 --- .github/workflows/build.yaml | 2 +- .github/workflows/pr.yaml | 6 +++--- .github/workflows/test.yaml | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 91ec0904103..0e120d34bb1 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -62,7 +62,7 @@ jobs: arch: "amd64" branch: ${{ inputs.branch }} build_type: ${{ inputs.build_type || 'branch' }} - container_image: "rapidsai/ci:latest" + container_image: "rapidsai/ci-conda:latest" date: ${{ inputs.date }} node_type: "gpu-v100-latest-1" run_script: "ci/build_docs.sh" diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml index b47a40b13d2..054ea7968c8 100644 --- a/.github/workflows/pr.yaml +++ b/.github/workflows/pr.yaml @@ -73,7 +73,7 @@ jobs: build_type: pull-request node_type: "gpu-v100-latest-1" arch: "amd64" - container_image: "rapidsai/ci:latest" + container_image: "rapidsai/ci-conda:latest" run_script: "ci/test_java.sh" conda-notebook-tests: needs: conda-python-build @@ -83,7 +83,7 @@ jobs: build_type: pull-request node_type: "gpu-v100-latest-1" arch: "amd64" - container_image: "rapidsai/ci:latest" + container_image: "rapidsai/ci-conda:latest" run_script: "ci/test_notebooks.sh" docs-build: needs: conda-python-build @@ -93,7 +93,7 @@ jobs: build_type: pull-request node_type: "gpu-v100-latest-1" arch: "amd64" - container_image: "rapidsai/ci:latest" + container_image: "rapidsai/ci-conda:latest" run_script: "ci/build_docs.sh" wheel-build-cudf: needs: checks diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 6bd2787d6dc..030f2e41db4 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -32,7 +32,7 @@ jobs: sha: ${{ inputs.sha }} node_type: "gpu-v100-latest-1" arch: "amd64" - container_image: "rapidsai/ci:latest" + container_image: "rapidsai/ci-conda:latest" run_script: "ci/test_cpp_memcheck.sh" conda-python-cudf-tests: secrets: inherit @@ -63,7 +63,7 @@ jobs: sha: ${{ inputs.sha }} node_type: "gpu-v100-latest-1" arch: "amd64" - container_image: "rapidsai/ci:latest" + container_image: "rapidsai/ci-conda:latest" run_script: "ci/test_java.sh" conda-notebook-tests: secrets: inherit @@ -75,7 +75,7 @@ jobs: sha: ${{ inputs.sha }} node_type: "gpu-v100-latest-1" arch: "amd64" - container_image: "rapidsai/ci:latest" + container_image: "rapidsai/ci-conda:latest" run_script: "ci/test_notebooks.sh" wheel-tests-cudf: secrets: inherit From f0ba8598dd9792e137ca7aa3a1b22dbb84393cc1 Mon Sep 17 00:00:00 2001 From: Bradley Dice Date: Thu, 21 Sep 
2023 16:28:29 -0700 Subject: [PATCH 090/150] Pin to numpy<1.25 and numba<0.58 to avoid errors and deprecation warnings-as-errors. (#14156) Closes #14155. Related: #14160. (Will newer numpy support be backported to pandas 1.x? edit: no, see below) Authors: - Bradley Dice (https://github.com/bdice) Approvers: - Vyas Ramasubramani (https://github.com/vyasr) - Benjamin Zaitlen (https://github.com/quasiben) - Ray Douglass (https://github.com/raydouglass) - GALI PREM SAGAR (https://github.com/galipremsagar) URL: https://github.com/rapidsai/cudf/pull/14156 --- conda/environments/all_cuda-118_arch-x86_64.yaml | 4 ++-- conda/environments/all_cuda-120_arch-x86_64.yaml | 4 ++-- conda/recipes/cudf/meta.yaml | 6 ++++-- dependencies.yaml | 8 +++++--- python/cudf/pyproject.toml | 6 +++--- python/cudf_kafka/pyproject.toml | 2 +- python/dask_cudf/pyproject.toml | 4 ++-- 7 files changed, 19 insertions(+), 15 deletions(-) diff --git a/conda/environments/all_cuda-118_arch-x86_64.yaml b/conda/environments/all_cuda-118_arch-x86_64.yaml index 692ba78f317..d4abc28cf13 100644 --- a/conda/environments/all_cuda-118_arch-x86_64.yaml +++ b/conda/environments/all_cuda-118_arch-x86_64.yaml @@ -55,8 +55,8 @@ dependencies: - nbsphinx - ninja - notebook -- numba>=0.57 -- numpy>=1.21 +- numba>=0.57,<0.58 +- numpy>=1.21,<1.25 - numpydoc - nvcc_linux-64=11.8 - nvcomp==2.6.1 diff --git a/conda/environments/all_cuda-120_arch-x86_64.yaml b/conda/environments/all_cuda-120_arch-x86_64.yaml index cf1bf4b8733..9a98e400e6d 100644 --- a/conda/environments/all_cuda-120_arch-x86_64.yaml +++ b/conda/environments/all_cuda-120_arch-x86_64.yaml @@ -54,8 +54,8 @@ dependencies: - nbsphinx - ninja - notebook -- numba>=0.57 -- numpy>=1.21 +- numba>=0.57,<0.58 +- numpy>=1.21,<1.25 - numpydoc - nvcomp==2.6.1 - nvtx>=0.2.1 diff --git a/conda/recipes/cudf/meta.yaml b/conda/recipes/cudf/meta.yaml index a909b72c878..54b687faa69 100644 --- a/conda/recipes/cudf/meta.yaml +++ b/conda/recipes/cudf/meta.yaml @@ -78,8 +78,10 @@ requirements: - typing_extensions >=4.0.0 - pandas >=1.3,<1.6.0dev0 - cupy >=12.0.0 - - numba >=0.57 - - numpy >=1.21 + # TODO: Pin to numba<0.58 until #14160 is resolved + - numba >=0.57,<0.58 + # TODO: Pin to numpy<1.25 until cudf requires pandas 2 + - numpy >=1.21,<1.25 - {{ pin_compatible('pyarrow', max_pin='x.x.x') }} - libcudf ={{ version }} - {{ pin_compatible('rmm', max_pin='x.x') }} diff --git a/dependencies.yaml b/dependencies.yaml index 398ae193fe6..376e43094a7 100644 --- a/dependencies.yaml +++ b/dependencies.yaml @@ -259,7 +259,8 @@ dependencies: # Hard pin the patch version used during the build. This must be kept # in sync with the version pinned in get_arrow.cmake. 
- pyarrow==12.0.1.* - - numpy>=1.21 + # TODO: Pin to numpy<1.25 until cudf requires pandas 2 + - &numpy numpy>=1.21,<1.25 build_python: common: - output_types: [conda, requirements, pyproject] @@ -425,14 +426,15 @@ dependencies: - output_types: [conda, requirements, pyproject] packages: - fsspec>=0.6.0 - - numpy>=1.21 + - *numpy - pandas>=1.3,<1.6.0dev0 run_cudf: common: - output_types: [conda, requirements, pyproject] packages: - cachetools - - &numba numba>=0.57 + # TODO: Pin to numba<0.58 until #14160 is resolved + - &numba numba>=0.57,<0.58 - nvtx>=0.2.1 - packaging - rmm==23.10.* diff --git a/python/cudf/pyproject.toml b/python/cudf/pyproject.toml index 574769f68d1..085d78afc7c 100644 --- a/python/cudf/pyproject.toml +++ b/python/cudf/pyproject.toml @@ -6,7 +6,7 @@ requires = [ "cmake>=3.26.4", "cython>=3.0.0", "ninja", - "numpy>=1.21", + "numpy>=1.21,<1.25", "protoc-wheel", "pyarrow==12.0.1.*", "rmm==23.10.*", @@ -31,8 +31,8 @@ dependencies = [ "cuda-python>=11.7.1,<12.0a0", "cupy-cuda11x>=12.0.0", "fsspec>=0.6.0", - "numba>=0.57", - "numpy>=1.21", + "numba>=0.57,<0.58", + "numpy>=1.21,<1.25", "nvtx>=0.2.1", "packaging", "pandas>=1.3,<1.6.0dev0", diff --git a/python/cudf_kafka/pyproject.toml b/python/cudf_kafka/pyproject.toml index a6ef867451b..386cdc32ab1 100644 --- a/python/cudf_kafka/pyproject.toml +++ b/python/cudf_kafka/pyproject.toml @@ -4,7 +4,7 @@ requires = [ "cython>=3.0.0", - "numpy>=1.21", + "numpy>=1.21,<1.25", "pyarrow==12.0.1.*", "setuptools", "wheel", diff --git a/python/dask_cudf/pyproject.toml b/python/dask_cudf/pyproject.toml index 2464abca71a..922da366422 100644 --- a/python/dask_cudf/pyproject.toml +++ b/python/dask_cudf/pyproject.toml @@ -23,7 +23,7 @@ dependencies = [ "dask>=2023.7.1", "distributed>=2023.7.1", "fsspec>=0.6.0", - "numpy>=1.21", + "numpy>=1.21,<1.25", "pandas>=1.3,<1.6.0dev0", ] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. 
 classifiers = [
@@ -40,7 +40,7 @@ dynamic = ["entry-points"]
 
 [project.optional-dependencies]
 test = [
     "dask-cuda==23.10.*",
-    "numba>=0.57",
+    "numba>=0.57,<0.58",
     "pytest",
     "pytest-cov",
     "pytest-xdist",

From dd58dc4e9dae387c878afbe6cb32a311ce76fe68 Mon Sep 17 00:00:00 2001
From: Ben Jarmak <104460670+jarmak-nv@users.noreply.github.com>
Date: Fri, 22 Sep 2023 07:58:56 -0500
Subject: [PATCH 091/150] Remove outdated GitHub project actions (#14161)

This PR removes two GitHub Actions that are no longer needed:

- `.github/workflows/add_to_project.yml` - This automatically adds issues and
  PRs to the cuDF/Dask/Numba/UCX project, but this is now built-in
  functionality in GitHub Projects.
- `.github/workflows/new-issues-to-triage-projects.yml` - This tries to add
  issues to a now-closed project.

Authors:
  - Ben Jarmak (https://github.com/jarmak-nv)

Approvers:
  - AJ Schmidt (https://github.com/ajschmidt8)
---
 .github/workflows/add_to_project.yml  | 20 -----------
 .../new-issues-to-triage-projects.yml  | 35 -------------------
 2 files changed, 55 deletions(-)
 delete mode 100644 .github/workflows/add_to_project.yml
 delete mode 100644 .github/workflows/new-issues-to-triage-projects.yml

diff --git a/.github/workflows/add_to_project.yml b/.github/workflows/add_to_project.yml
deleted file mode 100644
index b301c56a999..00000000000
--- a/.github/workflows/add_to_project.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-name: Add new issue/PR to project
-
-on:
-  issues:
-    types:
-      - opened
-
-  pull_request_target:
-    types:
-      - opened
-
-jobs:
-  add-to-project:
-    name: Add issue or PR to project
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/add-to-project@v0.3.0
-        with:
-          project-url: https://github.com/orgs/rapidsai/projects/51
-          github-token: ${{ secrets.ADD_TO_PROJECT_GITHUB_TOKEN }}

diff --git a/.github/workflows/new-issues-to-triage-projects.yml b/.github/workflows/new-issues-to-triage-projects.yml
deleted file mode 100644
index cf9b0c379f1..00000000000
--- a/.github/workflows/new-issues-to-triage-projects.yml
+++ /dev/null
@@ -1,35 +0,0 @@
-name: Auto Assign New Issues to Triage Project
-
-on:
-  issues:
-    types: [opened]
-
-env:
-  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
-jobs:
-  assign_one_project:
-    runs-on: ubuntu-latest
-    name: Assign to New Issues to Triage Project
-    steps:
-      - name: Process bug issues
-        uses: docker://takanabe/github-actions-automate-projects:v0.0.1
-        if: contains(github.event.issue.labels.*.name, 'bug') && contains(github.event.issue.labels.*.name, '? - Needs Triage')
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          GITHUB_PROJECT_URL: https://github.com/rapidsai/cudf/projects/1
-          GITHUB_PROJECT_COLUMN_NAME: 'Needs prioritizing'
-      - name: Process feature issues
-        uses: docker://takanabe/github-actions-automate-projects:v0.0.1
-        if: contains(github.event.issue.labels.*.name, 'feature request') && contains(github.event.issue.labels.*.name, '? - Needs Triage')
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          GITHUB_PROJECT_URL: https://github.com/rapidsai/cudf/projects/9
-          GITHUB_PROJECT_COLUMN_NAME: 'Needs prioritizing'
-      - name: Process other issues
-        uses: docker://takanabe/github-actions-automate-projects:v0.0.1
-        if: contains(github.event.issue.labels.*.name, '?
- Needs Triage') && (!contains(github.event.issue.labels.*.name, 'bug') && !contains(github.event.issue.labels.*.name, 'feature request')) - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - GITHUB_PROJECT_URL: https://github.com/rapidsai/cudf/projects/10 - GITHUB_PROJECT_COLUMN_NAME: 'Needs prioritizing' From f42231f9193952b45cd9ba4642adcca392a7ce14 Mon Sep 17 00:00:00 2001 From: Ray Douglass Date: Fri, 22 Sep 2023 10:19:16 -0400 Subject: [PATCH 092/150] v23.12 Updates [skip ci] --- .github/workflows/build.yaml | 16 +++++------ .github/workflows/pr.yaml | 28 +++++++++---------- .github/workflows/test.yaml | 16 +++++------ README.md | 2 +- ci/build_docs.sh | 2 +- ci/check_style.sh | 2 +- ci/test_wheel_dask_cudf.sh | 2 +- .../all_cuda-118_arch-x86_64.yaml | 8 +++--- .../all_cuda-120_arch-x86_64.yaml | 8 +++--- cpp/CMakeLists.txt | 2 +- cpp/doxygen/Doxyfile | 4 +-- cpp/examples/basic/CMakeLists.txt | 2 +- cpp/examples/strings/CMakeLists.txt | 2 +- cpp/libcudf_kafka/CMakeLists.txt | 2 +- dependencies.yaml | 16 +++++------ docs/cudf/source/conf.py | 4 +-- docs/dask_cudf/source/conf.py | 4 +-- fetch_rapids.cmake | 2 +- java/ci/README.md | 4 +-- java/pom.xml | 2 +- java/src/main/native/CMakeLists.txt | 2 +- python/cudf/CMakeLists.txt | 2 +- python/cudf/cudf/__init__.py | 2 +- python/cudf/pyproject.toml | 6 ++-- python/cudf_kafka/pyproject.toml | 4 +-- python/custreamz/pyproject.toml | 6 ++-- python/dask_cudf/dask_cudf/__init__.py | 2 +- python/dask_cudf/pyproject.toml | 6 ++-- 28 files changed, 79 insertions(+), 79 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 0e120d34bb1..ab028eb89cc 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -28,7 +28,7 @@ concurrency: jobs: cpp-build: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-build.yaml@branch-23.10 + uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-build.yaml@branch-23.12 with: build_type: ${{ inputs.build_type || 'branch' }} branch: ${{ inputs.branch }} @@ -37,7 +37,7 @@ jobs: python-build: needs: [cpp-build] secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-build.yaml@branch-23.10 + uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-build.yaml@branch-23.12 with: build_type: ${{ inputs.build_type || 'branch' }} branch: ${{ inputs.branch }} @@ -46,7 +46,7 @@ jobs: upload-conda: needs: [cpp-build, python-build] secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-upload-packages.yaml@branch-23.10 + uses: rapidsai/shared-action-workflows/.github/workflows/conda-upload-packages.yaml@branch-23.12 with: build_type: ${{ inputs.build_type || 'branch' }} branch: ${{ inputs.branch }} @@ -57,7 +57,7 @@ jobs: if: github.ref_type == 'branch' needs: python-build secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.10 + uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.12 with: arch: "amd64" branch: ${{ inputs.branch }} @@ -69,7 +69,7 @@ jobs: sha: ${{ inputs.sha }} wheel-build-cudf: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-build.yaml@branch-23.10 + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-build.yaml@branch-23.12 with: build_type: ${{ inputs.build_type || 'branch' }} branch: ${{ inputs.branch }} @@ -79,7 +79,7 @@ jobs: wheel-publish-cudf: needs: wheel-build-cudf secrets: inherit - uses: 
rapidsai/shared-action-workflows/.github/workflows/wheels-publish.yaml@branch-23.10 + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-publish.yaml@branch-23.12 with: build_type: ${{ inputs.build_type || 'branch' }} branch: ${{ inputs.branch }} @@ -89,7 +89,7 @@ jobs: wheel-build-dask-cudf: needs: wheel-publish-cudf secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-build.yaml@branch-23.10 + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-build.yaml@branch-23.12 with: matrix_filter: map(select(.ARCH == "amd64" and .PY_VER == "3.10" and (.CUDA_VER == "11.8.0" or .CUDA_VER == "12.0.1"))) build_type: ${{ inputs.build_type || 'branch' }} @@ -100,7 +100,7 @@ jobs: wheel-publish-dask-cudf: needs: wheel-build-dask-cudf secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-publish.yaml@branch-23.10 + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-publish.yaml@branch-23.12 with: build_type: ${{ inputs.build_type || 'branch' }} branch: ${{ inputs.branch }} diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml index 054ea7968c8..214f9c90b41 100644 --- a/.github/workflows/pr.yaml +++ b/.github/workflows/pr.yaml @@ -26,34 +26,34 @@ jobs: - wheel-build-dask-cudf - wheel-tests-dask-cudf secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/pr-builder.yaml@branch-23.10 + uses: rapidsai/shared-action-workflows/.github/workflows/pr-builder.yaml@branch-23.12 checks: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/checks.yaml@branch-23.10 + uses: rapidsai/shared-action-workflows/.github/workflows/checks.yaml@branch-23.12 with: enable_check_generated_files: false conda-cpp-build: needs: checks secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-build.yaml@branch-23.10 + uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-build.yaml@branch-23.12 with: build_type: pull-request conda-cpp-tests: needs: conda-cpp-build secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-tests.yaml@branch-23.10 + uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-tests.yaml@branch-23.12 with: build_type: pull-request conda-python-build: needs: conda-cpp-build secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-build.yaml@branch-23.10 + uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-build.yaml@branch-23.12 with: build_type: pull-request conda-python-cudf-tests: needs: conda-python-build secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-tests.yaml@branch-23.10 + uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-tests.yaml@branch-23.12 with: build_type: pull-request test_script: "ci/test_python_cudf.sh" @@ -61,14 +61,14 @@ jobs: # Tests for dask_cudf, custreamz, cudf_kafka are separated for CI parallelism needs: conda-python-build secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-tests.yaml@branch-23.10 + uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-tests.yaml@branch-23.12 with: build_type: pull-request test_script: "ci/test_python_other.sh" conda-java-tests: needs: conda-cpp-build secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.10 + uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.12 with: build_type: 
pull-request node_type: "gpu-v100-latest-1" @@ -78,7 +78,7 @@ jobs: conda-notebook-tests: needs: conda-python-build secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.10 + uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.12 with: build_type: pull-request node_type: "gpu-v100-latest-1" @@ -88,7 +88,7 @@ jobs: docs-build: needs: conda-python-build secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.10 + uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.12 with: build_type: pull-request node_type: "gpu-v100-latest-1" @@ -98,21 +98,21 @@ jobs: wheel-build-cudf: needs: checks secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-build.yaml@branch-23.10 + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-build.yaml@branch-23.12 with: build_type: pull-request script: "ci/build_wheel_cudf.sh" wheel-tests-cudf: needs: wheel-build-cudf secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-test.yaml@branch-23.10 + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-test.yaml@branch-23.12 with: build_type: pull-request script: ci/test_wheel_cudf.sh wheel-build-dask-cudf: needs: wheel-tests-cudf secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-build.yaml@branch-23.10 + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-build.yaml@branch-23.12 with: matrix_filter: map(select(.ARCH == "amd64" and .PY_VER == "3.10" and (.CUDA_VER == "11.8.0" or .CUDA_VER == "12.0.1"))) build_type: pull-request @@ -120,7 +120,7 @@ jobs: wheel-tests-dask-cudf: needs: wheel-build-dask-cudf secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-test.yaml@branch-23.10 + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-test.yaml@branch-23.12 with: matrix_filter: map(select(.ARCH == "amd64" and .PY_VER == "3.10" and (.CUDA_VER == "11.8.0" or .CUDA_VER == "12.0.1"))) build_type: pull-request diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 030f2e41db4..9ca32bcfe03 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -16,7 +16,7 @@ on: jobs: conda-cpp-tests: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-tests.yaml@branch-23.10 + uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-tests.yaml@branch-23.12 with: build_type: nightly branch: ${{ inputs.branch }} @@ -24,7 +24,7 @@ jobs: sha: ${{ inputs.sha }} conda-cpp-memcheck-tests: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.10 + uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.12 with: build_type: nightly branch: ${{ inputs.branch }} @@ -36,7 +36,7 @@ jobs: run_script: "ci/test_cpp_memcheck.sh" conda-python-cudf-tests: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-tests.yaml@branch-23.10 + uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-tests.yaml@branch-23.12 with: build_type: nightly branch: ${{ inputs.branch }} @@ -46,7 +46,7 @@ jobs: conda-python-other-tests: # Tests for dask_cudf, custreamz, cudf_kafka are separated for CI parallelism secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-tests.yaml@branch-23.10 + uses: 
rapidsai/shared-action-workflows/.github/workflows/conda-python-tests.yaml@branch-23.12 with: build_type: nightly branch: ${{ inputs.branch }} @@ -55,7 +55,7 @@ jobs: test_script: "ci/test_python_other.sh" conda-java-tests: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.10 + uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.12 with: build_type: nightly branch: ${{ inputs.branch }} @@ -67,7 +67,7 @@ jobs: run_script: "ci/test_java.sh" conda-notebook-tests: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.10 + uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.12 with: build_type: nightly branch: ${{ inputs.branch }} @@ -79,7 +79,7 @@ jobs: run_script: "ci/test_notebooks.sh" wheel-tests-cudf: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-test.yaml@branch-23.10 + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-test.yaml@branch-23.12 with: build_type: nightly branch: ${{ inputs.branch }} @@ -88,7 +88,7 @@ jobs: script: ci/test_wheel_cudf.sh wheel-tests-dask-cudf: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-test.yaml@branch-23.10 + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-test.yaml@branch-23.12 with: matrix_filter: map(select(.ARCH == "amd64" and .PY_VER == "3.10" and (.CUDA_VER == "11.8.0" or .CUDA_VER == "12.0.1"))) build_type: nightly diff --git a/README.md b/README.md index 64c980d0cb3..5f2ce014dba 100644 --- a/README.md +++ b/README.md @@ -65,7 +65,7 @@ cuDF can be installed with conda (via [miniconda](https://conda.io/miniconda.htm ```bash conda install -c rapidsai -c conda-forge -c nvidia \ - cudf=23.10 python=3.10 cuda-version=11.8 + cudf=23.12 python=3.10 cuda-version=11.8 ``` We also provide [nightly Conda packages](https://anaconda.org/rapidsai-nightly) built from the HEAD diff --git a/ci/build_docs.sh b/ci/build_docs.sh index 1ed047a500b..11e7a96b751 100755 --- a/ci/build_docs.sh +++ b/ci/build_docs.sh @@ -25,7 +25,7 @@ rapids-mamba-retry install \ --channel "${PYTHON_CHANNEL}" \ libcudf cudf dask-cudf -export RAPIDS_VERSION_NUMBER="23.10" +export RAPIDS_VERSION_NUMBER="23.12" export RAPIDS_DOCS_DIR="$(mktemp -d)" rapids-logger "Build CPP docs" diff --git a/ci/check_style.sh b/ci/check_style.sh index e96ad8bf1db..a01cf4dcc6b 100755 --- a/ci/check_style.sh +++ b/ci/check_style.sh @@ -14,7 +14,7 @@ rapids-dependency-file-generator \ rapids-mamba-retry env create --force -f env.yaml -n checks conda activate checks -FORMAT_FILE_URL=https://raw.githubusercontent.com/rapidsai/rapids-cmake/branch-23.10/cmake-format-rapids-cmake.json +FORMAT_FILE_URL=https://raw.githubusercontent.com/rapidsai/rapids-cmake/branch-23.12/cmake-format-rapids-cmake.json export RAPIDS_CMAKE_FORMAT_FILE=/tmp/rapids_cmake_ci/cmake-formats-rapids-cmake.json mkdir -p $(dirname ${RAPIDS_CMAKE_FORMAT_FILE}) wget -O ${RAPIDS_CMAKE_FORMAT_FILE} ${FORMAT_FILE_URL} diff --git a/ci/test_wheel_dask_cudf.sh b/ci/test_wheel_dask_cudf.sh index d6e7f4bf65e..050aa4561c7 100755 --- a/ci/test_wheel_dask_cudf.sh +++ b/ci/test_wheel_dask_cudf.sh @@ -11,7 +11,7 @@ RAPIDS_PY_WHEEL_NAME="cudf_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from python -m pip install --no-deps ./local-cudf-dep/cudf*.whl # Always install latest dask for testing -python -m pip install git+https://github.com/dask/dask.git@main 
git+https://github.com/dask/distributed.git@main git+https://github.com/rapidsai/dask-cuda.git@branch-23.10 +python -m pip install git+https://github.com/dask/dask.git@main git+https://github.com/dask/distributed.git@main git+https://github.com/rapidsai/dask-cuda.git@branch-23.12 # echo to expand wildcard before adding `[extra]` requires for pip python -m pip install $(echo ./dist/dask_cudf*.whl)[test] diff --git a/conda/environments/all_cuda-118_arch-x86_64.yaml b/conda/environments/all_cuda-118_arch-x86_64.yaml index d4abc28cf13..151d250d7e9 100644 --- a/conda/environments/all_cuda-118_arch-x86_64.yaml +++ b/conda/environments/all_cuda-118_arch-x86_64.yaml @@ -25,7 +25,7 @@ dependencies: - cxx-compiler - cython>=3.0.0 - dask-core>=2023.7.1 -- dask-cuda==23.10.* +- dask-cuda==23.12.* - dask>=2023.7.1 - distributed>=2023.7.1 - dlpack>=0.5,<0.6.0a0 @@ -44,9 +44,9 @@ dependencies: - libcufile=1.4.0.31 - libcurand-dev=10.3.0.86 - libcurand=10.3.0.86 -- libkvikio==23.10.* +- libkvikio==23.12.* - librdkafka>=1.9.0,<1.10.0a0 -- librmm==23.10.* +- librmm==23.12.* - make - mimesis>=4.1.0 - moto>=4.0.8 @@ -80,7 +80,7 @@ dependencies: - python-snappy>=0.6.0 - python>=3.9,<3.11 - pytorch<1.12.0 -- rmm==23.10.* +- rmm==23.12.* - s3fs>=2022.3.0 - scikit-build>=0.13.1 - scipy diff --git a/conda/environments/all_cuda-120_arch-x86_64.yaml b/conda/environments/all_cuda-120_arch-x86_64.yaml index 9a98e400e6d..ad52b8f8b97 100644 --- a/conda/environments/all_cuda-120_arch-x86_64.yaml +++ b/conda/environments/all_cuda-120_arch-x86_64.yaml @@ -26,7 +26,7 @@ dependencies: - cxx-compiler - cython>=3.0.0 - dask-core>=2023.7.1 -- dask-cuda==23.10.* +- dask-cuda==23.12.* - dask>=2023.7.1 - distributed>=2023.7.1 - dlpack>=0.5,<0.6.0a0 @@ -43,9 +43,9 @@ dependencies: - libarrow==12.0.1.* - libcufile-dev - libcurand-dev -- libkvikio==23.10.* +- libkvikio==23.12.* - librdkafka>=1.9.0,<1.10.0a0 -- librmm==23.10.* +- librmm==23.12.* - make - mimesis>=4.1.0 - moto>=4.0.8 @@ -77,7 +77,7 @@ dependencies: - python-snappy>=0.6.0 - python>=3.9,<3.11 - pytorch<1.12.0 -- rmm==23.10.* +- rmm==23.12.* - s3fs>=2022.3.0 - scikit-build>=0.13.1 - scipy diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index a84f7bd5224..38713d99622 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -25,7 +25,7 @@ rapids_cuda_init_architectures(CUDF) project( CUDF - VERSION 23.10.00 + VERSION 23.12.00 LANGUAGES C CXX CUDA ) if(CMAKE_CUDA_COMPILER_ID STREQUAL "NVIDIA" AND CMAKE_CUDA_COMPILER_VERSION VERSION_LESS 11.5) diff --git a/cpp/doxygen/Doxyfile b/cpp/doxygen/Doxyfile index b072d252881..adefaaa1479 100644 --- a/cpp/doxygen/Doxyfile +++ b/cpp/doxygen/Doxyfile @@ -38,7 +38,7 @@ PROJECT_NAME = libcudf # could be handy for archiving the generated documentation or if some version # control system is used. -PROJECT_NUMBER = 23.10.00 +PROJECT_NUMBER = 23.12.00 # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a @@ -2226,7 +2226,7 @@ SKIP_FUNCTION_MACROS = YES # the path). If a tag file is not located in the directory in which doxygen is # run, you must also specify the path to the tagfile here. -TAGFILES = rmm.tag=https://docs.rapids.ai/api/librmm/23.10 +TAGFILES = rmm.tag=https://docs.rapids.ai/api/librmm/23.12 # When a file name is specified after GENERATE_TAGFILE, doxygen will create a # tag file that is based on the input files it reads. 
See section "Linking to diff --git a/cpp/examples/basic/CMakeLists.txt b/cpp/examples/basic/CMakeLists.txt index 1c1952c4616..9ff716f41e4 100644 --- a/cpp/examples/basic/CMakeLists.txt +++ b/cpp/examples/basic/CMakeLists.txt @@ -16,7 +16,7 @@ file( ) include(${CMAKE_BINARY_DIR}/cmake/get_cpm.cmake) -set(CUDF_TAG branch-23.10) +set(CUDF_TAG branch-23.12) CPMFindPackage( NAME cudf GIT_REPOSITORY https://github.com/rapidsai/cudf GIT_TAG ${CUDF_TAG} diff --git a/cpp/examples/strings/CMakeLists.txt b/cpp/examples/strings/CMakeLists.txt index 31a6b12a4bc..4b500d3a92e 100644 --- a/cpp/examples/strings/CMakeLists.txt +++ b/cpp/examples/strings/CMakeLists.txt @@ -16,7 +16,7 @@ file( ) include(${CMAKE_BINARY_DIR}/cmake/get_cpm.cmake) -set(CUDF_TAG branch-23.10) +set(CUDF_TAG branch-23.12) CPMFindPackage( NAME cudf GIT_REPOSITORY https://github.com/rapidsai/cudf GIT_TAG ${CUDF_TAG} diff --git a/cpp/libcudf_kafka/CMakeLists.txt b/cpp/libcudf_kafka/CMakeLists.txt index 33bd04fffb3..1a15a3ec2cd 100644 --- a/cpp/libcudf_kafka/CMakeLists.txt +++ b/cpp/libcudf_kafka/CMakeLists.txt @@ -22,7 +22,7 @@ include(rapids-find) project( CUDA_KAFKA - VERSION 23.10.00 + VERSION 23.12.00 LANGUAGES CXX ) diff --git a/dependencies.yaml b/dependencies.yaml index 376e43094a7..66417b214ff 100644 --- a/dependencies.yaml +++ b/dependencies.yaml @@ -214,8 +214,8 @@ dependencies: common: - output_types: [conda, requirements] packages: - - librmm==23.10.* - - libkvikio==23.10.* + - librmm==23.12.* + - libkvikio==23.12.* - output_types: conda packages: - fmt>=9.1.0,<10 @@ -266,7 +266,7 @@ dependencies: - output_types: [conda, requirements, pyproject] packages: - scikit-build>=0.13.1 - - rmm==23.10.* + - rmm==23.12.* - output_types: conda packages: - &protobuf protobuf>=4.21,<5 @@ -385,7 +385,7 @@ dependencies: common: - output_types: [conda] packages: - - dask-cuda==23.10.* + - dask-cuda==23.12.* - *doxygen - make - myst-nb @@ -437,7 +437,7 @@ dependencies: - &numba numba>=0.57,<0.58 - nvtx>=0.2.1 - packaging - - rmm==23.10.* + - rmm==23.12.* - typing_extensions>=4.0.0 - *protobuf - output_types: conda @@ -498,7 +498,7 @@ dependencies: - dask-core>=2023.7.1 # dask-core in conda is the actual package & dask is the meta package - output_types: pyproject packages: - - &cudf cudf==23.10.* + - &cudf cudf==23.12.* - *cupy_pip run_cudf_kafka: common: @@ -517,7 +517,7 @@ dependencies: packages: - confluent-kafka>=1.9.0,<1.10.0a0 - *cudf - - cudf_kafka==23.10.* + - cudf_kafka==23.12.* test_cpp: common: - output_types: conda @@ -599,5 +599,5 @@ dependencies: common: - output_types: [conda, requirements, pyproject] packages: - - dask-cuda==23.10.* + - dask-cuda==23.12.* - *numba diff --git a/docs/cudf/source/conf.py b/docs/cudf/source/conf.py index 03b1bb7039b..acb2a5d17f3 100644 --- a/docs/cudf/source/conf.py +++ b/docs/cudf/source/conf.py @@ -79,9 +79,9 @@ # built documents. # # The short X.Y version. -version = '23.10' +version = '23.12' # The full version, including alpha/beta/rc tags. -release = '23.10.00' +release = '23.12.00' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/docs/dask_cudf/source/conf.py b/docs/dask_cudf/source/conf.py index ad629b5e949..6861a9b90f6 100644 --- a/docs/dask_cudf/source/conf.py +++ b/docs/dask_cudf/source/conf.py @@ -11,8 +11,8 @@ project = "dask-cudf" copyright = "2018-2023, NVIDIA Corporation" author = "NVIDIA Corporation" -version = '23.10' -release = '23.10.00' +version = '23.12' +release = '23.12.00' language = "en" diff --git a/fetch_rapids.cmake b/fetch_rapids.cmake index 4a68c7dbc60..e79d9d86fce 100644 --- a/fetch_rapids.cmake +++ b/fetch_rapids.cmake @@ -12,7 +12,7 @@ # the License. # ============================================================================= if(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/CUDF_RAPIDS.cmake) - file(DOWNLOAD https://raw.githubusercontent.com/rapidsai/rapids-cmake/branch-23.10/RAPIDS.cmake + file(DOWNLOAD https://raw.githubusercontent.com/rapidsai/rapids-cmake/branch-23.12/RAPIDS.cmake ${CMAKE_CURRENT_BINARY_DIR}/CUDF_RAPIDS.cmake ) endif() diff --git a/java/ci/README.md b/java/ci/README.md index e9599b33bf1..12a2bb2dc51 100644 --- a/java/ci/README.md +++ b/java/ci/README.md @@ -34,7 +34,7 @@ nvidia-docker run -it cudf-build:11.8.0-devel-centos7 bash You can download the cuDF repo in the docker container or you can mount it into the container. Here I choose to download again in the container. ```bash -git clone --recursive https://github.com/rapidsai/cudf.git -b branch-23.10 +git clone --recursive https://github.com/rapidsai/cudf.git -b branch-23.12 ``` ### Build cuDF jar with devtoolset @@ -47,4 +47,4 @@ scl enable devtoolset-11 "java/ci/build-in-docker.sh" ### The output -You can find the cuDF jar in java/target/ like cudf-23.10.0-SNAPSHOT-cuda11.jar. +You can find the cuDF jar in java/target/ like cudf-23.12.0-SNAPSHOT-cuda11.jar. 
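(The java/ci/README.md section above notes that you can mount an existing checkout into the container instead of cloning again inside it; as an illustration only — not part of this patch — such an invocation could look like the sketch below. The host path is an assumption.)

```bash
# Sketch only: mount a local cuDF checkout rather than cloning in the container.
# "$HOME/cudf" is a hypothetical host path; the image tag matches the README.
nvidia-docker run -it -v "$HOME/cudf:/cudf" cudf-build:11.8.0-devel-centos7 bash
# then, inside the container:
#   cd /cudf && scl enable devtoolset-11 "java/ci/build-in-docker.sh"
```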
diff --git a/java/pom.xml b/java/pom.xml index afcc0e15a2c..cc880312d34 100644 --- a/java/pom.xml +++ b/java/pom.xml @@ -21,7 +21,7 @@ ai.rapids cudf - 23.10.0-SNAPSHOT + 23.12.0-SNAPSHOT cudfjni diff --git a/java/src/main/native/CMakeLists.txt b/java/src/main/native/CMakeLists.txt index 128989fe77f..0dcfee2cffe 100644 --- a/java/src/main/native/CMakeLists.txt +++ b/java/src/main/native/CMakeLists.txt @@ -28,7 +28,7 @@ rapids_cuda_init_architectures(CUDF_JNI) project( CUDF_JNI - VERSION 23.10.00 + VERSION 23.12.00 LANGUAGES C CXX CUDA ) diff --git a/python/cudf/CMakeLists.txt b/python/cudf/CMakeLists.txt index 6f3e428d291..a8b91c27095 100644 --- a/python/cudf/CMakeLists.txt +++ b/python/cudf/CMakeLists.txt @@ -14,7 +14,7 @@ cmake_minimum_required(VERSION 3.26.4 FATAL_ERROR) -set(cudf_version 23.10.00) +set(cudf_version 23.12.00) include(../../fetch_rapids.cmake) include(rapids-cuda) diff --git a/python/cudf/cudf/__init__.py b/python/cudf/cudf/__init__.py index e5c78fca893..8d25d478676 100644 --- a/python/cudf/cudf/__init__.py +++ b/python/cudf/cudf/__init__.py @@ -99,7 +99,7 @@ rmm.register_reinitialize_hook(clear_cache) -__version__ = "23.10.00" +__version__ = "23.12.00" __all__ = [ "BaseIndex", diff --git a/python/cudf/pyproject.toml b/python/cudf/pyproject.toml index 085d78afc7c..39a8dca0267 100644 --- a/python/cudf/pyproject.toml +++ b/python/cudf/pyproject.toml @@ -9,7 +9,7 @@ requires = [ "numpy>=1.21,<1.25", "protoc-wheel", "pyarrow==12.0.1.*", - "rmm==23.10.*", + "rmm==23.12.*", "scikit-build>=0.13.1", "setuptools", "wheel", @@ -17,7 +17,7 @@ requires = [ [project] name = "cudf" -version = "23.10.00" +version = "23.12.00" description = "cuDF - GPU Dataframe" readme = { file = "README.md", content-type = "text/markdown" } authors = [ @@ -39,7 +39,7 @@ dependencies = [ "protobuf>=4.21,<5", "ptxcompiler", "pyarrow==12.*", - "rmm==23.10.*", + "rmm==23.12.*", "typing_extensions>=4.0.0", ] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. classifiers = [ diff --git a/python/cudf_kafka/pyproject.toml b/python/cudf_kafka/pyproject.toml index 386cdc32ab1..78a7a83ac3a 100644 --- a/python/cudf_kafka/pyproject.toml +++ b/python/cudf_kafka/pyproject.toml @@ -12,7 +12,7 @@ requires = [ [project] name = "cudf_kafka" -version = "23.10.00" +version = "23.12.00" description = "cuDF Kafka Datasource" readme = { file = "README.md", content-type = "text/markdown" } authors = [ @@ -21,7 +21,7 @@ authors = [ license = { text = "Apache 2.0" } requires-python = ">=3.9" dependencies = [ - "cudf==23.10.*", + "cudf==23.12.*", ] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. 
[project.optional-dependencies] diff --git a/python/custreamz/pyproject.toml b/python/custreamz/pyproject.toml index 47ade91b5eb..e6328ed045d 100644 --- a/python/custreamz/pyproject.toml +++ b/python/custreamz/pyproject.toml @@ -9,7 +9,7 @@ requires = [ [project] name = "custreamz" -version = "23.10.00" +version = "23.12.00" description = "cuStreamz - GPU Accelerated Streaming" readme = { file = "README.md", content-type = "text/markdown" } authors = [ @@ -19,8 +19,8 @@ license = { text = "Apache 2.0" } requires-python = ">=3.9" dependencies = [ "confluent-kafka>=1.9.0,<1.10.0a0", - "cudf==23.10.*", - "cudf_kafka==23.10.*", + "cudf==23.12.*", + "cudf_kafka==23.12.*", "streamz", ] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. classifiers = [ diff --git a/python/dask_cudf/dask_cudf/__init__.py b/python/dask_cudf/dask_cudf/__init__.py index 6952c3d5882..7c81f5da481 100644 --- a/python/dask_cudf/dask_cudf/__init__.py +++ b/python/dask_cudf/dask_cudf/__init__.py @@ -14,7 +14,7 @@ except ImportError: pass -__version__ = "23.10.00" +__version__ = "23.12.00" __all__ = [ "DataFrame", diff --git a/python/dask_cudf/pyproject.toml b/python/dask_cudf/pyproject.toml index 922da366422..08441c6b5f7 100644 --- a/python/dask_cudf/pyproject.toml +++ b/python/dask_cudf/pyproject.toml @@ -9,7 +9,7 @@ requires = [ [project] name = "dask_cudf" -version = "23.10.00" +version = "23.12.00" description = "Utilities for Dask and cuDF interactions" readme = { file = "README.md", content-type = "text/markdown" } authors = [ @@ -18,7 +18,7 @@ authors = [ license = { text = "Apache 2.0" } requires-python = ">=3.9" dependencies = [ - "cudf==23.10.*", + "cudf==23.12.*", "cupy-cuda11x>=12.0.0", "dask>=2023.7.1", "distributed>=2023.7.1", @@ -39,7 +39,7 @@ dynamic = ["entry-points"] [project.optional-dependencies] test = [ - "dask-cuda==23.10.*", + "dask-cuda==23.12.*", "numba>=0.57,<0.58", "pytest", "pytest-cov", From 98b1bc6c1ef1233a6c71c3b24fc8f88d591a4639 Mon Sep 17 00:00:00 2001 From: David Wendt <45795991+davidwendt@users.noreply.github.com> Date: Fri, 22 Sep 2023 11:07:37 -0400 Subject: [PATCH 093/150] Fix calls to copy_bitmask to pass stream parameter (#14158) Fixes a couple places where `cudf::copy_bitmask` was called instead of `cudf::detail::copy_bitmask` to pass the available stream (and mr) parameters. Found while reviewing #14121 Reference: https://github.com/rapidsai/cudf/pull/14121#discussion_r1332332391 Authors: - David Wendt (https://github.com/davidwendt) Approvers: - Bradley Dice (https://github.com/bdice) - MithunR (https://github.com/mythrocks) URL: https://github.com/rapidsai/cudf/pull/14158 --- cpp/src/lists/count_elements.cu | 12 ++++++------ cpp/src/replace/clamp.cu | 4 +++- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/cpp/src/lists/count_elements.cu b/cpp/src/lists/count_elements.cu index f8e7b4c6126..40a14d805e1 100644 --- a/cpp/src/lists/count_elements.cu +++ b/cpp/src/lists/count_elements.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022, NVIDIA CORPORATION. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -36,12 +36,12 @@ namespace cudf { namespace lists { namespace detail { /** - * @brief Returns a numeric column containing lengths of each element. 
+ * @brief Returns a numeric column containing lengths of each element
 *
- * @param input Input lists column.
- * @param stream CUDA stream used for device memory operations and kernel launches.
+ * @param input Input lists column
+ * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned column's device memory
- * @return New INT32 column with lengths.
+ * @return New size_type column with lengths
 */
std::unique_ptr count_elements(lists_column_view const& input,
                               rmm::cuda_stream_view stream,
@@ -52,7 +52,7 @@ std::unique_ptr count_elements(lists_column_view const& input,
   // create output column
   auto output = make_fixed_width_column(data_type{type_to_id()},
                                         input.size(),
-                                        copy_bitmask(input.parent()),
+                                        cudf::detail::copy_bitmask(input.parent(), stream, mr),
                                         input.null_count(),
                                         stream,
                                         mr);
diff --git a/cpp/src/replace/clamp.cu b/cpp/src/replace/clamp.cu
index 2b48aed2d29..950cb484ddf 100644
--- a/cpp/src/replace/clamp.cu
+++ b/cpp/src/replace/clamp.cu
@@ -163,7 +163,9 @@ std::enable_if_t(), std::unique_ptr> clamp
   auto output = detail::allocate_like(input, input.size(), mask_allocation_policy::NEVER, stream, mr);
   // mask will not change
-  if (input.nullable()) { output->set_null_mask(copy_bitmask(input), input.null_count()); }
+  if (input.nullable()) {
+    output->set_null_mask(cudf::detail::copy_bitmask(input, stream, mr), input.null_count());
+  }
   auto output_device_view =
     cudf::mutable_column_device_view::create(output->mutable_view(), stream);

From f865c871cd0f9b9c596476d9d98aafaf9cc46bb1 Mon Sep 17 00:00:00 2001
From: David Wendt <45795991+davidwendt@users.noreply.github.com>
Date: Fri, 22 Sep 2023 11:08:11 -0400
Subject: [PATCH 094/150] Expose stream parameter in public nvtext ngram APIs (#14061)

Add stream parameter to public APIs:

- `nvtext::generate_ngrams()`
- `nvtext::generate_character_ngrams()`
- `nvtext::hash_character_ngrams()`
- `nvtext::ngrams_tokenize()`

Also cleaned up some of the doxygen comments and fixed a spelling mistake in
the jaccard.cu source that was bothering me.
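As an illustration only (not part of the patch), a minimal sketch of calling one of the updated APIs on a caller-provided stream; the wrapper function name is hypothetical:

```cpp
// Sketch assuming the post-change public signatures listed above.
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <nvtext/generate_ngrams.hpp>
#include <rmm/cuda_stream.hpp>

#include <memory>

std::unique_ptr<cudf::column> bigrams_on_stream(cudf::strings_column_view const& input)
{
  rmm::cuda_stream stream;  // caller-owned stream instead of cudf::get_default_stream()
  // `separator` is now a required argument rather than defaulting to "_"
  auto const separator = cudf::string_scalar("_", true, stream.view());
  return nvtext::generate_ngrams(input, 2, separator, stream.view());
}
```

The same pattern applies to `nvtext::ngrams_tokenize()`, which now also takes explicit `delimiter` and `separator` arguments, as the updated tests show.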
Reference #13744 Authors: - David Wendt (https://github.com/davidwendt) Approvers: - Yunsong Wang (https://github.com/PointKernel) - Vyas Ramasubramani (https://github.com/vyasr) URL: https://github.com/rapidsai/cudf/pull/14061 --- cpp/benchmarks/text/ngrams.cpp | 3 +- cpp/benchmarks/text/tokenize.cpp | 7 ++- cpp/include/nvtext/generate_ngrams.hpp | 38 ++++++++------- cpp/include/nvtext/ngrams_tokenize.hpp | 28 +++++------ cpp/src/text/generate_ngrams.cu | 9 ++-- cpp/src/text/jaccard.cu | 4 +- cpp/src/text/ngrams_tokenize.cu | 4 +- cpp/tests/CMakeLists.txt | 1 + cpp/tests/streams/text/ngrams_test.cpp | 59 ++++++++++++++++++++++++ cpp/tests/text/ngrams_tests.cpp | 28 ++++++----- cpp/tests/text/ngrams_tokenize_tests.cpp | 11 +++-- 11 files changed, 135 insertions(+), 57 deletions(-) create mode 100644 cpp/tests/streams/text/ngrams_test.cpp diff --git a/cpp/benchmarks/text/ngrams.cpp b/cpp/benchmarks/text/ngrams.cpp index 0319577f6b9..f3fd5cc5729 100644 --- a/cpp/benchmarks/text/ngrams.cpp +++ b/cpp/benchmarks/text/ngrams.cpp @@ -36,11 +36,12 @@ static void BM_ngrams(benchmark::State& state, ngrams_type nt) cudf::type_id::STRING, distribution_id::NORMAL, 0, max_str_length); auto const column = create_random_column(cudf::type_id::STRING, row_count{n_rows}, profile); cudf::strings_column_view input(column->view()); + auto const separator = cudf::string_scalar("_"); for (auto _ : state) { cuda_event_timer raii(state, true); switch (nt) { - case ngrams_type::tokens: nvtext::generate_ngrams(input); break; + case ngrams_type::tokens: nvtext::generate_ngrams(input, 2, separator); break; case ngrams_type::characters: nvtext::generate_character_ngrams(input); break; } } diff --git a/cpp/benchmarks/text/tokenize.cpp b/cpp/benchmarks/text/tokenize.cpp index 423fe667b05..b556a84c541 100644 --- a/cpp/benchmarks/text/tokenize.cpp +++ b/cpp/benchmarks/text/tokenize.cpp @@ -67,8 +67,11 @@ static void bench_tokenize(nvbench::state& state) auto result = nvtext::count_tokens(input, cudf::strings_column_view(delimiters)); }); } else if (tokenize_type == "ngrams") { - state.exec(nvbench::exec_tag::sync, - [&](nvbench::launch& launch) { auto result = nvtext::ngrams_tokenize(input); }); + auto const delimiter = cudf::string_scalar(""); + auto const separator = cudf::string_scalar("_"); + state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) { + auto result = nvtext::ngrams_tokenize(input, 2, delimiter, separator); + }); } else if (tokenize_type == "characters") { state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) { auto result = nvtext::character_tokenize(input); }); diff --git a/cpp/include/nvtext/generate_ngrams.hpp b/cpp/include/nvtext/generate_ngrams.hpp index 5d66401df9d..46f2c0e7bc9 100644 --- a/cpp/include/nvtext/generate_ngrams.hpp +++ b/cpp/include/nvtext/generate_ngrams.hpp @@ -47,19 +47,19 @@ namespace nvtext { * @throw cudf::logic_error if `separator` is invalid * @throw cudf::logic_error if there are not enough strings to generate any ngrams * - * @param strings Strings column to tokenize and produce ngrams from. - * @param ngrams The ngram number to generate. - * Default is 2 = bigram. - * @param separator The string to use for separating ngram tokens. - * Default is "_" character. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New strings columns of tokens. 
+ * @param input Strings column to tokenize and produce ngrams from + * @param ngrams The ngram number to generate + * @param separator The string to use for separating ngram tokens + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New strings columns of tokens */ std::unique_ptr generate_ngrams( - cudf::strings_column_view const& strings, - cudf::size_type ngrams = 2, - cudf::string_scalar const& separator = cudf::string_scalar{"_"}, - rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); + cudf::strings_column_view const& input, + cudf::size_type ngrams, + cudf::string_scalar const& separator, + rmm::cuda_stream_view stream = cudf::get_default_stream(), + rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** * @brief Generates ngrams of characters within each string. @@ -79,15 +79,17 @@ std::unique_ptr generate_ngrams( * @throw cudf::logic_error if `ngrams < 2` * @throw cudf::logic_error if there are not enough characters to generate any ngrams * - * @param strings Strings column to produce ngrams from. + * @param input Strings column to produce ngrams from * @param ngrams The ngram number to generate. * Default is 2 = bigram. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New strings columns of tokens. + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New strings columns of tokens */ std::unique_ptr generate_character_ngrams( - cudf::strings_column_view const& strings, + cudf::strings_column_view const& input, cudf::size_type ngrams = 2, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -113,14 +115,16 @@ std::unique_ptr generate_character_ngrams( * @throw cudf::logic_error if `ngrams < 2` * @throw cudf::logic_error if there are not enough characters to generate any ngrams * - * @param strings Strings column to produce ngrams from. + * @param input Strings column to produce ngrams from * @param ngrams The ngram number to generate. Default is 5. + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory. * @return A lists column of hash values */ std::unique_ptr hash_character_ngrams( - cudf::strings_column_view const& strings, + cudf::strings_column_view const& input, cudf::size_type ngrams = 5, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of group diff --git a/cpp/include/nvtext/ngrams_tokenize.hpp b/cpp/include/nvtext/ngrams_tokenize.hpp index 17f20f7ea4c..9d76ef8689f 100644 --- a/cpp/include/nvtext/ngrams_tokenize.hpp +++ b/cpp/include/nvtext/ngrams_tokenize.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, NVIDIA CORPORATION. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -66,22 +66,22 @@ namespace nvtext { * * All null row entries are ignored and the output contains all valid rows. * - * @param strings Strings column to tokenize and produce ngrams from. 
- * @param ngrams The ngram number to generate. - * Default is 2 = bigram. + * @param input Strings column to tokenize and produce ngrams from + * @param ngrams The ngram number to generate * @param delimiter UTF-8 characters used to separate each string into tokens. - * The default of empty string will separate tokens using whitespace. - * @param separator The string to use for separating ngram tokens. - * Default is "_" character. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New strings columns of tokens. + * An empty string will separate tokens using whitespace. + * @param separator The string to use for separating ngram tokens + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New strings columns of tokens */ std::unique_ptr ngrams_tokenize( - cudf::strings_column_view const& strings, - cudf::size_type ngrams = 2, - cudf::string_scalar const& delimiter = cudf::string_scalar{""}, - cudf::string_scalar const& separator = cudf::string_scalar{"_"}, - rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); + cudf::strings_column_view const& input, + cudf::size_type ngrams, + cudf::string_scalar const& delimiter, + cudf::string_scalar const& separator, + rmm::cuda_stream_view stream = cudf::get_default_stream(), + rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of group } // namespace nvtext diff --git a/cpp/src/text/generate_ngrams.cu b/cpp/src/text/generate_ngrams.cu index 938fd45246d..5f2f4d021a4 100644 --- a/cpp/src/text/generate_ngrams.cu +++ b/cpp/src/text/generate_ngrams.cu @@ -150,10 +150,11 @@ std::unique_ptr generate_ngrams(cudf::strings_column_view const& s std::unique_ptr generate_ngrams(cudf::strings_column_view const& strings, cudf::size_type ngrams, cudf::string_scalar const& separator, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::generate_ngrams(strings, ngrams, separator, cudf::get_default_stream(), mr); + return detail::generate_ngrams(strings, ngrams, separator, stream, mr); } namespace detail { @@ -317,18 +318,20 @@ std::unique_ptr hash_character_ngrams(cudf::strings_column_view co std::unique_ptr generate_character_ngrams(cudf::strings_column_view const& strings, cudf::size_type ngrams, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::generate_character_ngrams(strings, ngrams, cudf::get_default_stream(), mr); + return detail::generate_character_ngrams(strings, ngrams, stream, mr); } std::unique_ptr hash_character_ngrams(cudf::strings_column_view const& strings, cudf::size_type ngrams, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::hash_character_ngrams(strings, ngrams, cudf::get_default_stream(), mr); + return detail::hash_character_ngrams(strings, ngrams, stream, mr); } } // namespace nvtext diff --git a/cpp/src/text/jaccard.cu b/cpp/src/text/jaccard.cu index 5b55745c2c7..95324847ea0 100644 --- a/cpp/src/text/jaccard.cu +++ b/cpp/src/text/jaccard.cu @@ -107,7 +107,7 @@ rmm::device_uvector compute_unique_counts(cudf::column_view con * * This is called with a warp per row */ -struct sorted_interset_fn { +struct sorted_intersect_fn { cudf::column_device_view const d_input1; cudf::column_device_view const d_input2; cudf::size_type* 
d_results; @@ -151,7 +151,7 @@ rmm::device_uvector compute_intersect_counts(cudf::column_view auto const d_input1 = cudf::column_device_view::create(input1, stream); auto const d_input2 = cudf::column_device_view::create(input2, stream); auto d_results = rmm::device_uvector(input1.size(), stream); - sorted_interset_fn fn{*d_input1, *d_input2, d_results.data()}; + sorted_intersect_fn fn{*d_input1, *d_input2, d_results.data()}; thrust::for_each_n(rmm::exec_policy(stream), thrust::counting_iterator(0), input1.size() * cudf::detail::warp_size, diff --git a/cpp/src/text/ngrams_tokenize.cu b/cpp/src/text/ngrams_tokenize.cu index fd1cbf99221..73d85513e95 100644 --- a/cpp/src/text/ngrams_tokenize.cu +++ b/cpp/src/text/ngrams_tokenize.cu @@ -265,11 +265,11 @@ std::unique_ptr ngrams_tokenize(cudf::strings_column_view const& s cudf::size_type ngrams, cudf::string_scalar const& delimiter, cudf::string_scalar const& separator, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::ngrams_tokenize( - strings, ngrams, delimiter, separator, cudf::get_default_stream(), mr); + return detail::ngrams_tokenize(strings, ngrams, delimiter, separator, stream, mr); } } // namespace nvtext diff --git a/cpp/tests/CMakeLists.txt b/cpp/tests/CMakeLists.txt index d1e50442058..ba4921848d7 100644 --- a/cpp/tests/CMakeLists.txt +++ b/cpp/tests/CMakeLists.txt @@ -632,6 +632,7 @@ ConfigureTest( STREAM_STRINGS_TEST streams/strings/case_test.cpp streams/strings/find_test.cpp STREAM_MODE testing ) +ConfigureTest(STREAM_TEXT_TEST streams/text/ngrams_test.cpp STREAM_MODE testing) # ################################################################################################## # Install tests #################################################################################### diff --git a/cpp/tests/streams/text/ngrams_test.cpp b/cpp/tests/streams/text/ngrams_test.cpp new file mode 100644 index 00000000000..bce0d2b680b --- /dev/null +++ b/cpp/tests/streams/text/ngrams_test.cpp @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +#include +#include +#include + +class TextNGramsTest : public cudf::test::BaseFixture {}; + +TEST_F(TextNGramsTest, GenerateNgrams) +{ + auto const input = + cudf::test::strings_column_wrapper({"the", "fox", "jumped", "over", "thé", "dog"}); + auto const separator = cudf::string_scalar{"_", true, cudf::test::get_default_stream()}; + nvtext::generate_ngrams( + cudf::strings_column_view(input), 3, separator, cudf::test::get_default_stream()); +} + +TEST_F(TextNGramsTest, GenerateCharacterNgrams) +{ + auto const input = + cudf::test::strings_column_wrapper({"the", "fox", "jumped", "over", "thé", "dog"}); + nvtext::generate_character_ngrams( + cudf::strings_column_view(input), 3, cudf::test::get_default_stream()); +} + +TEST_F(TextNGramsTest, HashCharacterNgrams) +{ + auto input = + cudf::test::strings_column_wrapper({"the quick brown fox", "jumped over the lazy dog."}); + nvtext::hash_character_ngrams( + cudf::strings_column_view(input), 5, cudf::test::get_default_stream()); +} + +TEST_F(TextNGramsTest, NgramsTokenize) +{ + auto input = + cudf::test::strings_column_wrapper({"the quick brown fox", "jumped over the lazy dog."}); + auto const delimiter = cudf::string_scalar{" ", true, cudf::test::get_default_stream()}; + auto const separator = cudf::string_scalar{"_", true, cudf::test::get_default_stream()}; + nvtext::ngrams_tokenize( + cudf::strings_column_view(input), 2, delimiter, separator, cudf::test::get_default_stream()); +} diff --git a/cpp/tests/text/ngrams_tests.cpp b/cpp/tests/text/ngrams_tests.cpp index 323b3eed3e2..7b179588385 100644 --- a/cpp/tests/text/ngrams_tests.cpp +++ b/cpp/tests/text/ngrams_tests.cpp @@ -34,18 +34,19 @@ TEST_F(TextGenerateNgramsTest, Ngrams) { cudf::test::strings_column_wrapper strings{"the", "fox", "jumped", "over", "thé", "dog"}; cudf::strings_column_view strings_view(strings); + auto const separator = cudf::string_scalar("_"); { cudf::test::strings_column_wrapper expected{ "the_fox", "fox_jumped", "jumped_over", "over_thé", "thé_dog"}; - auto const results = nvtext::generate_ngrams(strings_view); + auto const results = nvtext::generate_ngrams(strings_view, 2, separator); CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected); } { cudf::test::strings_column_wrapper expected{ "the_fox_jumped", "fox_jumped_over", "jumped_over_thé", "over_thé_dog"}; - auto const results = nvtext::generate_ngrams(strings_view, 3); + auto const results = nvtext::generate_ngrams(strings_view, 3, separator); CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected); } { @@ -83,10 +84,11 @@ TEST_F(TextGenerateNgramsTest, NgramsWithNulls) h_strings.begin(), h_strings.end(), thrust::make_transform_iterator(h_strings.begin(), [](auto str) { return str != nullptr; })); + auto const separator = cudf::string_scalar("_"); cudf::strings_column_view strings_view(strings); { - auto const results = nvtext::generate_ngrams(strings_view, 3); + auto const results = nvtext::generate_ngrams(strings_view, 3, separator); cudf::test::strings_column_wrapper expected{ "the_fox_jumped", "fox_jumped_over", "jumped_over_the", "over_the_dog"}; CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected); @@ -103,7 +105,10 @@ TEST_F(TextGenerateNgramsTest, Empty) { auto const zero_size_strings_column = cudf::make_empty_column(cudf::type_id::STRING)->view(); - auto results = nvtext::generate_ngrams(cudf::strings_column_view(zero_size_strings_column)); + auto const separator = cudf::string_scalar("_"); + + auto results = + nvtext::generate_ngrams(cudf::strings_column_view(zero_size_strings_column), 
2, separator); cudf::test::expect_column_empty(results->view()); results = nvtext::generate_character_ngrams(cudf::strings_column_view(zero_size_strings_column)); cudf::test::expect_column_empty(results->view()); @@ -112,21 +117,20 @@ TEST_F(TextGenerateNgramsTest, Empty) TEST_F(TextGenerateNgramsTest, Errors) { cudf::test::strings_column_wrapper strings{""}; + auto const separator = cudf::string_scalar("_"); // invalid parameter value - EXPECT_THROW(nvtext::generate_ngrams(cudf::strings_column_view(strings), 1), cudf::logic_error); + EXPECT_THROW(nvtext::generate_ngrams(cudf::strings_column_view(strings), 1, separator), + cudf::logic_error); EXPECT_THROW(nvtext::generate_character_ngrams(cudf::strings_column_view(strings), 1), cudf::logic_error); // not enough strings to generate ngrams - EXPECT_THROW(nvtext::generate_ngrams(cudf::strings_column_view(strings), 3), cudf::logic_error); + EXPECT_THROW(nvtext::generate_ngrams(cudf::strings_column_view(strings), 3, separator), + cudf::logic_error); EXPECT_THROW(nvtext::generate_character_ngrams(cudf::strings_column_view(strings), 3), cudf::logic_error); - std::vector h_strings{"", nullptr, "", nullptr}; - cudf::test::strings_column_wrapper strings_no_tokens( - h_strings.begin(), - h_strings.end(), - thrust::make_transform_iterator(h_strings.begin(), [](auto str) { return str != nullptr; })); - EXPECT_THROW(nvtext::generate_ngrams(cudf::strings_column_view(strings_no_tokens)), + cudf::test::strings_column_wrapper strings_no_tokens({"", "", "", ""}, {1, 0, 1, 0}); + EXPECT_THROW(nvtext::generate_ngrams(cudf::strings_column_view(strings_no_tokens), 2, separator), cudf::logic_error); EXPECT_THROW(nvtext::generate_character_ngrams(cudf::strings_column_view(strings_no_tokens)), cudf::logic_error); diff --git a/cpp/tests/text/ngrams_tokenize_tests.cpp b/cpp/tests/text/ngrams_tokenize_tests.cpp index 5879bec3e64..c6fb886f7e5 100644 --- a/cpp/tests/text/ngrams_tokenize_tests.cpp +++ b/cpp/tests/text/ngrams_tokenize_tests.cpp @@ -62,7 +62,7 @@ TEST_F(TextNgramsTokenizeTest, Tokenize) "mousé_ate", "ate_the", "the_cheese"}; - auto results = nvtext::ngrams_tokenize(strings_view); + auto results = nvtext::ngrams_tokenize(strings_view, 2, std::string(), std::string("_")); CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected); } { @@ -101,9 +101,10 @@ TEST_F(TextNgramsTokenizeTest, TokenizeOneGram) { cudf::test::strings_column_wrapper strings{"aaa bbb", " ccc ddd ", "eee"}; cudf::strings_column_view strings_view(strings); + auto const empty = cudf::string_scalar(""); cudf::test::strings_column_wrapper expected{"aaa", "bbb", "ccc", "ddd", "eee"}; - auto results = nvtext::ngrams_tokenize(strings_view, 1); + auto results = nvtext::ngrams_tokenize(strings_view, 1, empty, empty); CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected); } @@ -111,7 +112,8 @@ TEST_F(TextNgramsTokenizeTest, TokenizeEmptyTest) { auto strings = cudf::make_empty_column(cudf::data_type{cudf::type_id::STRING}); cudf::strings_column_view strings_view(strings->view()); - auto results = nvtext::ngrams_tokenize(strings_view); + auto const empty = cudf::string_scalar(""); + auto results = nvtext::ngrams_tokenize(strings_view, 2, empty, empty); EXPECT_EQ(results->size(), 0); EXPECT_EQ(results->has_nulls(), false); } @@ -120,5 +122,6 @@ TEST_F(TextNgramsTokenizeTest, TokenizeErrorTest) { cudf::test::strings_column_wrapper strings{"this column intentionally left blank"}; cudf::strings_column_view strings_view(strings); - EXPECT_THROW(nvtext::ngrams_tokenize(strings_view, 0), cudf::logic_error); + auto 
const empty = cudf::string_scalar(""); + EXPECT_THROW(nvtext::ngrams_tokenize(strings_view, 0, empty, empty), cudf::logic_error); } From a6d014e632ecad86cef486402dbe53acee191a1d Mon Sep 17 00:00:00 2001 From: Lawrence Mitchell Date: Fri, 22 Sep 2023 16:24:33 +0100 Subject: [PATCH 095/150] Support callables in DataFrame.assign (#14142) While here, change the way the initial copied frame is constructed: callables are allowed to refer to columns already in the dataframe, even if they overwrite them. - Closes #12936 Authors: - Lawrence Mitchell (https://github.com/wence-) Approvers: - Matthew Roeschke (https://github.com/mroeschke) URL: https://github.com/rapidsai/cudf/pull/14142 --- python/cudf/cudf/core/dataframe.py | 23 ++++++++++++++--------- python/cudf/cudf/tests/test_dataframe.py | 19 +++++++++++++++++++ 2 files changed, 33 insertions(+), 9 deletions(-) diff --git a/python/cudf/cudf/core/dataframe.py b/python/cudf/cudf/core/dataframe.py index 1a780cc9e9f..8a3dbe77787 100644 --- a/python/cudf/cudf/core/dataframe.py +++ b/python/cudf/cudf/core/dataframe.py @@ -1390,10 +1390,21 @@ def _get_numeric_data(self): return self[columns] @_cudf_nvtx_annotate - def assign(self, **kwargs): + def assign(self, **kwargs: Union[Callable[[Self], Any], Any]): """ Assign columns to DataFrame from keyword arguments. + Parameters + ---------- + **kwargs: dict mapping string column names to values + The value for each key can either be a literal column (or + something that can be converted to a column), or + a callable of one argument that will be given the + dataframe as an argument and should return the new column + (without modifying the input argument). + Columns are added in-order, so callables can refer to + column names constructed in the assignment. + Examples -------- >>> import cudf @@ -1405,15 +1416,9 @@ def assign(self, **kwargs): 1 1 4 2 2 5 """ - new_df = cudf.DataFrame(index=self.index.copy()) - for name, col in self._data.items(): - if name in kwargs: - new_df[name] = kwargs.pop(name) - else: - new_df._data[name] = col.copy() - + new_df = self.copy(deep=False) for k, v in kwargs.items(): - new_df[k] = v + new_df[k] = v(new_df) if callable(v) else v return new_df @classmethod diff --git a/python/cudf/cudf/tests/test_dataframe.py b/python/cudf/cudf/tests/test_dataframe.py index 6180162ecdd..2f531afdeb7 100644 --- a/python/cudf/cudf/tests/test_dataframe.py +++ b/python/cudf/cudf/tests/test_dataframe.py @@ -1327,6 +1327,25 @@ def test_assign(): np.testing.assert_equal(gdf2.y.to_numpy(), [2, 3, 4]) +@pytest.mark.parametrize( + "mapping", + [ + {"y": 1, "z": lambda df: df["x"] + df["y"]}, + { + "x": lambda df: df["x"] * 2, + "y": lambda df: 2, + "z": lambda df: df["x"] / df["y"], + }, + ], +) +def test_assign_callable(mapping): + df = pd.DataFrame({"x": [1, 2, 3]}) + cdf = cudf.from_pandas(df) + expect = df.assign(**mapping) + actual = cdf.assign(**mapping) + assert_eq(expect, actual) + + @pytest.mark.parametrize("nrows", [1, 8, 100, 1000]) @pytest.mark.parametrize("method", ["murmur3", "md5"]) @pytest.mark.parametrize("seed", [None, 42]) From 40bdd8ae4d89d2ea1f466c579d56f2c9ca1b014d Mon Sep 17 00:00:00 2001 From: Peter Andreas Entschev Date: Fri, 22 Sep 2023 19:20:18 +0200 Subject: [PATCH 096/150] Pin to `aws-sdk-cpp<1.11` (#14173) Pin conda packages to `aws-sdk-cpp<1.11`. 
The recent upgrade to version `1.11.*` has caused several issues with cleaning up (more details on the changes can be found in [this link](https://github.com/aws/aws-sdk-cpp#version-111-is-now-available)), causing Distributed and Dask-CUDA processes to segfault. The stack for one of those crashes looks like the following:

```
(gdb) bt
#0  0x00007f5125359a0c in Aws::Utils::Logging::s_aws_logger_redirect_get_log_level(aws_logger*, unsigned int) () from /opt/conda/envs/dask/lib/python3.9/site-packages/pyarrow/../../.././libaws-cpp-sdk-core.so
#1  0x00007f5124968f83 in aws_event_loop_thread () from /opt/conda/envs/dask/lib/python3.9/site-packages/pyarrow/../../../././libaws-c-io.so.1.0.0
#2  0x00007f5124ad9359 in thread_fn () from /opt/conda/envs/dask/lib/python3.9/site-packages/pyarrow/../../../././libaws-c-common.so.1
#3  0x00007f519958f6db in start_thread () from /lib/x86_64-linux-gnu/libpthread.so.0
#4  0x00007f5198b1361f in clone () from /lib/x86_64-linux-gnu/libc.so.6
```

Such segfaults now manifest frequently in CI, and in some cases are reproducible with a hit rate of ~30%. Given the approaching release time, it's probably the safest option to just pin to an older version of the package until we pinpoint the exact cause of the issue and a patched build is released upstream.

`aws-sdk-cpp` is statically linked in the `pyarrow` pip package, which prevents us from using the same pinning technique. cuDF is currently pinned to `pyarrow=12.0.1`, which seems to be built against `aws-sdk-cpp=1.10.*`, as per [recent build logs](https://github.com/apache/arrow/actions/runs/6276453828/job/17046177335?pr=37792#step:6:1372).

Authors:
  - Peter Andreas Entschev (https://github.com/pentschev)

Approvers:
  - GALI PREM SAGAR (https://github.com/galipremsagar)
  - Ray Douglass (https://github.com/raydouglass)

URL: https://github.com/rapidsai/cudf/pull/14173
---
 conda/environments/all_cuda-118_arch-x86_64.yaml | 1 +
 conda/environments/all_cuda-120_arch-x86_64.yaml | 1 +
 conda/recipes/libcudf/conda_build_config.yaml | 3 +++
 conda/recipes/libcudf/meta.yaml | 2 ++
 dependencies.yaml | 1 +
 5 files changed, 8 insertions(+)

diff --git a/conda/environments/all_cuda-118_arch-x86_64.yaml b/conda/environments/all_cuda-118_arch-x86_64.yaml
index d4abc28cf13..9fb991f9075 100644
--- a/conda/environments/all_cuda-118_arch-x86_64.yaml
+++ b/conda/environments/all_cuda-118_arch-x86_64.yaml
@@ -9,6 +9,7 @@ channels:
 - nvidia
 dependencies:
 - aiobotocore>=2.2.0
+- aws-sdk-cpp<1.11
 - benchmark==1.8.0
 - boto3>=1.21.21
 - botocore>=1.24.21
diff --git a/conda/environments/all_cuda-120_arch-x86_64.yaml b/conda/environments/all_cuda-120_arch-x86_64.yaml
index 9a98e400e6d..9ba0dd8dc38 100644
--- a/conda/environments/all_cuda-120_arch-x86_64.yaml
+++ b/conda/environments/all_cuda-120_arch-x86_64.yaml
@@ -9,6 +9,7 @@ channels:
 - nvidia
 dependencies:
 - aiobotocore>=2.2.0
+- aws-sdk-cpp<1.11
 - benchmark==1.8.0
 - boto3>=1.21.21
 - botocore>=1.24.21
diff --git a/conda/recipes/libcudf/conda_build_config.yaml b/conda/recipes/libcudf/conda_build_config.yaml
index 25b3f19de77..b1f5b083e06 100644
--- a/conda/recipes/libcudf/conda_build_config.yaml
+++ b/conda/recipes/libcudf/conda_build_config.yaml
@@ -22,6 +22,9 @@ gbench_version:
 gtest_version:
   - ">=1.13.0"

+aws_sdk_cpp_version:
+  - "<1.11"
+
 libarrow_version:
   - "=12"

diff --git a/conda/recipes/libcudf/meta.yaml b/conda/recipes/libcudf/meta.yaml
index 627065817ba..28357f0d96d 100644
--- a/conda/recipes/libcudf/meta.yaml
+++ b/conda/recipes/libcudf/meta.yaml
@@ -74,6 +74,7 @@ requirements:
     - gtest {{ gtest_version }}
     - gmock {{ gtest_version }}
     - zlib {{ zlib_version }}
+    - aws-sdk-cpp {{ aws_sdk_cpp_version }}

 outputs:
   - name: libcudf
@@ -107,6 +108,7 @@ outputs:
         - dlpack {{ dlpack_version }}
         - gtest {{ gtest_version }}
         - gmock {{ gtest_version }}
+        - aws-sdk-cpp {{ aws_sdk_cpp_version }}
     test:
       commands:
         - test -f $PREFIX/lib/libcudf.so
diff --git a/dependencies.yaml b/dependencies.yaml
index 376e43094a7..5586f54348c 100644
--- a/dependencies.yaml
+++ b/dependencies.yaml
@@ -218,6 +218,7 @@ dependencies:
           - libkvikio==23.10.*
       - output_types: conda
         packages:
+          - aws-sdk-cpp<1.11
          - fmt>=9.1.0,<10
          - &gbench benchmark==1.8.0
          - &gtest gtest>=1.13.0

From c7dd6b48684028a65b1d19d5d5b04060f6a4fe19 Mon Sep 17 00:00:00 2001
From: David Wendt <45795991+davidwendt@users.noreply.github.com>
Date: Fri, 22 Sep 2023 14:15:31 -0400
Subject: [PATCH 097/150] Refactor libcudf indexalator to typed normalator (#14043)

Creates a generic normalizing iterator for integer types for use by the
`indexalator` and the future offsets normalizing iterator. Mostly, code has
been moved around or renamed so the normalizing-iterator part can take a type
template parameter to identify which integer type to normalize to. For the
`indexalator`, this type is `cudf::size_type`, and for the offsets iterator
this type would be `int64`.

Authors:
  - David Wendt (https://github.com/davidwendt)

Approvers:
  - Bradley Dice (https://github.com/bdice)
  - MithunR (https://github.com/mythrocks)

URL: https://github.com/rapidsai/cudf/pull/14043
---
 cpp/include/cudf/detail/indexalator.cuh | 332 +----------------
 .../cudf/detail/normalizing_iterator.cuh | 367 ++++++++++++++++++
 2 files changed, 374 insertions(+), 325 deletions(-)
 create mode 100644 cpp/include/cudf/detail/normalizing_iterator.cuh

diff --git a/cpp/include/cudf/detail/indexalator.cuh b/cpp/include/cudf/detail/indexalator.cuh
index 4731c4919e3..6532dae3695 100644
--- a/cpp/include/cudf/detail/indexalator.cuh
+++ b/cpp/include/cudf/detail/indexalator.cuh
@@ -16,14 +16,13 @@
 #pragma once

+#include
+
 #include
 #include
 #include
 #include
-#include
-#include
-#include
 #include
 #include
 #include
@@ -32,193 +31,6 @@
 namespace cudf {
 namespace detail {

-/**
- * @brief The base class for the input or output index normalizing iterator.
- *
- * This implementation uses CRTP to define the `input_indexalator` and the
- * `output_indexalator` classes. This is so this class can manipulate the
- * uniquely typed subclass member variable `p_` directly without requiring
- * virtual functions since iterator instances will be copied to device memory.
- *
- * The base class mainly manages updating the `p_` member variable while the
- * subclasses handle accessing individual elements in device memory.
- *
- * @tparam T The derived class type for the iterator.
- */
-template
-struct base_indexalator {
-  using difference_type = ptrdiff_t;
-  using value_type = size_type;
-  using pointer = size_type*;
-  using iterator_category = std::random_access_iterator_tag;
-
-  base_indexalator() = default;
-  base_indexalator(base_indexalator const&) = default;
-  base_indexalator(base_indexalator&&) = default;
-  base_indexalator& operator=(base_indexalator const&) = default;
-  base_indexalator& operator=(base_indexalator&&) = default;
-
-  /**
-   * @brief Prefix increment operator.
- */ - CUDF_HOST_DEVICE inline T& operator++() - { - T& derived = static_cast(*this); - derived.p_ += width_; - return derived; - } - - /** - * @brief Postfix increment operator. - */ - CUDF_HOST_DEVICE inline T operator++(int) - { - T tmp{static_cast(*this)}; - operator++(); - return tmp; - } - - /** - * @brief Prefix decrement operator. - */ - CUDF_HOST_DEVICE inline T& operator--() - { - T& derived = static_cast(*this); - derived.p_ -= width_; - return derived; - } - - /** - * @brief Postfix decrement operator. - */ - CUDF_HOST_DEVICE inline T operator--(int) - { - T tmp{static_cast(*this)}; - operator--(); - return tmp; - } - - /** - * @brief Compound assignment by sum operator. - */ - CUDF_HOST_DEVICE inline T& operator+=(difference_type offset) - { - T& derived = static_cast(*this); - derived.p_ += offset * width_; - return derived; - } - - /** - * @brief Increment by offset operator. - */ - CUDF_HOST_DEVICE inline T operator+(difference_type offset) const - { - auto tmp = T{static_cast(*this)}; - tmp.p_ += (offset * width_); - return tmp; - } - - /** - * @brief Addition assignment operator. - */ - CUDF_HOST_DEVICE inline friend T operator+(difference_type offset, T const& rhs) - { - T tmp{rhs}; - tmp.p_ += (offset * rhs.width_); - return tmp; - } - - /** - * @brief Compound assignment by difference operator. - */ - CUDF_HOST_DEVICE inline T& operator-=(difference_type offset) - { - T& derived = static_cast(*this); - derived.p_ -= offset * width_; - return derived; - } - - /** - * @brief Decrement by offset operator. - */ - CUDF_HOST_DEVICE inline T operator-(difference_type offset) const - { - auto tmp = T{static_cast(*this)}; - tmp.p_ -= (offset * width_); - return tmp; - } - - /** - * @brief Subtraction assignment operator. - */ - CUDF_HOST_DEVICE inline friend T operator-(difference_type offset, T const& rhs) - { - T tmp{rhs}; - tmp.p_ -= (offset * rhs.width_); - return tmp; - } - - /** - * @brief Compute offset from iterator difference operator. - */ - CUDF_HOST_DEVICE inline difference_type operator-(T const& rhs) const - { - return (static_cast(*this).p_ - rhs.p_) / width_; - } - - /** - * @brief Equals to operator. - */ - CUDF_HOST_DEVICE inline bool operator==(T const& rhs) const - { - return rhs.p_ == static_cast(*this).p_; - } - /** - * @brief Not equals to operator. - */ - CUDF_HOST_DEVICE inline bool operator!=(T const& rhs) const - { - return rhs.p_ != static_cast(*this).p_; - } - /** - * @brief Less than operator. - */ - CUDF_HOST_DEVICE inline bool operator<(T const& rhs) const - { - return static_cast(*this).p_ < rhs.p_; - } - /** - * @brief Greater than operator. - */ - CUDF_HOST_DEVICE inline bool operator>(T const& rhs) const - { - return static_cast(*this).p_ > rhs.p_; - } - /** - * @brief Less than or equals to operator. - */ - CUDF_HOST_DEVICE inline bool operator<=(T const& rhs) const - { - return static_cast(*this).p_ <= rhs.p_; - } - /** - * @brief Greater than or equals to operator. - */ - CUDF_HOST_DEVICE inline bool operator>=(T const& rhs) const - { - return static_cast(*this).p_ >= rhs.p_; - } - - protected: - /** - * @brief Constructor assigns width and type member variables for base class. - */ - base_indexalator(int32_t width, data_type dtype) : width_(width), dtype_(dtype) {} - - int width_; /// integer type width = 1,2,4, or 8 - data_type dtype_; /// for type-dispatcher calls -}; - /** * @brief The index normalizing input iterator. 
* @@ -244,65 +56,7 @@ struct base_indexalator { * auto result = thrust::find(thrust::device, begin, end, size_type{12} ); * @endcode */ -struct input_indexalator : base_indexalator { - friend struct indexalator_factory; - friend struct base_indexalator; // for CRTP - - using reference = size_type const; // this keeps STL and thrust happy - - input_indexalator() = default; - input_indexalator(input_indexalator const&) = default; - input_indexalator(input_indexalator&&) = default; - input_indexalator& operator=(input_indexalator const&) = default; - input_indexalator& operator=(input_indexalator&&) = default; - - /** - * @brief Indirection operator returns the value at the current iterator position. - */ - __device__ inline size_type operator*() const { return operator[](0); } - - /** - * @brief Dispatch functor for resolving a size_type value from any index type. - */ - struct index_as_size_type { - template ()>* = nullptr> - __device__ size_type operator()(void const* tp) - { - return static_cast(*static_cast(tp)); - } - template ()>* = nullptr> - __device__ size_type operator()(void const* tp) - { - CUDF_UNREACHABLE("only index types are supported"); - } - }; - /** - * @brief Array subscript operator returns a value at the input - * `idx` position as a `size_type` value. - */ - __device__ inline size_type operator[](size_type idx) const - { - void const* tp = p_ + (idx * width_); - return type_dispatcher(dtype_, index_as_size_type{}, tp); - } - - protected: - /** - * @brief Create an input index normalizing iterator. - * - * Use the indexalator_factory to create an iterator instance. - * - * @param data Pointer to an integer array in device memory. - * @param width The width of the integer type (1, 2, 4, or 8) - * @param data_type Index integer type of width `width` - */ - input_indexalator(void const* data, int width, data_type dtype) - : base_indexalator(width, dtype), p_{static_cast(data)} - { - } - - char const* p_; /// pointer to the integer data in device memory -}; +using input_indexalator = input_normalator; /** * @brief The index normalizing output iterator. @@ -328,79 +82,7 @@ struct input_indexalator : base_indexalator { * thrust::less()); * @endcode */ -struct output_indexalator : base_indexalator { - friend struct indexalator_factory; - friend struct base_indexalator; // for CRTP - - using reference = output_indexalator const&; // required for output iterators - - output_indexalator() = default; - output_indexalator(output_indexalator const&) = default; - output_indexalator(output_indexalator&&) = default; - output_indexalator& operator=(output_indexalator const&) = default; - output_indexalator& operator=(output_indexalator&&) = default; - - /** - * @brief Indirection operator returns this iterator instance in order - * to capture the `operator=(size_type)` calls. - */ - __device__ inline output_indexalator const& operator*() const { return *this; } - - /** - * @brief Array subscript operator returns an iterator instance at the specified `idx` position. - * - * This allows capturing the subsequent `operator=(size_type)` call in this class. - */ - __device__ inline output_indexalator const operator[](size_type idx) const - { - output_indexalator tmp{*this}; - tmp.p_ += (idx * width_); - return tmp; - } - - /** - * @brief Dispatch functor for setting the index value from a size_type value. 
- */ - struct size_type_to_index { - template ()>* = nullptr> - __device__ void operator()(void* tp, size_type const value) - { - (*static_cast(tp)) = static_cast(value); - } - template ()>* = nullptr> - __device__ void operator()(void* tp, size_type const value) - { - CUDF_UNREACHABLE("only index types are supported"); - } - }; - - /** - * @brief Assign a size_type value to the current iterator position. - */ - __device__ inline output_indexalator const& operator=(size_type const value) const - { - void* tp = p_; - type_dispatcher(dtype_, size_type_to_index{}, tp, value); - return *this; - } - - protected: - /** - * @brief Create an output index normalizing iterator. - * - * Use the indexalator_factory to create an iterator instance. - * - * @param data Pointer to an integer array in device memory. - * @param width The width of the integer type (1, 2, 4, or 8) - * @param data_type Index integer type of width `width` - */ - output_indexalator(void* data, int width, data_type dtype) - : base_indexalator(width, dtype), p_{static_cast(data)} - { - } - - char* p_; /// pointer to the integer data in device memory -}; +using output_indexalator = output_normalator; /** * @brief Use this class to create an indexalator instance. @@ -413,7 +95,7 @@ struct indexalator_factory { template ()>* = nullptr> input_indexalator operator()(column_view const& indices) { - return input_indexalator(indices.data(), sizeof(IndexType), indices.type()); + return input_indexalator(indices.data(), indices.type()); } template const&>(index) creates a copy auto const scalar_impl = static_cast const*>(&index); - return input_indexalator(scalar_impl->data(), sizeof(IndexType), index.type()); + return input_indexalator(scalar_impl->data(), index.type()); } template ()>* = nullptr> output_indexalator operator()(mutable_column_view const& indices) { - return output_indexalator(indices.data(), sizeof(IndexType), indices.type()); + return output_indexalator(indices.data(), indices.type()); } template + +#include + +namespace cudf { +namespace detail { + +/** + * @brief The base class for the input or output normalizing iterator + * + * The base class mainly manages updating the `p_` member variable while the + * subclasses handle accessing individual elements in device memory. + * + * @tparam Derived The derived class type for the iterator + * @tparam Integer The type the iterator normalizes to + */ +template +struct base_normalator { + static_assert(std::is_integral_v); + using difference_type = std::ptrdiff_t; + using value_type = Integer; + using pointer = Integer*; + using iterator_category = std::random_access_iterator_tag; + + base_normalator() = default; + base_normalator(base_normalator const&) = default; + base_normalator(base_normalator&&) = default; + base_normalator& operator=(base_normalator const&) = default; + base_normalator& operator=(base_normalator&&) = default; + + /** + * @brief Prefix increment operator. + */ + CUDF_HOST_DEVICE inline Derived& operator++() + { + Derived& derived = static_cast(*this); + derived.p_ += width_; + return derived; + } + + /** + * @brief Postfix increment operator. + */ + CUDF_HOST_DEVICE inline Derived operator++(int) + { + Derived tmp{static_cast(*this)}; + operator++(); + return tmp; + } + + /** + * @brief Prefix decrement operator. + */ + CUDF_HOST_DEVICE inline Derived& operator--() + { + Derived& derived = static_cast(*this); + derived.p_ -= width_; + return derived; + } + + /** + * @brief Postfix decrement operator. 
+ */ + CUDF_HOST_DEVICE inline Derived operator--(int) + { + Derived tmp{static_cast(*this)}; + operator--(); + return tmp; + } + + /** + * @brief Compound assignment by sum operator. + */ + CUDF_HOST_DEVICE inline Derived& operator+=(difference_type offset) + { + Derived& derived = static_cast(*this); + derived.p_ += offset * width_; + return derived; + } + + /** + * @brief Increment by offset operator. + */ + CUDF_HOST_DEVICE inline Derived operator+(difference_type offset) const + { + auto tmp = Derived{static_cast(*this)}; + tmp.p_ += (offset * width_); + return tmp; + } + + /** + * @brief Addition assignment operator. + */ + CUDF_HOST_DEVICE inline friend Derived operator+(difference_type offset, Derived const& rhs) + { + Derived tmp{rhs}; + tmp.p_ += (offset * rhs.width_); + return tmp; + } + + /** + * @brief Compound assignment by difference operator. + */ + CUDF_HOST_DEVICE inline Derived& operator-=(difference_type offset) + { + Derived& derived = static_cast(*this); + derived.p_ -= offset * width_; + return derived; + } + + /** + * @brief Decrement by offset operator. + */ + CUDF_HOST_DEVICE inline Derived operator-(difference_type offset) const + { + auto tmp = Derived{static_cast(*this)}; + tmp.p_ -= (offset * width_); + return tmp; + } + + /** + * @brief Subtraction assignment operator. + */ + CUDF_HOST_DEVICE inline friend Derived operator-(difference_type offset, Derived const& rhs) + { + Derived tmp{rhs}; + tmp.p_ -= (offset * rhs.width_); + return tmp; + } + + /** + * @brief Compute offset from iterator difference operator. + */ + CUDF_HOST_DEVICE inline difference_type operator-(Derived const& rhs) const + { + return (static_cast(*this).p_ - rhs.p_) / width_; + } + + /** + * @brief Equals to operator. + */ + CUDF_HOST_DEVICE inline bool operator==(Derived const& rhs) const + { + return rhs.p_ == static_cast(*this).p_; + } + + /** + * @brief Not equals to operator. + */ + CUDF_HOST_DEVICE inline bool operator!=(Derived const& rhs) const + { + return rhs.p_ != static_cast(*this).p_; + } + + /** + * @brief Less than operator. + */ + CUDF_HOST_DEVICE inline bool operator<(Derived const& rhs) const + { + return static_cast(*this).p_ < rhs.p_; + } + + /** + * @brief Greater than operator. + */ + CUDF_HOST_DEVICE inline bool operator>(Derived const& rhs) const + { + return static_cast(*this).p_ > rhs.p_; + } + + /** + * @brief Less than or equals to operator. + */ + CUDF_HOST_DEVICE inline bool operator<=(Derived const& rhs) const + { + return static_cast(*this).p_ <= rhs.p_; + } + + /** + * @brief Greater than or equals to operator. + */ + CUDF_HOST_DEVICE inline bool operator>=(Derived const& rhs) const + { + return static_cast(*this).p_ >= rhs.p_; + } + + protected: + /** + * @brief Constructor assigns width and type member variables for base class. + */ + explicit base_normalator(data_type dtype) : width_(size_of(dtype)), dtype_(dtype) {} + + int width_; /// integer type width = 1,2,4, or 8 + data_type dtype_; /// for type-dispatcher calls +}; + +/** + * @brief The integer normalizing input iterator + * + * This is an iterator that can be used for index types (integers) without + * requiring a type-specific instance. It can be used for any iterator + * interface for reading an array of integer values of type + * int8, int16, int32, int64, uint8, uint16, uint32, or uint64. 
+ * Reading specific elements always return a type of `Integer` + * + * @tparam Integer Type returned by all read functions + */ +template +struct input_normalator : base_normalator, Integer> { + friend struct base_normalator, Integer>; // for CRTP + + using reference = Integer const; // this keeps STL and thrust happy + + input_normalator() = default; + input_normalator(input_normalator const&) = default; + input_normalator(input_normalator&&) = default; + input_normalator& operator=(input_normalator const&) = default; + input_normalator& operator=(input_normalator&&) = default; + + /** + * @brief Indirection operator returns the value at the current iterator position + */ + __device__ inline Integer operator*() const { return operator[](0); } + + /** + * @brief Dispatch functor for resolving a Integer value from any integer type + */ + struct normalize_type { + template >* = nullptr> + __device__ Integer operator()(void const* tp) + { + return static_cast(*static_cast(tp)); + } + template >* = nullptr> + __device__ Integer operator()(void const*) + { + CUDF_UNREACHABLE("only integral types are supported"); + } + }; + + /** + * @brief Array subscript operator returns a value at the input + * `idx` position as a `Integer` value. + */ + __device__ inline Integer operator[](size_type idx) const + { + void const* tp = p_ + (idx * this->width_); + return type_dispatcher(this->dtype_, normalize_type{}, tp); + } + + /** + * @brief Create an input index normalizing iterator. + * + * Use the indexalator_factory to create an iterator instance. + * + * @param data Pointer to an integer array in device memory. + * @param data_type Type of data in data + */ + input_normalator(void const* data, data_type dtype) + : base_normalator, Integer>(dtype), p_{static_cast(data)} + { + } + + char const* p_; /// pointer to the integer data in device memory +}; + +/** + * @brief The integer normalizing output iterator + * + * This is an iterator that can be used for index types (integers) without + * requiring a type-specific instance. It can be used for any iterator + * interface for writing an array of integer values of type + * int8, int16, int32, int64, uint8, uint16, uint32, or uint64. + * Setting specific elements always accept the `Integer` type values. + * + * @tparam Integer The type used for all write functions + */ +template +struct output_normalator : base_normalator, Integer> { + friend struct base_normalator, Integer>; // for CRTP + + using reference = output_normalator const&; // required for output iterators + + output_normalator() = default; + output_normalator(output_normalator const&) = default; + output_normalator(output_normalator&&) = default; + output_normalator& operator=(output_normalator const&) = default; + output_normalator& operator=(output_normalator&&) = default; + + /** + * @brief Indirection operator returns this iterator instance in order + * to capture the `operator=(Integer)` calls. + */ + __device__ inline output_normalator const& operator*() const { return *this; } + + /** + * @brief Array subscript operator returns an iterator instance at the specified `idx` position. + * + * This allows capturing the subsequent `operator=(Integer)` call in this class. + */ + __device__ inline output_normalator const operator[](size_type idx) const + { + output_normalator tmp{*this}; + tmp.p_ += (idx * this->width_); + return tmp; + } + + /** + * @brief Dispatch functor for setting the index value from a size_type value. 
+ */ + struct normalize_type { + template >* = nullptr> + __device__ void operator()(void* tp, Integer const value) + { + (*static_cast(tp)) = static_cast(value); + } + template >* = nullptr> + __device__ void operator()(void*, Integer const) + { + CUDF_UNREACHABLE("only index types are supported"); + } + }; + + /** + * @brief Assign an Integer value to the current iterator position + */ + __device__ inline output_normalator const& operator=(Integer const value) const + { + void* tp = p_; + type_dispatcher(this->dtype_, normalize_type{}, tp, value); + return *this; + } + + /** + * @brief Create an output normalizing iterator + * + * @param data Pointer to an integer array in device memory. + * @param data_type Type of data in data + */ + output_normalator(void* data, data_type dtype) + : base_normalator, Integer>(dtype), p_{static_cast(data)} + { + } + + char* p_; /// pointer to the integer data in device memory +}; + +} // namespace detail +} // namespace cudf From 517d1239c913c86f7c1d9dc6642434e73aa2b14c Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Fri, 22 Sep 2023 12:40:09 -0700 Subject: [PATCH 098/150] Expose streams in all public sorting APIs (#14146) Contributes to #925 Authors: - Vyas Ramasubramani (https://github.com/vyasr) Approvers: - Bradley Dice (https://github.com/bdice) - David Wendt (https://github.com/davidwendt) URL: https://github.com/rapidsai/cudf/pull/14146 --- cpp/include/cudf/sorting.hpp | 44 ++++++--- cpp/src/lists/segmented_sort.cu | 30 +++--- cpp/src/sort/is_sorted.cu | 5 +- cpp/src/sort/rank.cu | 11 +-- cpp/src/sort/segmented_sort.cu | 8 +- cpp/src/sort/segmented_sort_impl.cuh | 2 +- cpp/src/sort/sort.cu | 10 +- cpp/src/sort/stable_segmented_sort.cu | 8 +- cpp/src/sort/stable_sort.cu | 8 +- cpp/tests/CMakeLists.txt | 7 +- cpp/tests/streams/sorting_test.cpp | 132 ++++++++++++++++++++++++++ 11 files changed, 210 insertions(+), 55 deletions(-) create mode 100644 cpp/tests/streams/sorting_test.cpp diff --git a/cpp/include/cudf/sorting.hpp b/cpp/include/cudf/sorting.hpp index 6924e77ae9b..e4e803b2d3c 100644 --- a/cpp/include/cudf/sorting.hpp +++ b/cpp/include/cudf/sorting.hpp @@ -18,6 +18,7 @@ #include #include +#include #include @@ -43,6 +44,7 @@ namespace cudf { * @param null_precedence The desired order of null compared to other elements * for each column. Size must be equal to `input.num_columns()` or empty. * If empty, all columns will be sorted in `null_order::BEFORE`. + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return A non-nullable column of elements containing the permuted row indices of * `input` if it were sorted @@ -51,6 +53,7 @@ std::unique_ptr sorted_order( table_view const& input, std::vector const& column_order = {}, std::vector const& null_precedence = {}, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -65,27 +68,30 @@ std::unique_ptr stable_sorted_order( table_view const& input, std::vector const& column_order = {}, std::vector const& null_precedence = {}, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** * @brief Checks whether the rows of a `table` are sorted in a lexicographical * order. 
* - * @param[in] table Table whose rows need to be compared for ordering - * @param[in] column_order The expected sort order for each column. Size - * must be equal to `in.num_columns()` or empty. If - * empty, it is expected all columns are in - * ascending order. - * @param[in] null_precedence The desired order of null compared to other - * elements for each column. Size must be equal to - * `input.num_columns()` or empty. If empty, - * `null_order::BEFORE` is assumed for all columns. - * - * @returns bool true if sorted as expected, false if not + * @param table Table whose rows need to be compared for ordering + * @param column_order The expected sort order for each column. Size + * must be equal to `in.num_columns()` or empty. If + * empty, it is expected all columns are in + * ascending order. + * @param null_precedence The desired order of null compared to other + * elements for each column. Size must be equal to + * `input.num_columns()` or empty. If empty, + * `null_order::BEFORE` is assumed for all columns. + * + * @param stream CUDA stream used for device memory operations and kernel launches + * @returns true if sorted as expected, false if not */ bool is_sorted(cudf::table_view const& table, std::vector const& column_order, - std::vector const& null_precedence); + std::vector const& null_precedence, + rmm::cuda_stream_view stream = cudf::get_default_stream()); /** * @brief Performs a lexicographic sort of the rows of a table @@ -98,6 +104,7 @@ bool is_sorted(cudf::table_view const& table, * elements for each column in `input`. Size must be equal to * `input.num_columns()` or empty. If empty, all columns will be sorted with * `null_order::BEFORE`. + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned table's device memory * @return New table containing the desired sorted order of `input` */ @@ -105,6 +112,7 @@ std::unique_ptr
<table>
sort(
   table_view const& input,
   std::vector<order> const& column_order = {},
   std::vector<null_order> const& null_precedence = {},
+  rmm::cuda_stream_view stream = cudf::get_default_stream(),
   rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
 
 /**
@@ -124,6 +132,7 @@ std::unique_ptr<table> sort(
  * elements for each column in `keys`. Size must be equal to
  * `keys.num_columns()` or empty. If empty, all columns will be sorted with
  * `null_order::BEFORE`.
+ * @param stream CUDA stream used for device memory operations and kernel launches
  * @param mr Device memory resource used to allocate the returned table's device memory
  * @return The reordering of `values` determined by the lexicographic order of
  * the rows of `keys`.
  */
 std::unique_ptr<table> sort_by_key(
   table_view const& keys,
   std::vector<order> const& column_order = {},
   std::vector<null_order> const& null_precedence = {},
+  rmm::cuda_stream_view stream = cudf::get_default_stream(),
   rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
 
 /**
@@ -154,6 +164,7 @@ std::unique_ptr<table> sort_by_key(
  * elements for each column in `keys`. Size must be equal to
  * `keys.num_columns()` or empty. If empty, all columns will be sorted with
  * `null_order::BEFORE`.
+ * @param stream CUDA stream used for device memory operations and kernel launches
  * @param mr Device memory resource used to allocate the returned table's device memory
  * @return The reordering of `values` determined by the lexicographic order of
  * the rows of `keys`.
  */
 std::unique_ptr<table> stable_sort_by_key(
   table_view const& keys,
   std::vector<order> const& column_order = {},
   std::vector<null_order> const& null_precedence = {},
+  rmm::cuda_stream_view stream = cudf::get_default_stream(),
   rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
 
 /**
@@ -189,6 +201,7 @@ std::unique_ptr<table> stable_sort_by_key(
  * @param null_precedence The desired order of null compared to other elements
  * for column
  * @param percentage flag to convert ranks to percentage in range (0,1]
+ * @param stream CUDA stream used for device memory operations and kernel launches
  * @param mr Device memory resource used to allocate the returned column's device memory
  * @return A column of containing the rank of the each element of the column of `input`. The output
  * column type will be `size_type`column by default or else `double` when
  *
@@ -201,6 +214,7 @@ std::unique_ptr<column> rank(
   null_policy null_handling,
   null_order null_precedence,
   bool percentage,
+  rmm::cuda_stream_view stream = cudf::get_default_stream(),
   rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
 
 /**
@@ -241,6 +255,7 @@ std::unique_ptr<column> rank(
  * elements for each column in `keys`. Size must be equal to
  * `keys.num_columns()` or empty. If empty, all columns will be sorted with
  * `null_order::BEFORE`.
+ * @param stream CUDA stream used for device memory operations and kernel launches
  * @param mr Device memory resource to allocate any returned objects
  * @return sorted order of the segment sorted table
  *
  */
 std::unique_ptr<column> segmented_sorted_order(
   column_view const& segment_offsets,
   std::vector<order> const& column_order = {},
   std::vector<null_order> const& null_precedence = {},
+  rmm::cuda_stream_view stream = cudf::get_default_stream(),
   rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
 
 /**
@@ -262,6 +278,7 @@ std::unique_ptr<column> stable_segmented_sorted_order(
   column_view const& segment_offsets,
   std::vector<order> const& column_order = {},
   std::vector<null_order> const& null_precedence = {},
+  rmm::cuda_stream_view stream = cudf::get_default_stream(),
   rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
 
 /**
@@ -306,6 +323,7 @@ std::unique_ptr<column> stable_segmented_sorted_order(
  * elements for each column in `keys`. Size must be equal to
  * `keys.num_columns()` or empty. If empty, all columns will be sorted with
  * `null_order::BEFORE`.
+ * @param stream CUDA stream used for device memory operations and kernel launches
  * @param mr Device memory resource to allocate any returned objects
  * @return table with elements in each segment sorted
  *
  */
 std::unique_ptr<table> segmented_sort_by_key(
   column_view const& segment_offsets,
   std::vector<order> const& column_order = {},
   std::vector<null_order> const& null_precedence = {},
+  rmm::cuda_stream_view stream = cudf::get_default_stream(),
   rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
 
 /**
@@ -329,6 +348,7 @@ std::unique_ptr<table>
stable_segmented_sort_by_key( column_view const& segment_offsets, std::vector const& column_order = {}, std::vector const& null_precedence = {}, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of group diff --git a/cpp/src/lists/segmented_sort.cu b/cpp/src/lists/segmented_sort.cu index 260636a61cf..49054ebb046 100644 --- a/cpp/src/lists/segmented_sort.cu +++ b/cpp/src/lists/segmented_sort.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022, NVIDIA CORPORATION. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -70,13 +70,13 @@ std::unique_ptr sort_lists(lists_column_view const& input, auto output_offset = build_output_offsets(input, stream, mr); auto const child = input.get_sliced_child(stream); - auto const sorted_child_table = segmented_sort_by_key(table_view{{child}}, - table_view{{child}}, - output_offset->view(), - {column_order}, - {null_precedence}, - stream, - mr); + auto const sorted_child_table = cudf::detail::segmented_sort_by_key(table_view{{child}}, + table_view{{child}}, + output_offset->view(), + {column_order}, + {null_precedence}, + stream, + mr); return make_lists_column(input.size(), std::move(output_offset), @@ -98,13 +98,13 @@ std::unique_ptr stable_sort_lists(lists_column_view const& input, auto output_offset = build_output_offsets(input, stream, mr); auto const child = input.get_sliced_child(stream); - auto const sorted_child_table = stable_segmented_sort_by_key(table_view{{child}}, - table_view{{child}}, - output_offset->view(), - {column_order}, - {null_precedence}, - stream, - mr); + auto const sorted_child_table = cudf::detail::stable_segmented_sort_by_key(table_view{{child}}, + table_view{{child}}, + output_offset->view(), + {column_order}, + {null_precedence}, + stream, + mr); return make_lists_column(input.size(), std::move(output_offset), diff --git a/cpp/src/sort/is_sorted.cu b/cpp/src/sort/is_sorted.cu index 25c594e9e74..39476a2f534 100644 --- a/cpp/src/sort/is_sorted.cu +++ b/cpp/src/sort/is_sorted.cu @@ -73,7 +73,8 @@ bool is_sorted(cudf::table_view const& in, bool is_sorted(cudf::table_view const& in, std::vector const& column_order, - std::vector const& null_precedence) + std::vector const& null_precedence, + rmm::cuda_stream_view stream) { CUDF_FUNC_RANGE(); if (in.num_columns() == 0 || in.num_rows() == 0) { return true; } @@ -89,7 +90,7 @@ bool is_sorted(cudf::table_view const& in, "Number of columns in the table doesn't match the vector null_precedence's size .\n"); } - return detail::is_sorted(in, column_order, null_precedence, cudf::get_default_stream()); + return detail::is_sorted(in, column_order, null_precedence, stream); } } // namespace cudf diff --git a/cpp/src/sort/rank.cu b/cpp/src/sort/rank.cu index fd65e38d467..3ead8cfcbaa 100644 --- a/cpp/src/sort/rank.cu +++ b/cpp/src/sort/rank.cu @@ -366,16 +366,11 @@ std::unique_ptr rank(column_view const& input, null_policy null_handling, null_order null_precedence, bool percentage, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::rank(input, - method, - column_order, - null_handling, - null_precedence, - percentage, - cudf::get_default_stream(), - mr); + return detail::rank( + input, method, column_order, null_handling, null_precedence, percentage, stream, mr); } } // namespace cudf diff --git 
a/cpp/src/sort/segmented_sort.cu b/cpp/src/sort/segmented_sort.cu index 38d008c120c..d9457341bd2 100644 --- a/cpp/src/sort/segmented_sort.cu +++ b/cpp/src/sort/segmented_sort.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022, NVIDIA CORPORATION. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -81,11 +81,12 @@ std::unique_ptr segmented_sorted_order(table_view const& keys, column_view const& segment_offsets, std::vector const& column_order, std::vector const& null_precedence, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::segmented_sorted_order( - keys, segment_offsets, column_order, null_precedence, cudf::get_default_stream(), mr); + keys, segment_offsets, column_order, null_precedence, stream, mr); } std::unique_ptr
segmented_sort_by_key(table_view const& values, @@ -93,11 +94,12 @@ std::unique_ptr
segmented_sort_by_key(table_view const& values, column_view const& segment_offsets, std::vector const& column_order, std::vector const& null_precedence, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::segmented_sort_by_key( - values, keys, segment_offsets, column_order, null_precedence, cudf::get_default_stream(), mr); + values, keys, segment_offsets, column_order, null_precedence, stream, mr); } } // namespace cudf diff --git a/cpp/src/sort/segmented_sort_impl.cuh b/cpp/src/sort/segmented_sort_impl.cuh index 37664f33762..5d11bf055f1 100644 --- a/cpp/src/sort/segmented_sort_impl.cuh +++ b/cpp/src/sort/segmented_sort_impl.cuh @@ -166,7 +166,7 @@ std::unique_ptr fast_segmented_sorted_order(column_view const& input, // Unfortunately, CUB's segmented sort functions cannot accept iterators. // We have to build a pre-filled sequence of indices as input. auto sorted_indices = - cudf::detail::sequence(input.size(), numeric_scalar{0}, stream, mr); + cudf::detail::sequence(input.size(), numeric_scalar{0, true, stream}, stream, mr); auto indices_view = sorted_indices->mutable_view(); cudf::type_dispatcher(input.type(), diff --git a/cpp/src/sort/sort.cu b/cpp/src/sort/sort.cu index 25b95af4f83..46edae798d4 100644 --- a/cpp/src/sort/sort.cu +++ b/cpp/src/sort/sort.cu @@ -109,30 +109,32 @@ std::unique_ptr
sort(table_view const& input,
 std::unique_ptr<column> sorted_order(table_view const& input,
                                      std::vector<order> const& column_order,
                                      std::vector<null_order> const& null_precedence,
+                                     rmm::cuda_stream_view stream,
                                      rmm::mr::device_memory_resource* mr)
 {
   CUDF_FUNC_RANGE();
-  return detail::sorted_order(input, column_order, null_precedence, cudf::get_default_stream(), mr);
+  return detail::sorted_order(input, column_order, null_precedence, stream, mr);
 }
 
 std::unique_ptr<table> sort(table_view const& input,
                             std::vector<order> const& column_order,
                             std::vector<null_order> const& null_precedence,
+                            rmm::cuda_stream_view stream,
                             rmm::mr::device_memory_resource* mr)
 {
   CUDF_FUNC_RANGE();
-  return detail::sort(input, column_order, null_precedence, cudf::get_default_stream(), mr);
+  return detail::sort(input, column_order, null_precedence, stream, mr);
 }
 
 std::unique_ptr<table>
sort_by_key(table_view const& values, table_view const& keys, std::vector const& column_order, std::vector const& null_precedence, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::sort_by_key( - values, keys, column_order, null_precedence, cudf::get_default_stream(), mr); + return detail::sort_by_key(values, keys, column_order, null_precedence, stream, mr); } } // namespace cudf diff --git a/cpp/src/sort/stable_segmented_sort.cu b/cpp/src/sort/stable_segmented_sort.cu index 40df1b50279..4725d65e05d 100644 --- a/cpp/src/sort/stable_segmented_sort.cu +++ b/cpp/src/sort/stable_segmented_sort.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022, NVIDIA CORPORATION. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -55,11 +55,12 @@ std::unique_ptr stable_segmented_sorted_order( column_view const& segment_offsets, std::vector const& column_order, std::vector const& null_precedence, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::stable_segmented_sorted_order( - keys, segment_offsets, column_order, null_precedence, cudf::get_default_stream(), mr); + keys, segment_offsets, column_order, null_precedence, stream, mr); } std::unique_ptr
stable_segmented_sort_by_key(table_view const& values, @@ -67,11 +68,12 @@ std::unique_ptr
stable_segmented_sort_by_key(table_view const& values, column_view const& segment_offsets, std::vector const& column_order, std::vector const& null_precedence, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::stable_segmented_sort_by_key( - values, keys, segment_offsets, column_order, null_precedence, cudf::get_default_stream(), mr); + values, keys, segment_offsets, column_order, null_precedence, stream, mr); } } // namespace cudf diff --git a/cpp/src/sort/stable_sort.cu b/cpp/src/sort/stable_sort.cu index 6f5678c4168..cf602dcf1a9 100644 --- a/cpp/src/sort/stable_sort.cu +++ b/cpp/src/sort/stable_sort.cu @@ -62,22 +62,22 @@ std::unique_ptr
stable_sort_by_key(table_view const& values, std::unique_ptr stable_sorted_order(table_view const& input, std::vector const& column_order, std::vector const& null_precedence, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::stable_sorted_order( - input, column_order, null_precedence, cudf::get_default_stream(), mr); + return detail::stable_sorted_order(input, column_order, null_precedence, stream, mr); } std::unique_ptr
stable_sort_by_key(table_view const& values, table_view const& keys, std::vector const& column_order, std::vector const& null_precedence, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::stable_sort_by_key( - values, keys, column_order, null_precedence, cudf::get_default_stream(), mr); + return detail::stable_sort_by_key(values, keys, column_order, null_precedence, stream, mr); } } // namespace cudf diff --git a/cpp/tests/CMakeLists.txt b/cpp/tests/CMakeLists.txt index ba4921848d7..c7d3e2af19f 100644 --- a/cpp/tests/CMakeLists.txt +++ b/cpp/tests/CMakeLists.txt @@ -621,17 +621,18 @@ ConfigureTest( STREAM_IDENTIFICATION_TEST identify_stream_usage/test_default_stream_identification.cu ) -ConfigureTest(STREAM_HASHING_TEST streams/hash_test.cpp STREAM_MODE testing) -ConfigureTest(STREAM_COPYING_TEST streams/copying_test.cpp STREAM_MODE testing) -ConfigureTest(STREAM_GROUPBY_TEST streams/groupby_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_CONCATENATE_TEST streams/concatenate_test.cpp STREAM_MODE testing) +ConfigureTest(STREAM_COPYING_TEST streams/copying_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_FILLING_TEST streams/filling_test.cpp STREAM_MODE testing) +ConfigureTest(STREAM_GROUPBY_TEST streams/groupby_test.cpp STREAM_MODE testing) +ConfigureTest(STREAM_HASHING_TEST streams/hash_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_REPLACE_TEST streams/replace_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_SEARCH_TEST streams/search_test.cpp STREAM_MODE testing) ConfigureTest( STREAM_STRINGS_TEST streams/strings/case_test.cpp streams/strings/find_test.cpp STREAM_MODE testing ) +ConfigureTest(STREAM_SORTING_TEST streams/sorting_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_TEXT_TEST streams/text/ngrams_test.cpp STREAM_MODE testing) # ################################################################################################## diff --git a/cpp/tests/streams/sorting_test.cpp b/cpp/tests/streams/sorting_test.cpp new file mode 100644 index 00000000000..e481f95bded --- /dev/null +++ b/cpp/tests/streams/sorting_test.cpp @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +#include +#include +#include + +class SortingTest : public cudf::test::BaseFixture {}; + +TEST_F(SortingTest, SortedOrder) +{ + cudf::test::fixed_width_column_wrapper const column{10, 20, 30, 40, 50}; + cudf::table_view const tbl{{column}}; + + cudf::sorted_order(tbl, {}, {}, cudf::test::get_default_stream()); +} + +TEST_F(SortingTest, StableSortedOrder) +{ + cudf::test::fixed_width_column_wrapper const column{10, 20, 30, 40, 50}; + cudf::table_view const tbl{{column}}; + + cudf::stable_sorted_order(tbl, {}, {}, cudf::test::get_default_stream()); +} + +TEST_F(SortingTest, IsSorted) +{ + cudf::test::fixed_width_column_wrapper const column{10, 20, 30, 40, 50}; + cudf::table_view const tbl{{column}}; + + cudf::is_sorted(tbl, {}, {}, cudf::test::get_default_stream()); +} + +TEST_F(SortingTest, Sort) +{ + cudf::test::fixed_width_column_wrapper const column{10, 20, 30, 40, 50}; + cudf::table_view const tbl{{column}}; + + cudf::sort(tbl, {}, {}, cudf::test::get_default_stream()); +} + +TEST_F(SortingTest, SortByKey) +{ + cudf::test::fixed_width_column_wrapper const values_col{10, 20, 30, 40, 50}; + cudf::table_view const values{{values_col}}; + cudf::test::fixed_width_column_wrapper const keys_col{10, 20, 30, 40, 50}; + cudf::table_view const keys{{keys_col}}; + + cudf::sort_by_key(values, keys, {}, {}, cudf::test::get_default_stream()); +} + +TEST_F(SortingTest, StableSortByKey) +{ + cudf::test::fixed_width_column_wrapper const values_col{10, 20, 30, 40, 50}; + cudf::table_view const values{{values_col}}; + cudf::test::fixed_width_column_wrapper const keys_col{10, 20, 30, 40, 50}; + cudf::table_view const keys{{keys_col}}; + + cudf::stable_sort_by_key(values, keys, {}, {}, cudf::test::get_default_stream()); +} + +TEST_F(SortingTest, Rank) +{ + cudf::test::fixed_width_column_wrapper const column{10, 20, 30, 40, 50}; + + cudf::rank(column, + cudf::rank_method::AVERAGE, + cudf::order::ASCENDING, + cudf::null_policy::EXCLUDE, + cudf::null_order::AFTER, + false, + cudf::test::get_default_stream()); +} + +TEST_F(SortingTest, SegmentedSortedOrder) +{ + cudf::test::fixed_width_column_wrapper const keys_col{9, 8, 7, 6, 5, 4, 3, 2, 1, 0}; + cudf::table_view const keys{{keys_col}}; + cudf::test::fixed_width_column_wrapper const segment_offsets{3, 7}; + + cudf::segmented_sorted_order(keys, segment_offsets, {}, {}, cudf::test::get_default_stream()); +} + +TEST_F(SortingTest, StableSegmentedSortedOrder) +{ + cudf::test::fixed_width_column_wrapper const keys_col{9, 8, 7, 6, 5, 4, 3, 2, 1, 0}; + cudf::table_view const keys{{keys_col}}; + cudf::test::fixed_width_column_wrapper const segment_offsets{3, 7}; + + cudf::stable_segmented_sorted_order( + keys, segment_offsets, {}, {}, cudf::test::get_default_stream()); +} + +TEST_F(SortingTest, SegmentedSortByKey) +{ + cudf::test::fixed_width_column_wrapper const keys_col{9, 8, 7, 6, 5, 4, 3, 2, 1, 0}; + cudf::table_view const keys{{keys_col}}; + cudf::test::fixed_width_column_wrapper const values_col{7, 6, 9, 3, 4, 5, 1, 2, 0, 4}; + cudf::table_view const values{{values_col}}; + cudf::test::fixed_width_column_wrapper const segment_offsets{0, 3, 7, 10}; + + cudf::segmented_sort_by_key( + values, keys, segment_offsets, {}, {}, cudf::test::get_default_stream()); +} + +TEST_F(SortingTest, StableSegmentedSortByKey) +{ + cudf::test::fixed_width_column_wrapper const keys_col{9, 8, 7, 6, 5, 4, 3, 2, 1, 0}; + cudf::table_view const keys{{keys_col}}; + cudf::test::fixed_width_column_wrapper const values_col{7, 6, 9, 3, 4, 5, 1, 2, 0, 4}; + 
cudf::table_view const values{{values_col}}; + cudf::test::fixed_width_column_wrapper const segment_offsets{0, 3, 7, 10}; + + cudf::stable_segmented_sort_by_key( + values, keys, segment_offsets, {}, {}, cudf::test::get_default_stream()); +} From 71f30bec80194e8711156cea90d09b4ee0c940bd Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Fri, 22 Sep 2023 17:59:25 -0700 Subject: [PATCH 099/150] Enable direct ingestion and production of Arrow scalars (#14121) This PR adds overloads of `from_arrow` and `to_arrow` for scalars to enable interoperability on par with Arrow Arrays. The new public APIs accept streams, and for consistency streams have also been added to the corresponding column APIs, so this PR contributes to #925. Authors: - Vyas Ramasubramani (https://github.com/vyasr) Approvers: - David Wendt (https://github.com/davidwendt) - Bradley Dice (https://github.com/bdice) URL: https://github.com/rapidsai/cudf/pull/14121 --- cpp/include/cudf/detail/interop.hpp | 80 ++++++++++++++++++-- cpp/include/cudf/interop.hpp | 35 ++++++++- cpp/src/interop/from_arrow.cu | 88 +++++++++++++++++++++- cpp/src/interop/to_arrow.cu | 99 +++++++++++++++++++------ cpp/tests/CMakeLists.txt | 1 + cpp/tests/interop/from_arrow_test.cpp | 95 ++++++++++++++++++++++++ cpp/tests/interop/to_arrow_test.cpp | 103 ++++++++++++++++++++++++++ cpp/tests/streams/interop_test.cpp | 68 +++++++++++++++++ 8 files changed, 540 insertions(+), 29 deletions(-) create mode 100644 cpp/tests/streams/interop_test.cpp diff --git a/cpp/include/cudf/detail/interop.hpp b/cpp/include/cudf/detail/interop.hpp index 3d4832c8d17..44024333239 100644 --- a/cpp/include/cudf/detail/interop.hpp +++ b/cpp/include/cudf/detail/interop.hpp @@ -104,13 +104,67 @@ std::shared_ptr to_arrow_array(cudf::type_id id, Ts&&... args) } } +/** + * @brief Invokes an `operator()` template with the type instantiation based on + * the specified `arrow::DataType`'s `id()`. + * + * This function is analogous to libcudf's type_dispatcher, but instead applies + * to Arrow functions. Its primary use case is to leverage Arrow's + * metaprogramming facilities like arrow::TypeTraits that require translating + * the runtime dtype information into compile-time types. + */ +template +constexpr decltype(auto) arrow_type_dispatcher(arrow::DataType const& dtype, + Functor f, + Ts&&... 
args)
+{
+  switch (dtype.id()) {
+    case arrow::Type::INT8:
+      return f.template operator()<arrow::Int8Type>(std::forward<Ts>(args)...);
+    case arrow::Type::INT16:
+      return f.template operator()<arrow::Int16Type>(std::forward<Ts>(args)...);
+    case arrow::Type::INT32:
+      return f.template operator()<arrow::Int32Type>(std::forward<Ts>(args)...);
+    case arrow::Type::INT64:
+      return f.template operator()<arrow::Int64Type>(std::forward<Ts>(args)...);
+    case arrow::Type::UINT8:
+      return f.template operator()<arrow::UInt8Type>(std::forward<Ts>(args)...);
+    case arrow::Type::UINT16:
+      return f.template operator()<arrow::UInt16Type>(std::forward<Ts>(args)...);
+    case arrow::Type::UINT32:
+      return f.template operator()<arrow::UInt32Type>(std::forward<Ts>(args)...);
+    case arrow::Type::UINT64:
+      return f.template operator()<arrow::UInt64Type>(std::forward<Ts>(args)...);
+    case arrow::Type::FLOAT:
+      return f.template operator()<arrow::FloatType>(std::forward<Ts>(args)...);
+    case arrow::Type::DOUBLE:
+      return f.template operator()<arrow::DoubleType>(std::forward<Ts>(args)...);
+    case arrow::Type::BOOL:
+      return f.template operator()<arrow::BooleanType>(std::forward<Ts>(args)...);
+    case arrow::Type::TIMESTAMP:
+      return f.template operator()<arrow::TimestampType>(std::forward<Ts>(args)...);
+    case arrow::Type::DURATION:
+      return f.template operator()<arrow::DurationType>(std::forward<Ts>(args)...);
+    case arrow::Type::STRING:
+      return f.template operator()<arrow::StringType>(std::forward<Ts>(args)...);
+    case arrow::Type::LIST:
+      return f.template operator()<arrow::ListType>(std::forward<Ts>(args)...);
+    case arrow::Type::DECIMAL128:
+      return f.template operator()<arrow::Decimal128Type>(std::forward<Ts>(args)...);
+    case arrow::Type::STRUCT:
+      return f.template operator()<arrow::StructType>(std::forward<Ts>(args)...);
+    default: {
+      CUDF_FAIL("Invalid type.");
+    }
+  }
+}
+
 // Converting arrow type to cudf type
 data_type arrow_to_cudf_type(arrow::DataType const& arrow_type);
 
 /**
- * @copydoc cudf::to_arrow
- *
- * @param stream CUDA stream used for device memory operations and kernel launches.
+ * @copydoc cudf::to_arrow(table_view input, std::vector<column_metadata> const& metadata,
+ * rmm::cuda_stream_view stream, arrow::MemoryPool* ar_mr)
  */
 std::shared_ptr<arrow::Table> to_arrow(table_view input,
                                        std::vector<column_metadata> const& metadata,
                                        rmm::cuda_stream_view stream,
                                        arrow::MemoryPool* ar_mr);
 
 /**
- * @copydoc cudf::arrow_to_cudf
- *
- * @param stream CUDA stream used for device memory operations and kernel launches.
+ * @copydoc cudf::to_arrow(cudf::scalar const& input, column_metadata const& metadata,
+ * rmm::cuda_stream_view stream, arrow::MemoryPool* ar_mr)
+ */
+std::shared_ptr<arrow::Scalar> to_arrow(cudf::scalar const& input,
+                                        column_metadata const& metadata,
+                                        rmm::cuda_stream_view stream,
+                                        arrow::MemoryPool* ar_mr);
+/**
+ * @copydoc cudf::from_arrow(arrow::Table const& input_table, rmm::cuda_stream_view stream,
+ * rmm::mr::device_memory_resource* mr)
  */
 std::unique_ptr<table>
from_arrow(arrow::Table const& input_table, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); +/** + * @copydoc cudf::from_arrow(arrow::Scalar const& input, rmm::cuda_stream_view stream, + * rmm::mr::device_memory_resource* mr) + */ +std::unique_ptr from_arrow(arrow::Scalar const& input, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr); } // namespace detail } // namespace cudf diff --git a/cpp/include/cudf/interop.hpp b/cpp/include/cudf/interop.hpp index e210179b147..865cc004107 100644 --- a/cpp/include/cudf/interop.hpp +++ b/cpp/include/cudf/interop.hpp @@ -126,23 +126,56 @@ struct column_metadata { * * @param input table_view that needs to be converted to arrow Table * @param metadata Contains hierarchy of names of columns and children + * @param stream CUDA stream used for device memory operations and kernel launches * @param ar_mr arrow memory pool to allocate memory for arrow Table * @return arrow Table generated from `input` */ std::shared_ptr to_arrow(table_view input, std::vector const& metadata = {}, - arrow::MemoryPool* ar_mr = arrow::default_memory_pool()); + rmm::cuda_stream_view stream = cudf::get_default_stream(), + arrow::MemoryPool* ar_mr = arrow::default_memory_pool()); +/** + * @brief Create `arrow::Scalar` from cudf scalar `input` + * + * Converts the `cudf::scalar` to `arrow::Scalar`. + * + * @param input scalar that needs to be converted to arrow Scalar + * @param metadata Contains hierarchy of names of columns and children + * @param stream CUDA stream used for device memory operations and kernel launches + * @param ar_mr arrow memory pool to allocate memory for arrow Scalar + * @return arrow Scalar generated from `input` + */ +std::shared_ptr to_arrow(cudf::scalar const& input, + column_metadata const& metadata = {}, + rmm::cuda_stream_view stream = cudf::get_default_stream(), + arrow::MemoryPool* ar_mr = arrow::default_memory_pool()); /** * @brief Create `cudf::table` from given arrow Table input * * @param input arrow:Table that needs to be converted to `cudf::table` + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate `cudf::table` * @return cudf table generated from given arrow Table */ std::unique_ptr
from_arrow( arrow::Table const& input, + rmm::cuda_stream_view stream = cudf::get_default_stream(), + rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); + +/** + * @brief Create `cudf::scalar` from given arrow Scalar input + * + * @param input `arrow::Scalar` that needs to be converted to `cudf::scalar` + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate `cudf::scalar` + * @return cudf scalar generated from given arrow Scalar + */ + +std::unique_ptr from_arrow( + arrow::Scalar const& input, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of group diff --git a/cpp/src/interop/from_arrow.cu b/cpp/src/interop/from_arrow.cu index 30cfee97fd8..e39625c92e7 100644 --- a/cpp/src/interop/from_arrow.cu +++ b/cpp/src/interop/from_arrow.cu @@ -419,6 +419,52 @@ std::unique_ptr get_column(arrow::Array const& array, : get_empty_type_column(array.length()); } +struct BuilderGenerator { + template && + !std::is_same_v)> + std::shared_ptr operator()(std::shared_ptr const& type) + { + return std::make_shared::BuilderType>( + type, arrow::default_memory_pool()); + } + + template || + std::is_same_v)> + std::shared_ptr operator()(std::shared_ptr const& type) + { + CUDF_FAIL("Type not supported by BuilderGenerator"); + } +}; + +std::shared_ptr make_builder(std::shared_ptr const& type) +{ + switch (type->id()) { + case arrow::Type::STRUCT: { + std::vector> field_builders; + + for (auto field : type->fields()) { + auto const vt = field->type(); + if (vt->id() == arrow::Type::STRUCT || vt->id() == arrow::Type::LIST) { + field_builders.push_back(make_builder(vt)); + } else { + field_builders.push_back(arrow_type_dispatcher(*vt, BuilderGenerator{}, vt)); + } + } + return std::make_shared( + type, arrow::default_memory_pool(), field_builders); + } + case arrow::Type::LIST: { + return std::make_shared(arrow::default_memory_pool(), + make_builder(type->field(0)->type())); + } + default: { + return arrow_type_dispatcher(*type, BuilderGenerator{}, type); + } + } +} + } // namespace std::unique_ptr
from_arrow(arrow::Table const& input_table, @@ -462,14 +508,54 @@ std::unique_ptr
from_arrow(arrow::Table const& input_table, return std::make_unique
(std::move(columns)); } +std::unique_ptr from_arrow(arrow::Scalar const& input, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + // Get a builder for the scalar type + auto builder = detail::make_builder(input.type); + + auto status = builder->AppendScalar(input); + if (status != arrow::Status::OK()) { + if (status.IsNotImplemented()) { + // The only known failure case here is for nulls + CUDF_FAIL("Cannot create untyped null scalars or nested types with untyped null leaf nodes", + std::invalid_argument); + } + CUDF_FAIL("Arrow ArrayBuilder::AppendScalar failed"); + } + + auto maybe_array = builder->Finish(); + if (!maybe_array.ok()) { CUDF_FAIL("Arrow ArrayBuilder::Finish failed"); } + auto array = *maybe_array; + + auto field = arrow::field("", input.type); + + auto table = arrow::Table::Make(arrow::schema({field}), {array}); + + auto cudf_table = detail::from_arrow(*table, stream, mr); + + auto cv = cudf_table->view().column(0); + return get_element(cv, 0, stream); +} + } // namespace detail std::unique_ptr
from_arrow(arrow::Table const& input_table, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::from_arrow(input_table, cudf::get_default_stream(), mr); + return detail::from_arrow(input_table, stream, mr); } +std::unique_ptr from_arrow(arrow::Scalar const& input, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + CUDF_FUNC_RANGE(); + + return detail::from_arrow(input, stream, mr); +} } // namespace cudf diff --git a/cpp/src/interop/to_arrow.cu b/cpp/src/interop/to_arrow.cu index 958a2fcb95f..0cd750bc947 100644 --- a/cpp/src/interop/to_arrow.cu +++ b/cpp/src/interop/to_arrow.cu @@ -15,14 +15,16 @@ */ #include +#include #include +#include #include #include +#include #include #include #include #include -#include #include #include #include @@ -77,7 +79,10 @@ std::shared_ptr fetch_mask_buffer(column_view input_view, auto mask_buffer = allocate_arrow_bitmap(static_cast(input_view.size()), ar_mr); CUDF_CUDA_TRY(cudaMemcpyAsync( mask_buffer->mutable_data(), - (input_view.offset() > 0) ? cudf::copy_bitmask(input_view).data() : input_view.null_mask(), + (input_view.offset() > 0) + ? cudf::detail::copy_bitmask(input_view, stream, rmm::mr::get_current_device_resource()) + .data() + : input_view.null_mask(), mask_size_in_bytes, cudaMemcpyDefault, stream.value())); @@ -139,29 +144,36 @@ struct dispatch_to_arrow { } }; -template <> -std::shared_ptr dispatch_to_arrow::operator()( - column_view input, - cudf::type_id, - column_metadata const&, - arrow::MemoryPool* ar_mr, - rmm::cuda_stream_view stream) +// Convert decimal types from libcudf to arrow where those types are not +// directly supported by Arrow. These types must be fit into 128 bits, the +// smallest decimal resolution supported by Arrow. +template +std::shared_ptr unsupported_decimals_to_arrow(column_view input, + int32_t precision, + arrow::MemoryPool* ar_mr, + rmm::cuda_stream_view stream) { - using DeviceType = int64_t; - size_type const BIT_WIDTH_RATIO = 2; // Array::Type:type::DECIMAL (128) / int64_t + constexpr size_type BIT_WIDTH_RATIO = sizeof(__int128_t) / sizeof(DeviceType); rmm::device_uvector buf(input.size() * BIT_WIDTH_RATIO, stream); auto count = thrust::make_counting_iterator(0); - thrust::for_each(rmm::exec_policy(cudf::get_default_stream()), - count, - count + input.size(), - [in = input.begin(), out = buf.data()] __device__(auto in_idx) { - auto const out_idx = in_idx * 2; - out[out_idx] = in[in_idx]; - out[out_idx + 1] = in[in_idx] < 0 ? -1 : 0; - }); + thrust::for_each( + rmm::exec_policy(cudf::get_default_stream()), + count, + count + input.size(), + [in = input.begin(), out = buf.data(), BIT_WIDTH_RATIO] __device__(auto in_idx) { + auto const out_idx = in_idx * BIT_WIDTH_RATIO; + // The lowest order bits are the value, the remainder + // simply matches the sign bit to satisfy the two's + // complement integer representation of negative numbers. + out[out_idx] = in[in_idx]; +#pragma unroll BIT_WIDTH_RATIO - 1 + for (auto i = 1; i < BIT_WIDTH_RATIO; ++i) { + out[out_idx + i] = in[in_idx] < 0 ? 
-1 : 0; + } + }); auto const buf_size_in_bytes = buf.size() * sizeof(DeviceType); auto data_buffer = allocate_arrow_buffer(buf_size_in_bytes, ar_mr); @@ -169,7 +181,7 @@ std::shared_ptr dispatch_to_arrow::operator()( CUDF_CUDA_TRY(cudaMemcpyAsync( data_buffer->mutable_data(), buf.data(), buf_size_in_bytes, cudaMemcpyDefault, stream.value())); - auto type = arrow::decimal(18, -input.type().scale()); + auto type = arrow::decimal(precision, -input.type().scale()); auto mask = fetch_mask_buffer(input, ar_mr, stream); auto buffers = std::vector>{mask, std::move(data_buffer)}; auto data = std::make_shared(type, input.size(), buffers); @@ -177,6 +189,28 @@ std::shared_ptr dispatch_to_arrow::operator()( return std::make_shared(data); } +template <> +std::shared_ptr dispatch_to_arrow::operator()( + column_view input, + cudf::type_id, + column_metadata const&, + arrow::MemoryPool* ar_mr, + rmm::cuda_stream_view stream) +{ + return unsupported_decimals_to_arrow(input, 9, ar_mr, stream); +} + +template <> +std::shared_ptr dispatch_to_arrow::operator()( + column_view input, + cudf::type_id, + column_metadata const&, + arrow::MemoryPool* ar_mr, + rmm::cuda_stream_view stream) +{ + return unsupported_decimals_to_arrow(input, 18, ar_mr, stream); +} + template <> std::shared_ptr dispatch_to_arrow::operator()( column_view input, @@ -403,14 +437,37 @@ std::shared_ptr to_arrow(table_view input, return result; } + +std::shared_ptr to_arrow(cudf::scalar const& input, + column_metadata const& metadata, + rmm::cuda_stream_view stream, + arrow::MemoryPool* ar_mr) +{ + auto const column = cudf::make_column_from_scalar(input, 1, stream); + cudf::table_view const tv{{column->view()}}; + auto const arrow_table = cudf::to_arrow(tv, {metadata}, stream); + auto const ac = arrow_table->column(0); + auto const maybe_scalar = ac->GetScalar(0); + if (!maybe_scalar.ok()) { CUDF_FAIL("Failed to produce a scalar"); } + return maybe_scalar.ValueOrDie(); +} } // namespace detail std::shared_ptr to_arrow(table_view input, std::vector const& metadata, + rmm::cuda_stream_view stream, arrow::MemoryPool* ar_mr) { CUDF_FUNC_RANGE(); - return detail::to_arrow(input, metadata, cudf::get_default_stream(), ar_mr); + return detail::to_arrow(input, metadata, stream, ar_mr); } +std::shared_ptr to_arrow(cudf::scalar const& input, + column_metadata const& metadata, + rmm::cuda_stream_view stream, + arrow::MemoryPool* ar_mr) +{ + CUDF_FUNC_RANGE(); + return detail::to_arrow(input, metadata, stream, ar_mr); +} } // namespace cudf diff --git a/cpp/tests/CMakeLists.txt b/cpp/tests/CMakeLists.txt index c7d3e2af19f..956bfc7c27d 100644 --- a/cpp/tests/CMakeLists.txt +++ b/cpp/tests/CMakeLists.txt @@ -626,6 +626,7 @@ ConfigureTest(STREAM_COPYING_TEST streams/copying_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_FILLING_TEST streams/filling_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_GROUPBY_TEST streams/groupby_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_HASHING_TEST streams/hash_test.cpp STREAM_MODE testing) +ConfigureTest(STREAM_INTEROP_TEST streams/interop_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_REPLACE_TEST streams/replace_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_SEARCH_TEST streams/search_test.cpp STREAM_MODE testing) ConfigureTest( diff --git a/cpp/tests/interop/from_arrow_test.cpp b/cpp/tests/interop/from_arrow_test.cpp index 9a5cc3733af..a898106a5b2 100644 --- a/cpp/tests/interop/from_arrow_test.cpp +++ b/cpp/tests/interop/from_arrow_test.cpp @@ -456,3 +456,98 @@ INSTANTIATE_TEST_CASE_P(FromArrowTest, 
std::make_tuple(0, 0), std::make_tuple(0, 3000), std::make_tuple(10000, 10000))); + +template +struct FromArrowNumericScalarTest : public cudf::test::BaseFixture {}; + +using NumericTypesNotBool = + cudf::test::Concat; +TYPED_TEST_SUITE(FromArrowNumericScalarTest, NumericTypesNotBool); + +TYPED_TEST(FromArrowNumericScalarTest, Basic) +{ + TypeParam const value{42}; + auto const arrow_scalar = arrow::MakeScalar(value); + auto const cudf_scalar = cudf::from_arrow(*arrow_scalar); + auto const cudf_numeric_scalar = + dynamic_cast*>(cudf_scalar.get()); + if (cudf_numeric_scalar == nullptr) { CUDF_FAIL("Attempted to test with a non-numeric type."); } + EXPECT_EQ(cudf_numeric_scalar->type(), cudf::data_type(cudf::type_to_id())); + EXPECT_EQ(cudf_numeric_scalar->value(), value); +} + +struct FromArrowDecimalScalarTest : public cudf::test::BaseFixture {}; + +// Only testing Decimal128 because that's the only size cudf and arrow have in common. +TEST_F(FromArrowDecimalScalarTest, Basic) +{ + auto const value{42}; + auto const precision{8}; + auto const scale{4}; + auto arrow_scalar = arrow::Decimal128Scalar(value, arrow::decimal128(precision, -scale)); + auto cudf_scalar = cudf::from_arrow(arrow_scalar); + + // Arrow offers a minimum of 128 bits for the Decimal type. + auto const cudf_decimal_scalar = + dynamic_cast*>(cudf_scalar.get()); + EXPECT_EQ(cudf_decimal_scalar->type(), + cudf::data_type(cudf::type_to_id(), scale)); + EXPECT_EQ(cudf_decimal_scalar->value(), value); +} + +struct FromArrowStringScalarTest : public cudf::test::BaseFixture {}; + +TEST_F(FromArrowStringScalarTest, Basic) +{ + auto const value = std::string("hello world"); + auto const arrow_scalar = arrow::StringScalar(value); + auto const cudf_scalar = cudf::from_arrow(arrow_scalar); + + auto const cudf_string_scalar = dynamic_cast(cudf_scalar.get()); + EXPECT_EQ(cudf_string_scalar->type(), cudf::data_type(cudf::type_id::STRING)); + EXPECT_EQ(cudf_string_scalar->to_string(), value); +} + +struct FromArrowListScalarTest : public cudf::test::BaseFixture {}; + +TEST_F(FromArrowListScalarTest, Basic) +{ + std::vector host_values = {1, 2, 3, 5, 6, 7, 8}; + std::vector host_validity = {true, true, true, false, true, true, true}; + + arrow::Int64Builder builder; + auto const status = builder.AppendValues(host_values, host_validity); + auto const maybe_array = builder.Finish(); + auto const array = *maybe_array; + + auto const arrow_scalar = arrow::ListScalar(array); + auto const cudf_scalar = cudf::from_arrow(arrow_scalar); + + auto const cudf_list_scalar = dynamic_cast(cudf_scalar.get()); + EXPECT_EQ(cudf_list_scalar->type(), cudf::data_type(cudf::type_id::LIST)); + + cudf::test::fixed_width_column_wrapper const lhs( + host_values.begin(), host_values.end(), host_validity.begin()); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(lhs, cudf_list_scalar->view()); +} + +struct FromArrowStructScalarTest : public cudf::test::BaseFixture {}; + +TEST_F(FromArrowStructScalarTest, Basic) +{ + int64_t const value{42}; + auto const underlying_arrow_scalar = arrow::MakeScalar(value); + + auto const field = arrow::field("", underlying_arrow_scalar->type); + auto const arrow_type = arrow::struct_({field}); + auto const arrow_scalar = arrow::StructScalar({underlying_arrow_scalar}, arrow_type); + auto const cudf_scalar = cudf::from_arrow(arrow_scalar); + + auto const cudf_struct_scalar = dynamic_cast(cudf_scalar.get()); + EXPECT_EQ(cudf_struct_scalar->type(), cudf::data_type(cudf::type_id::STRUCT)); + + cudf::test::fixed_width_column_wrapper const col({value}); + 
cudf::table_view const lhs({col}); + + CUDF_TEST_EXPECT_TABLES_EQUAL(lhs, cudf_struct_scalar->view()); +} diff --git a/cpp/tests/interop/to_arrow_test.cpp b/cpp/tests/interop/to_arrow_test.cpp index 97d80984272..6bb4cdfd747 100644 --- a/cpp/tests/interop/to_arrow_test.cpp +++ b/cpp/tests/interop/to_arrow_test.cpp @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -578,4 +579,106 @@ INSTANTIATE_TEST_CASE_P(ToArrowTest, std::make_tuple(0, 0), std::make_tuple(0, 3000))); +template +struct ToArrowNumericScalarTest : public cudf::test::BaseFixture {}; + +using NumericTypesNotBool = + cudf::test::Concat; +TYPED_TEST_SUITE(ToArrowNumericScalarTest, NumericTypesNotBool); + +TYPED_TEST(ToArrowNumericScalarTest, Basic) +{ + TypeParam const value{42}; + auto const cudf_scalar = cudf::make_fixed_width_scalar(value); + + cudf::column_metadata const metadata{""}; + auto const arrow_scalar = cudf::to_arrow(*cudf_scalar, metadata); + + auto const ref_arrow_scalar = arrow::MakeScalar(value); + EXPECT_TRUE(arrow_scalar->Equals(*ref_arrow_scalar)); +} + +struct ToArrowDecimalScalarTest : public cudf::test::BaseFixture {}; + +// Only testing Decimal128 because that's the only size cudf and arrow have in common. +TEST_F(ToArrowDecimalScalarTest, Basic) +{ + auto const value{42}; + auto const precision{18}; // cudf will convert to the widest-precision Arrow scalar of the type + int32_t const scale{4}; + + auto const cudf_scalar = + cudf::make_fixed_point_scalar(value, numeric::scale_type{scale}); + + cudf::column_metadata const metadata{""}; + auto const arrow_scalar = cudf::to_arrow(*cudf_scalar, metadata); + + auto const maybe_ref_arrow_scalar = + arrow::MakeScalar(arrow::decimal128(precision, -scale), value); + if (!maybe_ref_arrow_scalar.ok()) { CUDF_FAIL("Failed to construct reference scalar"); } + auto const ref_arrow_scalar = *maybe_ref_arrow_scalar; + EXPECT_TRUE(arrow_scalar->Equals(*ref_arrow_scalar)); +} + +struct ToArrowStringScalarTest : public cudf::test::BaseFixture {}; + +TEST_F(ToArrowStringScalarTest, Basic) +{ + std::string const value{"hello world"}; + auto const cudf_scalar = cudf::make_string_scalar(value); + cudf::column_metadata const metadata{""}; + auto const arrow_scalar = cudf::to_arrow(*cudf_scalar, metadata); + + auto const ref_arrow_scalar = arrow::MakeScalar(value); + EXPECT_TRUE(arrow_scalar->Equals(*ref_arrow_scalar)); +} + +struct ToArrowListScalarTest : public cudf::test::BaseFixture {}; + +TEST_F(ToArrowListScalarTest, Basic) +{ + std::vector const host_values = {1, 2, 3, 5, 6, 7, 8}; + std::vector const host_validity = {true, true, true, false, true, true, true}; + + cudf::test::fixed_width_column_wrapper const col( + host_values.begin(), host_values.end(), host_validity.begin()); + + auto const cudf_scalar = cudf::make_list_scalar(col); + + cudf::column_metadata const metadata{""}; + auto const arrow_scalar = cudf::to_arrow(*cudf_scalar, metadata); + + arrow::Int64Builder builder; + auto const status = builder.AppendValues(host_values, host_validity); + auto const maybe_array = builder.Finish(); + auto const array = *maybe_array; + + auto const ref_arrow_scalar = arrow::ListScalar(array); + + EXPECT_TRUE(arrow_scalar->Equals(ref_arrow_scalar)); +} + +struct ToArrowStructScalarTest : public cudf::test::BaseFixture {}; + +TEST_F(ToArrowStructScalarTest, Basic) +{ + int64_t const value{42}; + auto const field_name{"a"}; + + cudf::test::fixed_width_column_wrapper const col{value}; + cudf::table_view const tbl({col}); + auto const cudf_scalar = 
cudf::make_struct_scalar(tbl); + + cudf::column_metadata metadata{""}; + metadata.children_meta.emplace_back(field_name); + auto const arrow_scalar = cudf::to_arrow(*cudf_scalar, metadata); + + auto const underlying_arrow_scalar = arrow::MakeScalar(value); + auto const field = arrow::field(field_name, underlying_arrow_scalar->type, false); + auto const arrow_type = arrow::struct_({field}); + auto const ref_arrow_scalar = arrow::StructScalar({underlying_arrow_scalar}, arrow_type); + + EXPECT_TRUE(arrow_scalar->Equals(ref_arrow_scalar)); +} + CUDF_TEST_PROGRAM_MAIN() diff --git a/cpp/tests/streams/interop_test.cpp b/cpp/tests/streams/interop_test.cpp new file mode 100644 index 00000000000..7eac9e016eb --- /dev/null +++ b/cpp/tests/streams/interop_test.cpp @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include + +#include +#include +#include + +struct ArrowTest : public cudf::test::BaseFixture {}; + +TEST_F(ArrowTest, ToArrow) +{ + int32_t const value{42}; + auto col = cudf::test::fixed_width_column_wrapper{{value}}; + cudf::table_view tbl{{col}}; + + std::vector metadata{{""}}; + cudf::to_arrow(tbl, metadata, cudf::test::get_default_stream()); +} + +TEST_F(ArrowTest, FromArrow) +{ + std::vector host_values = {1, 2, 3, 5, 6, 7, 8}; + std::vector host_validity = {true, true, true, false, true, true, true}; + + arrow::Int64Builder builder; + auto status = builder.AppendValues(host_values, host_validity); + auto maybe_array = builder.Finish(); + auto array = *maybe_array; + + auto field = arrow::field("", arrow::int32()); + auto schema = arrow::schema({field}); + auto table = arrow::Table::Make(schema, {array}); + cudf::from_arrow(*table, cudf::test::get_default_stream()); +} + +TEST_F(ArrowTest, ToArrowScalar) +{ + int32_t const value{42}; + auto cudf_scalar = + cudf::make_fixed_width_scalar(value, cudf::test::get_default_stream()); + + cudf::column_metadata metadata{""}; + cudf::to_arrow(*cudf_scalar, metadata, cudf::test::get_default_stream()); +} + +TEST_F(ArrowTest, FromArrowScalar) +{ + int32_t const value{42}; + auto arrow_scalar = arrow::MakeScalar(value); + cudf::from_arrow(*arrow_scalar, cudf::test::get_default_stream()); +} From d67cc5d05a6c18dd832f7b63421296fb66ae56f1 Mon Sep 17 00:00:00 2001 From: MithunR Date: Fri, 22 Sep 2023 22:01:40 -0700 Subject: [PATCH 100/150] Fix assert failure for range window functions (#14168) Authors: - MithunR (https://github.com/mythrocks) - Yunsong Wang (https://github.com/PointKernel) Approvers: - Divye Gala (https://github.com/divyegala) - David Wendt (https://github.com/davidwendt) - Yunsong Wang (https://github.com/PointKernel) URL: https://github.com/rapidsai/cudf/pull/14168 --- cpp/src/rolling/grouped_rolling.cu | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/cpp/src/rolling/grouped_rolling.cu b/cpp/src/rolling/grouped_rolling.cu index 6e69b5157c2..7ac784bef43 
100644
--- a/cpp/src/rolling/grouped_rolling.cu
+++ b/cpp/src/rolling/grouped_rolling.cu
@@ -357,6 +357,16 @@ template <typename T>
 struct device_value_accessor {
   column_device_view const col;  ///< column view of column in device
 
+  /// Checks that the type used to access device values matches the rep-type
+  /// of the order-by column.
+  struct is_correct_range_rep {
+    template <typename U>  /// Order-by type.
+    constexpr bool operator()() const
+    {
+      return std::is_same_v<T, cudf::detail::range_rep_type<U>>;
+    }
+  };
+
   /**
    * @brief constructor
    *
@@ -364,8 +374,11 @@ struct device_value_accessor {
    */
   explicit __device__ device_value_accessor(column_device_view const& col_) : col{col_}
   {
-    cudf_assert(type_id_matches_device_storage_type<T>(col.type().id()) &&
-                "the data type mismatch");
+    // For non-timestamp types, T must match the order-by column's type.
+    // For timestamp types, T must match the range rep type for the order-by column.
+    cudf_assert((type_id_matches_device_storage_type<T>(col.type().id()) or
+                 cudf::type_dispatcher(col.type(), is_correct_range_rep{})) &&
+                "data type mismatch when accessing the order-by column");
   }
 
   /**

From fe3cab5595337300345573d7e64fa52cba78a6c5 Mon Sep 17 00:00:00 2001
From: Karthikeyan <6488848+karthikeyann@users.noreply.github.com>
Date: Mon, 25 Sep 2023 10:15:44 +0530
Subject: [PATCH 101/150] Fix Memcheck error found in JSON_TEST
 JsonReaderTest.ErrorStrings (#14164)

Fix the missing null mask in string column name parsing. On a parsing error,
the row is made null. To write the output properly, the nulls need to be
passed along so that they can be skipped during the output-writing stage in
`parse_data`.

Fixes #14141

Authors:
  - Karthikeyan (https://github.com/karthikeyann)

Approvers:
  - David Wendt (https://github.com/davidwendt)
  - Elias Stehle (https://github.com/elstehle)
  - Nghia Truong (https://github.com/ttnghia)

URL: https://github.com/rapidsai/cudf/pull/14164
---
 cpp/src/io/utilities/data_casting.cu | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/cpp/src/io/utilities/data_casting.cu b/cpp/src/io/utilities/data_casting.cu
index 1772e5e43fa..d16237d7afe 100644
--- a/cpp/src/io/utilities/data_casting.cu
+++ b/cpp/src/io/utilities/data_casting.cu
@@ -924,6 +924,9 @@ std::unique_ptr<column> parse_data(
   if (col_size == 0) { return make_empty_column(col_type); }
   auto d_null_count    = rmm::device_scalar<size_type>(null_count, stream);
   auto null_count_data = d_null_count.data();
+  if (null_mask.is_empty()) {
+    null_mask = cudf::detail::create_null_mask(col_size, mask_state::ALL_VALID, stream, mr);
+  }
 
   // Prepare iterator that returns (string_ptr, string_length)-pairs needed by type conversion
   auto str_tuples = thrust::make_transform_iterator(offset_length_begin, to_string_view_pair{data});

From 3f47b5d463445faa9f95b1cc57c46fb5b41f60a7 Mon Sep 17 00:00:00 2001
From: David Wendt <45795991+davidwendt@users.noreply.github.com>
Date: Mon, 25 Sep 2023 11:28:33 -0400
Subject: [PATCH 102/150] Move cpp/src/hash/hash_allocator.cuh to
 include/cudf/hashing/detail (#14163)

Moves `cpp/src/hash/hash_allocator.cuh` to `include/cudf/hashing/detail` so it
may be more accessible from non-src/hash source files. Also found
`cpp/src/hash/helper_functions.cuh` used in the same way, so moved that one as
well.

No functional changes, just headers moved and includes fixed up.
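For downstream code the visible change is only the include path; a sketch of what a consumer outside of `cpp/src/hash` writes after this move (paths taken from the rename summary below):

```cpp
// Previously reachable only via a path relative to cpp/src:
//   #include <hash/hash_allocator.cuh>
// Now available from the public detail include tree:
#include <cudf/hashing/detail/hash_allocator.cuh>
#include <cudf/hashing/detail/helper_functions.cuh>
```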
Reference: https://github.com/rapidsai/cudf/pull/13930#discussion_r1330118935 Closes #14143 Authors: - David Wendt (https://github.com/davidwendt) Approvers: - Yunsong Wang (https://github.com/PointKernel) - Vyas Ramasubramani (https://github.com/vyasr) URL: https://github.com/rapidsai/cudf/pull/14163 --- .../hash => include/cudf/hashing/detail}/hash_allocator.cuh | 0 .../cudf/hashing/detail}/helper_functions.cuh | 0 cpp/src/hash/concurrent_unordered_map.cuh | 4 ++-- cpp/src/hash/unordered_multiset.cuh | 3 +-- cpp/src/io/json/json_tree.cu | 4 ++-- cpp/src/join/join_common_utils.hpp | 5 ++--- cpp/src/stream_compaction/stream_compaction_common.hpp | 5 ++--- cpp/src/text/subword/bpe_tokenizer.cuh | 3 +-- 8 files changed, 10 insertions(+), 14 deletions(-) rename cpp/{src/hash => include/cudf/hashing/detail}/hash_allocator.cuh (100%) rename cpp/{src/hash => include/cudf/hashing/detail}/helper_functions.cuh (100%) diff --git a/cpp/src/hash/hash_allocator.cuh b/cpp/include/cudf/hashing/detail/hash_allocator.cuh similarity index 100% rename from cpp/src/hash/hash_allocator.cuh rename to cpp/include/cudf/hashing/detail/hash_allocator.cuh diff --git a/cpp/src/hash/helper_functions.cuh b/cpp/include/cudf/hashing/detail/helper_functions.cuh similarity index 100% rename from cpp/src/hash/helper_functions.cuh rename to cpp/include/cudf/hashing/detail/helper_functions.cuh diff --git a/cpp/src/hash/concurrent_unordered_map.cuh b/cpp/src/hash/concurrent_unordered_map.cuh index 439b1c2d066..d773c2763df 100644 --- a/cpp/src/hash/concurrent_unordered_map.cuh +++ b/cpp/src/hash/concurrent_unordered_map.cuh @@ -16,12 +16,12 @@ #pragma once -#include -#include #include #include #include +#include +#include #include #include diff --git a/cpp/src/hash/unordered_multiset.cuh b/cpp/src/hash/unordered_multiset.cuh index 87075a39ea3..183042fc0f4 100644 --- a/cpp/src/hash/unordered_multiset.cuh +++ b/cpp/src/hash/unordered_multiset.cuh @@ -16,11 +16,10 @@ #pragma once -#include - #include #include #include +#include #include #include diff --git a/cpp/src/io/json/json_tree.cu b/cpp/src/io/json/json_tree.cu index 9231040eb70..da5b0eedfbd 100644 --- a/cpp/src/io/json/json_tree.cu +++ b/cpp/src/io/json/json_tree.cu @@ -15,8 +15,6 @@ */ #include "nested_json.hpp" -#include -#include #include #include @@ -24,7 +22,9 @@ #include #include #include +#include #include +#include #include #include diff --git a/cpp/src/join/join_common_utils.hpp b/cpp/src/join/join_common_utils.hpp index 4c1b1ed98b1..e96505e5ed6 100644 --- a/cpp/src/join/join_common_utils.hpp +++ b/cpp/src/join/join_common_utils.hpp @@ -17,13 +17,12 @@ #include #include +#include +#include #include #include #include -#include -#include - #include #include diff --git a/cpp/src/stream_compaction/stream_compaction_common.hpp b/cpp/src/stream_compaction/stream_compaction_common.hpp index 58d958d2ff4..18c531e3e69 100644 --- a/cpp/src/stream_compaction/stream_compaction_common.hpp +++ b/cpp/src/stream_compaction/stream_compaction_common.hpp @@ -15,12 +15,11 @@ */ #pragma once +#include +#include #include #include -#include -#include - #include #include diff --git a/cpp/src/text/subword/bpe_tokenizer.cuh b/cpp/src/text/subword/bpe_tokenizer.cuh index 83aa22aaae9..2fa879ea734 100644 --- a/cpp/src/text/subword/bpe_tokenizer.cuh +++ b/cpp/src/text/subword/bpe_tokenizer.cuh @@ -18,10 +18,9 @@ #include -#include - #include #include +#include #include #include From 036c07d363406da9e500c3d6be9a3edca28fd6c2 Mon Sep 17 00:00:00 2001 From: 
Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> Date: Mon, 25 Sep 2023 06:36:26 -1000 Subject: [PATCH 103/150] Fix DataFrame from Series with different CategoricalIndexes (#14157) closes #14130 Authors: - Matthew Roeschke (https://github.com/mroeschke) Approvers: - GALI PREM SAGAR (https://github.com/galipremsagar) URL: https://github.com/rapidsai/cudf/pull/14157 --- python/cudf/cudf/core/indexed_frame.py | 7 +++++++ python/cudf/cudf/tests/test_dataframe.py | 13 +++++++++++++ 2 files changed, 20 insertions(+) diff --git a/python/cudf/cudf/core/indexed_frame.py b/python/cudf/cudf/core/indexed_frame.py index 62e091b29b5..aacf1fa8dae 100644 --- a/python/cudf/cudf/core/indexed_frame.py +++ b/python/cudf/cudf/core/indexed_frame.py @@ -5438,6 +5438,13 @@ def _is_same_dtype(lhs_dtype, rhs_dtype): # for matching column dtype. if lhs_dtype == rhs_dtype: return True + elif ( + is_categorical_dtype(lhs_dtype) + and is_categorical_dtype(rhs_dtype) + and lhs_dtype.categories.dtype == rhs_dtype.categories.dtype + ): + # OK if categories are not all the same + return True elif ( is_categorical_dtype(lhs_dtype) and not is_categorical_dtype(rhs_dtype) diff --git a/python/cudf/cudf/tests/test_dataframe.py b/python/cudf/cudf/tests/test_dataframe.py index 2f531afdeb7..67b63028fab 100644 --- a/python/cudf/cudf/tests/test_dataframe.py +++ b/python/cudf/cudf/tests/test_dataframe.py @@ -10408,6 +10408,19 @@ def test_dataframe_init_from_nested_dict(): assert_eq(pdf, gdf) +def test_init_from_2_categoricalindex_series_diff_categories(): + s1 = cudf.Series( + [39, 6, 4], index=cudf.CategoricalIndex(["female", "male", "unknown"]) + ) + s2 = cudf.Series( + [2, 152, 2, 242, 150], + index=cudf.CategoricalIndex(["f", "female", "m", "male", "unknown"]), + ) + result = cudf.DataFrame([s1, s2]) + expected = pd.DataFrame([s1.to_pandas(), s2.to_pandas()]) + assert_eq(result, expected, check_dtype=False) + + def test_data_frame_values_no_cols_but_index(): result = cudf.DataFrame(index=range(5)).values expected = pd.DataFrame(index=range(5)).values From ddd2b0dfac0903c5f17d581eca5d6b945ede9451 Mon Sep 17 00:00:00 2001 From: "Richard (Rick) Zamora" Date: Mon, 25 Sep 2023 13:14:18 -0500 Subject: [PATCH 104/150] Allow explicit `shuffle="p2p"` within dask-cudf API (#13893) This PR allows explicit `shuffle="p2p"` usage within the dask-cudf API now that https://github.com/dask/distributed/pull/7743 is in. 
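A usage sketch, mirroring the `test_p2p_shuffle` test added below (assumes `dask_cuda` is installed; the `"p2p"` method requires a distributed client):

```python
import dask
import dask_cuda
from distributed import Client

# With a distributed client up, dask-cudf now accepts shuffle="p2p"
# explicitly; without a client it still defaults to "tasks".
with dask_cuda.LocalCUDACluster(n_workers=1) as cluster, Client(cluster):
    ddf = (
        dask.datasets.timeseries(
            start="2000-01-01", end="2000-01-08", dtypes={"x": int}
        )
        .reset_index(drop=True)
        .to_backend("cudf")
    )
    result = ddf.sort_values("x", shuffle="p2p").compute()
```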
Authors: - Richard (Rick) Zamora (https://github.com/rjzamora) - Ray Douglass (https://github.com/raydouglass) - gpuCI (https://github.com/GPUtester) - Mike Wendt (https://github.com/mike-wendt) - AJ Schmidt (https://github.com/ajschmidt8) - GALI PREM SAGAR (https://github.com/galipremsagar) - Lawrence Mitchell (https://github.com/wence-) Approvers: - Lawrence Mitchell (https://github.com/wence-) URL: https://github.com/rapidsai/cudf/pull/13893 --- python/dask_cudf/dask_cudf/backends.py | 31 ++++++++++++++++--- python/dask_cudf/dask_cudf/sorting.py | 26 +++++++++++----- .../dask_cudf/tests/test_dispatch.py | 11 +++++-- .../dask_cudf/tests/test_distributed.py | 22 ++++++++++++- 4 files changed, 76 insertions(+), 14 deletions(-) diff --git a/python/dask_cudf/dask_cudf/backends.py b/python/dask_cudf/dask_cudf/backends.py index e3f4f04eb85..344b03c631d 100644 --- a/python/dask_cudf/dask_cudf/backends.py +++ b/python/dask_cudf/dask_cudf/backends.py @@ -373,22 +373,37 @@ def percentile_cudf(a, q, interpolation="linear"): @pyarrow_schema_dispatch.register((cudf.DataFrame,)) -def _get_pyarrow_schema_cudf(obj, preserve_index=True, **kwargs): +def _get_pyarrow_schema_cudf(obj, preserve_index=None, **kwargs): if kwargs: warnings.warn( "Ignoring the following arguments to " f"`pyarrow_schema_dispatch`: {list(kwargs)}" ) - return meta_nonempty(obj).to_arrow(preserve_index=preserve_index).schema + + return _cudf_to_table( + meta_nonempty(obj), preserve_index=preserve_index + ).schema @to_pyarrow_table_dispatch.register(cudf.DataFrame) -def _cudf_to_table(obj, preserve_index=True, **kwargs): +def _cudf_to_table(obj, preserve_index=None, **kwargs): if kwargs: warnings.warn( "Ignoring the following arguments to " f"`to_pyarrow_table_dispatch`: {list(kwargs)}" ) + + # TODO: Remove this logic when cudf#14159 is resolved + # (see: https://github.com/rapidsai/cudf/issues/14159) + if preserve_index and isinstance(obj.index, cudf.RangeIndex): + obj = obj.copy() + obj.index.name = ( + obj.index.name + if obj.index.name is not None + else "__index_level_0__" + ) + obj.index = obj.index._as_int_index() + return obj.to_arrow(preserve_index=preserve_index) @@ -401,7 +416,15 @@ def _table_to_cudf(obj, table, self_destruct=None, **kwargs): f"Ignoring the following arguments to " f"`from_pyarrow_table_dispatch`: {list(kwargs)}" ) - return obj.from_arrow(table) + result = obj.from_arrow(table) + + # TODO: Remove this logic when cudf#14159 is resolved + # (see: https://github.com/rapidsai/cudf/issues/14159) + if "__index_level_0__" in result.index.names: + assert len(result.index.names) == 1 + result.index.name = None + + return result @union_categoricals_dispatch.register((cudf.Series, cudf.BaseIndex)) diff --git a/python/dask_cudf/dask_cudf/sorting.py b/python/dask_cudf/dask_cudf/sorting.py index e841f2d8830..d6c9c1be73c 100644 --- a/python/dask_cudf/dask_cudf/sorting.py +++ b/python/dask_cudf/dask_cudf/sorting.py @@ -6,7 +6,7 @@ import numpy as np import tlz as toolz -import dask +from dask import config from dask.base import tokenize from dask.dataframe import methods from dask.dataframe.core import DataFrame, Index, Series @@ -18,6 +18,8 @@ from cudf.api.types import is_categorical_dtype from cudf.utils.utils import _dask_cudf_nvtx_annotate +_SHUFFLE_SUPPORT = ("tasks", "p2p") # "disk" not supported + @_dask_cudf_nvtx_annotate def set_index_post(df, index_name, drop, column_dtype): @@ -307,15 +309,25 @@ def sort_values( return df4 
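+# Resolve the shuffle method to use when the caller does not pass one
+# explicitly; anything outside _SHUFFLE_SUPPORT (e.g. "disk") falls back
+# to "tasks".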
+def get_default_shuffle_method(): + # Note that `dask.utils.get_default_shuffle_method` + # will return "p2p" by default when a distributed + # client is present. Dask-cudf supports "p2p", but + # will not use it by default (yet) + default = config.get("dataframe.shuffle.method", "tasks") + if default not in _SHUFFLE_SUPPORT: + default = "tasks" + return default + + def _get_shuffle_type(shuffle): # Utility to set the shuffle-kwarg default - # and to validate user-specified options. - # The only supported options is currently "tasks" - shuffle = shuffle or dask.config.get("shuffle", "tasks") - if shuffle != "tasks": + # and to validate user-specified options + shuffle = shuffle or get_default_shuffle_method() + if shuffle not in _SHUFFLE_SUPPORT: raise ValueError( - f"Dask-cudf only supports in-memory shuffling with " - f"'tasks'. Got shuffle={shuffle}" + "Dask-cudf only supports the following shuffle " + f"methods: {_SHUFFLE_SUPPORT}. Got shuffle={shuffle}" ) return shuffle diff --git a/python/dask_cudf/dask_cudf/tests/test_dispatch.py b/python/dask_cudf/dask_cudf/tests/test_dispatch.py index cf49b1df4f4..c64e25fd437 100644 --- a/python/dask_cudf/dask_cudf/tests/test_dispatch.py +++ b/python/dask_cudf/dask_cudf/tests/test_dispatch.py @@ -22,18 +22,25 @@ def test_is_categorical_dispatch(): assert is_categorical_dtype(cudf.Index([1, 2, 3], dtype="category")) -def test_pyarrow_conversion_dispatch(): +@pytest.mark.parametrize("preserve_index", [True, False]) +def test_pyarrow_conversion_dispatch(preserve_index): from dask.dataframe.dispatch import ( from_pyarrow_table_dispatch, to_pyarrow_table_dispatch, ) df1 = cudf.DataFrame(np.random.randn(10, 3), columns=list("abc")) - df2 = from_pyarrow_table_dispatch(df1, to_pyarrow_table_dispatch(df1)) + df2 = from_pyarrow_table_dispatch( + df1, to_pyarrow_table_dispatch(df1, preserve_index=preserve_index) + ) assert type(df1) == type(df2) assert_eq(df1, df2) + # Check that preserve_index does not produce a RangeIndex + if preserve_index: + assert not isinstance(df2.index, cudf.RangeIndex) + @pytest.mark.parametrize("index", [None, [1, 2] * 5]) def test_deterministic_tokenize(index): diff --git a/python/dask_cudf/dask_cudf/tests/test_distributed.py b/python/dask_cudf/dask_cudf/tests/test_distributed.py index e24feaa2ea4..db3f3695648 100644 --- a/python/dask_cudf/dask_cudf/tests/test_distributed.py +++ b/python/dask_cudf/dask_cudf/tests/test_distributed.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020-2022, NVIDIA CORPORATION. +# Copyright (c) 2020-2023, NVIDIA CORPORATION. import numba.cuda import pytest @@ -77,3 +77,23 @@ def test_str_series_roundtrip(): actual = dask_series.compute() assert_eq(actual, expected) + + +def test_p2p_shuffle(): + # Check that we can use `shuffle="p2p"` + with dask_cuda.LocalCUDACluster(n_workers=1) as cluster: + with Client(cluster): + ddf = ( + dask.datasets.timeseries( + start="2000-01-01", + end="2000-01-08", + dtypes={"x": int}, + ) + .reset_index(drop=True) + .to_backend("cudf") + ) + dd.assert_eq( + ddf.sort_values("x", shuffle="p2p").compute(), + ddf.compute().sort_values("x"), + check_index=False, + ) From 1b925bfc7741eb22fed0a978fa0e1d0d5dfee601 Mon Sep 17 00:00:00 2001 From: Vukasin Milovanovic Date: Mon, 25 Sep 2023 13:09:16 -0700 Subject: [PATCH 105/150] Add Parquet reader benchmarks for row selection (#14147) Re-enabled the group of benchmarks that compares row selection options in Parquet reader. Use `read_parquet_metadata` to get the column names and number of row groups. 
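For illustration, a minimal sketch of that metadata query, mirroring the helper rewritten in this patch (the header location and the `source_info` variable are assumptions here):

```cpp
#include <cudf/io/parquet_metadata.hpp>  // assumed header for read_parquet_metadata

// Inspect schema and row-group count without decoding any column data;
// `source_info` stands for a cudf::io::source_info for the Parquet file.
auto const metadata       = cudf::io::read_parquet_metadata(source_info);
auto const num_row_groups = metadata.num_rowgroups();
for (auto const& col : metadata.schema().root().children()) {
  auto const name = col.name();  // top-level column name
}
```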
Clean up read chunk computation for ORC and Parquet benchmarks.

Authors:
  - Vukasin Milovanovic (https://github.com/vuule)

Approvers:
  - https://github.com/nvdbaranec
  - Nghia Truong (https://github.com/ttnghia)

URL: https://github.com/rapidsai/cudf/pull/14147
---
 cpp/benchmarks/io/cuio_common.cpp            | 18 ++---
 cpp/benchmarks/io/orc/orc_reader_options.cpp | 12 ++--
 .../io/parquet/parquet_reader_options.cpp    | 65 +++++++++++--------
 3 files changed, 53 insertions(+), 42 deletions(-)

diff --git a/cpp/benchmarks/io/cuio_common.cpp b/cpp/benchmarks/io/cuio_common.cpp
index 6b8af91b842..b1aaef41340 100644
--- a/cpp/benchmarks/io/cuio_common.cpp
+++ b/cpp/benchmarks/io/cuio_common.cpp
@@ -15,6 +15,7 @@
  */
 
 #include
+#include <cudf/detail/utilities/integer_utils.hpp>
 
 #include
 #include
@@ -141,17 +142,18 @@ std::vector<std::string> select_column_names(std::vector<std::string> const& col
   return col_names_to_read;
 }
 
-std::vector<int> segments_in_chunk(int num_segments, int num_chunks, int chunk)
+std::vector<int> segments_in_chunk(int num_segments, int num_chunks, int chunk_idx)
 {
   CUDF_EXPECTS(num_segments >= num_chunks,
                "Number of chunks cannot be greater than the number of segments in the file");
-  auto start_segment = [num_segments, num_chunks](int chunk) {
-    return num_segments * chunk / num_chunks;
-  };
-  std::vector<int> selected_segments;
-  for (auto segment = start_segment(chunk); segment < start_segment(chunk + 1); ++segment) {
-    selected_segments.push_back(segment);
-  }
+  CUDF_EXPECTS(chunk_idx < num_chunks,
+               "Chunk index must be smaller than the number of chunks in the file");
+
+  auto const segments_in_chunk = cudf::util::div_rounding_up_unsafe(num_segments, num_chunks);
+  auto const begin_segment     = std::min(chunk_idx * segments_in_chunk, num_segments);
+  auto const end_segment       = std::min(begin_segment + segments_in_chunk, num_segments);
+  std::vector<int> selected_segments(end_segment - begin_segment);
+  std::iota(selected_segments.begin(), selected_segments.end(), begin_segment);
 
   return selected_segments;
 }

diff --git a/cpp/benchmarks/io/orc/orc_reader_options.cpp b/cpp/benchmarks/io/orc/orc_reader_options.cpp
index 647a411c89d..1f656f7ea70 100644
--- a/cpp/benchmarks/io/orc/orc_reader_options.cpp
+++ b/cpp/benchmarks/io/orc/orc_reader_options.cpp
@@ -19,6 +19,7 @@
 #include
 #include
+#include <cudf/detail/utilities/integer_utils.hpp>
 #include
 #include
 #include
@@ -30,7 +31,7 @@ constexpr int64_t data_size = 512 << 20;
 
 // The number of separate read calls to use when reading files in multiple chunks
 // Each call reads roughly equal amounts of data
-constexpr int32_t chunked_read_num_chunks = 8;
+constexpr int32_t chunked_read_num_chunks = 4;
 
 std::vector<std::string> get_top_level_col_names(cudf::io::source_info const& source)
 {
@@ -88,7 +89,7 @@ void BM_orc_read_varying_options(nvbench::state& state,
   auto const num_stripes =
     cudf::io::read_orc_metadata(source_sink.make_source_info()).num_stripes();
 
-  cudf::size_type const chunk_row_cnt = view.num_rows() / num_chunks;
+  auto const chunk_row_cnt = cudf::util::div_rounding_up_unsafe(view.num_rows(), num_chunks);
   auto mem_stats_logger = cudf::memory_stats_logger();
   state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
@@ -99,7 +100,6 @@ void BM_orc_read_varying_options(nvbench::state& state,
       timer.start();
       cudf::size_type rows_read = 0;
       for (int32_t chunk = 0; chunk < num_chunks; ++chunk) {
-        auto const is_last_chunk = chunk == (num_chunks - 1);
         switch (RowSelection) {
           case row_selection::ALL: break;
           case row_selection::STRIPES:
            read_options.set_stripes({segments_in_chunk(num_stripes, num_chunks, chunk)});
            break;
          case
row_selection::NROWS: read_options.set_skip_rows(chunk * chunk_row_cnt); read_options.set_num_rows(chunk_row_cnt); - if (is_last_chunk) read_options.set_num_rows(-1); break; default: CUDF_FAIL("Unsupported row selection method"); } @@ -132,9 +131,6 @@ using col_selections = nvbench::enum_type_list; -using row_selections = - nvbench::enum_type_list; - NVBENCH_BENCH_TYPES(BM_orc_read_varying_options, NVBENCH_TYPE_AXES(col_selections, nvbench::enum_type_list, @@ -146,6 +142,8 @@ NVBENCH_BENCH_TYPES(BM_orc_read_varying_options, {"column_selection", "row_selection", "uses_index", "uses_numpy_dtype", "timestamp_type"}) .set_min_samples(4); +using row_selections = + nvbench::enum_type_list; NVBENCH_BENCH_TYPES(BM_orc_read_varying_options, NVBENCH_TYPE_AXES(nvbench::enum_type_list, row_selections, diff --git a/cpp/benchmarks/io/parquet/parquet_reader_options.cpp b/cpp/benchmarks/io/parquet/parquet_reader_options.cpp index 4105f2182d7..9f221de7da2 100644 --- a/cpp/benchmarks/io/parquet/parquet_reader_options.cpp +++ b/cpp/benchmarks/io/parquet/parquet_reader_options.cpp @@ -19,6 +19,7 @@ #include #include +#include #include #include @@ -26,21 +27,21 @@ // Size of the data in the benchmark dataframe; chosen to be low enough to allow benchmarks to // run on most GPUs, but large enough to allow highest throughput -constexpr std::size_t data_size = 512 << 20; -constexpr std::size_t row_group_size = 128 << 20; +constexpr std::size_t data_size = 512 << 20; +// The number of separate read calls to use when reading files in multiple chunks +// Each call reads roughly equal amounts of data +constexpr int32_t chunked_read_num_chunks = 4; std::vector get_top_level_col_names(cudf::io::source_info const& source) { - cudf::io::parquet_reader_options const read_options = - cudf::io::parquet_reader_options::builder(source); - auto const schema = cudf::io::read_parquet(read_options).metadata.schema_info; - - std::vector names; - names.reserve(schema.size()); - std::transform(schema.cbegin(), schema.cend(), std::back_inserter(names), [](auto const& c) { - return c.name; - }); - return names; + auto const top_lvl_cols = cudf::io::read_parquet_metadata(source).schema().root().children(); + std::vector col_names; + std::transform(top_lvl_cols.cbegin(), + top_lvl_cols.cend(), + std::back_inserter(col_names), + [](auto const& col_meta) { return col_meta.name(); }); + + return col_names; } template , nvbench::enum_type>) { + auto const num_chunks = RowSelection == row_selection::ALL ? 
1 : chunked_read_num_chunks; + auto constexpr str_to_categories = ConvertsStrings == converts_strings::YES; auto constexpr uses_pd_metadata = UsesPandasMetadata == uses_pandas_metadata::YES; @@ -87,9 +90,8 @@ void BM_parquet_read_options(nvbench::state& state, .use_pandas_metadata(uses_pd_metadata) .timestamp_type(ts_type); - // TODO: add read_parquet_metadata to properly calculate #row_groups - auto constexpr num_row_groups = data_size / row_group_size; - auto constexpr num_chunks = 1; + auto const num_row_groups = read_parquet_metadata(source_sink.make_source_info()).num_rowgroups(); + auto const chunk_row_cnt = cudf::util::div_rounding_up_unsafe(view.num_rows(), num_chunks); auto mem_stats_logger = cudf::memory_stats_logger(); state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value())); @@ -100,18 +102,15 @@ void BM_parquet_read_options(nvbench::state& state, timer.start(); cudf::size_type rows_read = 0; for (int32_t chunk = 0; chunk < num_chunks; ++chunk) { - auto const is_last_chunk = chunk == (num_chunks - 1); switch (RowSelection) { case row_selection::ALL: break; case row_selection::ROW_GROUPS: { - auto row_groups_to_read = segments_in_chunk(num_row_groups, num_chunks, chunk); - if (is_last_chunk) { - // Need to assume that an additional "overflow" row group is present - row_groups_to_read.push_back(num_row_groups); - } - read_options.set_row_groups({row_groups_to_read}); + read_options.set_row_groups({segments_in_chunk(num_row_groups, num_chunks, chunk)}); } break; - case row_selection::NROWS: [[fallthrough]]; + case row_selection::NROWS: + read_options.set_skip_rows(chunk * chunk_row_cnt); + read_options.set_num_rows(chunk_row_cnt); + break; default: CUDF_FAIL("Unsupported row selection method"); } @@ -130,14 +129,26 @@ void BM_parquet_read_options(nvbench::state& state, state.add_buffer_size(source_sink.size(), "encoded_file_size", "encoded_file_size"); } +using row_selections = + nvbench::enum_type_list; +NVBENCH_BENCH_TYPES(BM_parquet_read_options, + NVBENCH_TYPE_AXES(nvbench::enum_type_list, + row_selections, + nvbench::enum_type_list, + nvbench::enum_type_list, + nvbench::enum_type_list)) + .set_name("parquet_read_row_selection") + .set_type_axes_names({"column_selection", + "row_selection", + "str_to_categories", + "uses_pandas_metadata", + "timestamp_type"}) + .set_min_samples(4); + using col_selections = nvbench::enum_type_list; - -// TODO: row_selection::ROW_GROUPS disabled until we add an API to read metadata from a parquet file -// and determine num row groups. https://github.com/rapidsai/cudf/pull/9963#issuecomment-1004832863 - NVBENCH_BENCH_TYPES(BM_parquet_read_options, NVBENCH_TYPE_AXES(col_selections, nvbench::enum_type_list, From f3402c402c2d0be54a6f2060e1bd74e284c1e687 Mon Sep 17 00:00:00 2001 From: Suraj Aralihalli Date: Mon, 25 Sep 2023 14:10:44 -0700 Subject: [PATCH 106/150] Add stream parameter to external dict APIs (#14115) This PR adds stream parameter to public dictionary APIs, which include: 1. `cudf::dictionary::encode` 2. `cudf::dictionary::decode` 3. `cudf::dictionary::get_index` 4. `cudf::dictionary::add_keys` 5. `cudf::dictionary::remove_keys` 6. `cudf::dictionary::remove_unused_keys` 7. `cudf::dictionary::set_keys` 8. 
`cudf::dictionary::match_dictionaries` Reference [13744](https://github.com/rapidsai/cudf/issues/13744) Authors: - Suraj Aralihalli (https://github.com/SurajAralihalli) - Yunsong Wang (https://github.com/PointKernel) Approvers: - Vyas Ramasubramani (https://github.com/vyasr) URL: https://github.com/rapidsai/cudf/pull/14115 --- cpp/include/cudf/dictionary/encode.hpp | 6 +- cpp/include/cudf/dictionary/search.hpp | 6 +- cpp/include/cudf/dictionary/update_keys.hpp | 16 ++- cpp/include/cudf_test/column_wrapper.hpp | 18 +++- cpp/src/dictionary/add_keys.cu | 3 +- cpp/src/dictionary/decode.cu | 5 +- cpp/src/dictionary/encode.cu | 5 +- cpp/src/dictionary/remove_keys.cu | 6 +- cpp/src/dictionary/search.cu | 11 +- cpp/src/dictionary/set_keys.cu | 9 +- cpp/tests/CMakeLists.txt | 1 + cpp/tests/streams/dictionary_test.cpp | 105 ++++++++++++++++++++ 12 files changed, 164 insertions(+), 27 deletions(-) create mode 100644 cpp/tests/streams/dictionary_test.cpp diff --git a/cpp/include/cudf/dictionary/encode.hpp b/cpp/include/cudf/dictionary/encode.hpp index fb13eabe11a..959b785bf87 100644 --- a/cpp/include/cudf/dictionary/encode.hpp +++ b/cpp/include/cudf/dictionary/encode.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -53,12 +53,14 @@ namespace dictionary { * * @param column The column to dictionary encode * @param indices_type The integer type to use for the indices + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return Returns a dictionary column */ std::unique_ptr encode( column_view const& column, data_type indices_type = data_type{type_id::UINT32}, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -72,11 +74,13 @@ std::unique_ptr encode( * @endcode * * @param dictionary_column Existing dictionary column + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return New column with type matching the dictionary_column's keys */ std::unique_ptr decode( dictionary_column_view const& dictionary_column, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of group diff --git a/cpp/include/cudf/dictionary/search.hpp b/cpp/include/cudf/dictionary/search.hpp index ed7a9c84693..1b72cf42acd 100644 --- a/cpp/include/cudf/dictionary/search.hpp +++ b/cpp/include/cudf/dictionary/search.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -37,12 +37,14 @@ namespace dictionary { * * @param dictionary The dictionary to search for the key. * @param key The value to search for in the dictionary keyset. + * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device memory resource used to allocate the returned scalar's device memory. 
- * @return Numeric scalar index value of the key within the dictionary + * @return Numeric scalar index value of the key within the dictionary. */ std::unique_ptr get_index( dictionary_column_view const& dictionary, scalar const& key, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of group diff --git a/cpp/include/cudf/dictionary/update_keys.hpp b/cpp/include/cudf/dictionary/update_keys.hpp index 2fcfb5e1f7c..81728e1ff73 100644 --- a/cpp/include/cudf/dictionary/update_keys.hpp +++ b/cpp/include/cudf/dictionary/update_keys.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -51,13 +51,15 @@ namespace dictionary { * @throw cudf_logic_error if the new_keys contain nulls. * * @param dictionary_column Existing dictionary column. - * @param new_keys New keys to incorporate into the dictionary_column + * @param new_keys New keys to incorporate into the dictionary_column. + * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device memory resource used to allocate the returned column's device memory. * @return New dictionary column. */ std::unique_ptr add_keys( dictionary_column_view const& dictionary_column, column_view const& new_keys, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -81,13 +83,15 @@ std::unique_ptr add_keys( * @throw cudf_logic_error if the keys_to_remove contain nulls. * * @param dictionary_column Existing dictionary column. - * @param keys_to_remove The keys to remove from the dictionary_column + * @param keys_to_remove The keys to remove from the dictionary_column. + * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device memory resource used to allocate the returned column's device memory. * @return New dictionary column. */ std::unique_ptr remove_keys( dictionary_column_view const& dictionary_column, column_view const& keys_to_remove, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -103,11 +107,13 @@ std::unique_ptr remove_keys( * @endcode * * @param dictionary_column Existing dictionary column. + * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device memory resource used to allocate the returned column's device memory. * @return New dictionary column. */ std::unique_ptr remove_unused_keys( dictionary_column_view const& dictionary_column, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -134,12 +140,14 @@ std::unique_ptr remove_unused_keys( * * @param dictionary_column Existing dictionary column. * @param keys New keys to use for the output column. Must not contain nulls. + * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device memory resource used to allocate the returned column's device memory. * @return New dictionary column. 
*/ std::unique_ptr set_keys( dictionary_column_view const& dictionary_column, column_view const& keys, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -149,11 +157,13 @@ std::unique_ptr set_keys( * The result is a vector of new dictionaries with a common set of keys. * * @param input Dictionary columns to match keys. + * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device memory resource used to allocate the returned column's device memory. * @return New dictionary columns. */ std::vector> match_dictionaries( cudf::host_span input, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of group diff --git a/cpp/include/cudf_test/column_wrapper.hpp b/cpp/include/cudf_test/column_wrapper.hpp index cc8cac35ef4..c0932b81dc3 100644 --- a/cpp/include/cudf_test/column_wrapper.hpp +++ b/cpp/include/cudf_test/column_wrapper.hpp @@ -944,8 +944,10 @@ class dictionary_column_wrapper : public detail::column_wrapper { template dictionary_column_wrapper(InputIterator begin, InputIterator end) : column_wrapper{} { - wrapped = cudf::dictionary::encode( - fixed_width_column_wrapper(begin, end)); + wrapped = + cudf::dictionary::encode(fixed_width_column_wrapper(begin, end), + cudf::data_type{type_id::UINT32}, + cudf::test::get_default_stream()); } /** @@ -978,7 +980,9 @@ class dictionary_column_wrapper : public detail::column_wrapper { : column_wrapper{} { wrapped = cudf::dictionary::encode( - fixed_width_column_wrapper(begin, end, v)); + fixed_width_column_wrapper(begin, end, v), + cudf::data_type{type_id::UINT32}, + cudf::test::get_default_stream()); } /** @@ -1134,7 +1138,9 @@ class dictionary_column_wrapper : public detail::column_wrapper { template dictionary_column_wrapper(StringsIterator begin, StringsIterator end) : column_wrapper{} { - wrapped = cudf::dictionary::encode(strings_column_wrapper(begin, end)); + wrapped = cudf::dictionary::encode(strings_column_wrapper(begin, end), + cudf::data_type{type_id::UINT32}, + cudf::test::get_default_stream()); } /** @@ -1169,7 +1175,9 @@ class dictionary_column_wrapper : public detail::column_wrapper { dictionary_column_wrapper(StringsIterator begin, StringsIterator end, ValidityIterator v) : column_wrapper{} { - wrapped = cudf::dictionary::encode(strings_column_wrapper(begin, end, v)); + wrapped = cudf::dictionary::encode(strings_column_wrapper(begin, end, v), + cudf::data_type{type_id::UINT32}, + cudf::test::get_default_stream()); } /** diff --git a/cpp/src/dictionary/add_keys.cu b/cpp/src/dictionary/add_keys.cu index ab22c07e4d5..3973100aced 100644 --- a/cpp/src/dictionary/add_keys.cu +++ b/cpp/src/dictionary/add_keys.cu @@ -130,10 +130,11 @@ std::unique_ptr add_keys(dictionary_column_view const& dictionary_column std::unique_ptr add_keys(dictionary_column_view const& dictionary_column, column_view const& keys, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::add_keys(dictionary_column, keys, cudf::get_default_stream(), mr); + return detail::add_keys(dictionary_column, keys, stream, mr); } } // namespace dictionary diff --git a/cpp/src/dictionary/decode.cu b/cpp/src/dictionary/decode.cu index 01411d06b62..fdf546b5875 100644 --- a/cpp/src/dictionary/decode.cu +++ b/cpp/src/dictionary/decode.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA 
CORPORATION. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -65,10 +65,11 @@ std::unique_ptr decode(dictionary_column_view const& source, } // namespace detail std::unique_ptr decode(dictionary_column_view const& source, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::decode(source, cudf::get_default_stream(), mr); + return detail::decode(source, stream, mr); } } // namespace dictionary diff --git a/cpp/src/dictionary/encode.cu b/cpp/src/dictionary/encode.cu index fe8e777b694..c92b57f0cac 100644 --- a/cpp/src/dictionary/encode.cu +++ b/cpp/src/dictionary/encode.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -89,10 +89,11 @@ data_type get_indices_type_for_size(size_type keys_size) std::unique_ptr encode(column_view const& input_column, data_type indices_type, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::encode(input_column, indices_type, cudf::get_default_stream(), mr); + return detail::encode(input_column, indices_type, stream, mr); } } // namespace dictionary diff --git a/cpp/src/dictionary/remove_keys.cu b/cpp/src/dictionary/remove_keys.cu index 9fe4a63373b..86b70f1119b 100644 --- a/cpp/src/dictionary/remove_keys.cu +++ b/cpp/src/dictionary/remove_keys.cu @@ -195,17 +195,19 @@ std::unique_ptr remove_unused_keys(dictionary_column_view const& diction std::unique_ptr remove_keys(dictionary_column_view const& dictionary_column, column_view const& keys_to_remove, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::remove_keys(dictionary_column, keys_to_remove, cudf::get_default_stream(), mr); + return detail::remove_keys(dictionary_column, keys_to_remove, stream, mr); } std::unique_ptr remove_unused_keys(dictionary_column_view const& dictionary_column, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::remove_unused_keys(dictionary_column, cudf::get_default_stream(), mr); + return detail::remove_unused_keys(dictionary_column, stream, mr); } } // namespace dictionary diff --git a/cpp/src/dictionary/search.cu b/cpp/src/dictionary/search.cu index 8e97a387780..e35aded1984 100644 --- a/cpp/src/dictionary/search.cu +++ b/cpp/src/dictionary/search.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -79,10 +79,8 @@ struct find_index_fn { using ScalarType = cudf::scalar_type_t; auto find_key = static_cast(key).value(stream); auto keys_view = column_device_view::create(input.keys(), stream); - auto iter = thrust::equal_range(rmm::exec_policy(cudf::get_default_stream()), - keys_view->begin(), - keys_view->end(), - find_key); + auto iter = thrust::equal_range( + rmm::exec_policy(stream), keys_view->begin(), keys_view->end(), find_key); return type_dispatcher(input.indices().type(), dispatch_scalar_index{}, thrust::distance(keys_view->begin(), iter.first), @@ -176,10 +174,11 @@ std::unique_ptr get_insert_index(dictionary_column_view const& dictionar std::unique_ptr get_index(dictionary_column_view const& dictionary, scalar const& key, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::get_index(dictionary, key, cudf::get_default_stream(), mr); + return detail::get_index(dictionary, key, stream, mr); } } // namespace dictionary diff --git a/cpp/src/dictionary/set_keys.cu b/cpp/src/dictionary/set_keys.cu index 36f5021d305..b49cf7850b1 100644 --- a/cpp/src/dictionary/set_keys.cu +++ b/cpp/src/dictionary/set_keys.cu @@ -241,17 +241,20 @@ std::pair>, std::vector> match_d std::unique_ptr set_keys(dictionary_column_view const& dictionary_column, column_view const& keys, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::set_keys(dictionary_column, keys, cudf::get_default_stream(), mr); + return detail::set_keys(dictionary_column, keys, stream, mr); } std::vector> match_dictionaries( - cudf::host_span input, rmm::mr::device_memory_resource* mr) + cudf::host_span input, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::match_dictionaries(input, cudf::get_default_stream(), mr); + return detail::match_dictionaries(input, stream, mr); } } // namespace dictionary diff --git a/cpp/tests/CMakeLists.txt b/cpp/tests/CMakeLists.txt index 956bfc7c27d..68ff6c54c99 100644 --- a/cpp/tests/CMakeLists.txt +++ b/cpp/tests/CMakeLists.txt @@ -629,6 +629,7 @@ ConfigureTest(STREAM_HASHING_TEST streams/hash_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_INTEROP_TEST streams/interop_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_REPLACE_TEST streams/replace_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_SEARCH_TEST streams/search_test.cpp STREAM_MODE testing) +ConfigureTest(STREAM_DICTIONARY_TEST streams/dictionary_test.cpp STREAM_MODE testing) ConfigureTest( STREAM_STRINGS_TEST streams/strings/case_test.cpp streams/strings/find_test.cpp STREAM_MODE testing diff --git a/cpp/tests/streams/dictionary_test.cpp b/cpp/tests/streams/dictionary_test.cpp new file mode 100644 index 00000000000..f48e64c078e --- /dev/null +++ b/cpp/tests/streams/dictionary_test.cpp @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include <cudf_test/base_fixture.hpp>
+#include <cudf_test/column_wrapper.hpp>
+#include <cudf_test/default_stream.hpp>
+#include <cudf_test/iterator_utilities.hpp>
+#include <cudf_test/testing_main.hpp>
+
+#include <cudf/dictionary/encode.hpp>
+#include <cudf/dictionary/search.hpp>
+#include <cudf/dictionary/update_keys.hpp>
+
+class DictionaryTest : public cudf::test::BaseFixture {};
+
+TEST_F(DictionaryTest, Encode)
+{
+  cudf::test::fixed_width_column_wrapper<int32_t> col({1, 2, 3, 4, 5});
+  cudf::data_type int32_type(cudf::type_id::UINT32);
+  cudf::column_view col_view = col;
+  cudf::dictionary::encode(col_view, int32_type, cudf::test::get_default_stream());
+}
+
+TEST_F(DictionaryTest, Decode)
+{
+  // keys = {0, 2, 6}, indices = {0, 1, 1, 2, 2}
+  std::vector<int32_t> elements{0, 2, 2, 6, 6};
+  cudf::test::dictionary_column_wrapper<int32_t> dict_col(elements.begin(), elements.end());
+  cudf::dictionary_column_view dict_col_view = dict_col;
+  cudf::dictionary::decode(dict_col_view, cudf::test::get_default_stream());
+}
+
+TEST_F(DictionaryTest, GetIndex)
+{
+  std::vector<int32_t> elements{0, 2, 2, 6, 6};
+  cudf::test::dictionary_column_wrapper<int32_t> dict_col(elements.begin(), elements.end());
+  cudf::dictionary_column_view dict_col_view = dict_col;
+  cudf::numeric_scalar<int32_t> key_scalar(2, true, cudf::test::get_default_stream());
+  cudf::dictionary::get_index(dict_col_view, key_scalar, cudf::test::get_default_stream());
+}
+
+TEST_F(DictionaryTest, AddKeys)
+{
+  std::vector<int32_t> elements{0, 2, 2, 6, 6};
+  cudf::test::dictionary_column_wrapper<int32_t> dict_col(elements.begin(), elements.end());
+  cudf::dictionary_column_view dict_col_view = dict_col;
+  cudf::test::fixed_width_column_wrapper<int32_t> new_keys_col({8, 9});
+  cudf::dictionary::add_keys(dict_col_view, new_keys_col, cudf::test::get_default_stream());
+}
+
+TEST_F(DictionaryTest, RemoveKeys)
+{
+  std::vector<int32_t> elements{0, 2, 2, 6, 6};
+  cudf::test::dictionary_column_wrapper<int32_t> dict_col(elements.begin(), elements.end());
+  cudf::dictionary_column_view dict_col_view = dict_col;
+  cudf::test::fixed_width_column_wrapper<int32_t> keys_to_remove_col({2});
+  cudf::dictionary::remove_keys(
+    dict_col_view, keys_to_remove_col, cudf::test::get_default_stream());
+}
+
+TEST_F(DictionaryTest, RemoveUnusedKeys)
+{
+  std::vector<int32_t> elements{0, 2, 2, 6, 6};
+  cudf::test::dictionary_column_wrapper<int32_t> dict_col(elements.begin(), elements.end());
+  cudf::dictionary_column_view dict_col_view = dict_col;
+  cudf::dictionary::remove_unused_keys(dict_col_view, cudf::test::get_default_stream());
+}
+
+TEST_F(DictionaryTest, SetKeys)
+{
+  std::vector<int32_t> elements{0, 2, 2, 6, 6};
+  cudf::test::dictionary_column_wrapper<int32_t> dict_col(elements.begin(), elements.end());
+  cudf::dictionary_column_view dict_col_view = dict_col;
+  cudf::test::fixed_width_column_wrapper<int32_t> keys_col({2, 6});
+  cudf::dictionary::set_keys(dict_col_view, keys_col, cudf::test::get_default_stream());
+}
+
+TEST_F(DictionaryTest, MatchDictionaries)
+{
+  std::vector<int32_t> elements_a{0, 2, 2, 6, 6};
+  cudf::test::dictionary_column_wrapper<int32_t> dict_col_a(elements_a.begin(), elements_a.end());
+  cudf::dictionary_column_view dict_col_view_a = dict_col_a;
+
+  std::vector<int32_t> elements_b{1, 3, 4, 5, 5};
+  cudf::test::dictionary_column_wrapper<int32_t> dict_col_b(elements_b.begin(), elements_b.end());
+  cudf::dictionary_column_view dict_col_view_b = dict_col_b;
+
+  std::vector<cudf::dictionary_column_view> dicts = {dict_col_view_a, dict_col_view_b};
+
+  cudf::test::fixed_width_column_wrapper<int32_t> keys_col({2, 6});
+  cudf::dictionary::match_dictionaries(dicts, cudf::test::get_default_stream());
+}

From 2e1a17d6519ea018921e35075306e01b4fdddf72 Mon Sep 17 00:00:00 2001
From: Vyas Ramasubramani
Date: Mon, 25 Sep 2023 15:53:55 -0700
Subject: [PATCH 107/150] Replace Python scalar conversions with libcudf
 (#14124)

This PR replaces the various Cython converters for different libcudf scalar
types by using the new libcudf `[to|from]_arrow` overloads for scalars introduced in #14121. This change dramatically simplifies the Cython code and paves the way for implementation of a pylibcudf.Scalar object. Authors: - Vyas Ramasubramani (https://github.com/vyasr) Approvers: - https://github.com/brandon-b-miller URL: https://github.com/rapidsai/cudf/pull/14124 --- python/cudf/cudf/_lib/cpp/interop.pxd | 11 +- python/cudf/cudf/_lib/interop.pyx | 95 +++++- python/cudf/cudf/_lib/scalar.pyx | 448 +++++--------------------- python/cudf/cudf/tests/test_list.py | 4 +- python/cudf/cudf/tests/test_struct.py | 35 +- python/cudf/cudf/utils/dtypes.py | 18 -- 6 files changed, 210 insertions(+), 401 deletions(-) diff --git a/python/cudf/cudf/_lib/cpp/interop.pxd b/python/cudf/cudf/_lib/cpp/interop.pxd index e81f0d617fb..88e9d83ee98 100644 --- a/python/cudf/cudf/_lib/cpp/interop.pxd +++ b/python/cudf/cudf/_lib/cpp/interop.pxd @@ -1,12 +1,13 @@ -# Copyright (c) 2020, NVIDIA CORPORATION. +# Copyright (c) 2020-2023, NVIDIA CORPORATION. from libcpp.memory cimport shared_ptr, unique_ptr from libcpp.string cimport string from libcpp.vector cimport vector -from pyarrow.lib cimport CTable +from pyarrow.lib cimport CScalar, CTable from cudf._lib.types import cudf_to_np_types, np_to_cudf_types +from cudf._lib.cpp.scalar.scalar cimport scalar from cudf._lib.cpp.table.table cimport table from cudf._lib.cpp.table.table_view cimport table_view @@ -24,6 +25,7 @@ cdef extern from "cudf/interop.hpp" namespace "cudf" \ ) except + cdef unique_ptr[table] from_arrow(CTable input) except + + cdef unique_ptr[scalar] from_arrow(CScalar input) except + cdef cppclass column_metadata: column_metadata() except + @@ -35,3 +37,8 @@ cdef extern from "cudf/interop.hpp" namespace "cudf" \ table_view input, vector[column_metadata] metadata, ) except + + + cdef shared_ptr[CScalar] to_arrow( + const scalar& input, + column_metadata metadata, + ) except + diff --git a/python/cudf/cudf/_lib/interop.pyx b/python/cudf/cudf/_lib/interop.pyx index 8fd2a409d90..639754fc54f 100644 --- a/python/cudf/cudf/_lib/interop.pyx +++ b/python/cudf/cudf/_lib/interop.pyx @@ -4,7 +4,14 @@ from cpython cimport pycapsule from libcpp.memory cimport shared_ptr, unique_ptr from libcpp.utility cimport move from libcpp.vector cimport vector -from pyarrow.lib cimport CTable, pyarrow_unwrap_table, pyarrow_wrap_table +from pyarrow.lib cimport ( + CScalar, + CTable, + pyarrow_unwrap_scalar, + pyarrow_unwrap_table, + pyarrow_wrap_scalar, + pyarrow_wrap_table, +) from cudf._lib.cpp.interop cimport ( DLManagedTensor, @@ -14,12 +21,22 @@ from cudf._lib.cpp.interop cimport ( to_arrow as cpp_to_arrow, to_dlpack as cpp_to_dlpack, ) +from cudf._lib.cpp.scalar.scalar cimport fixed_point_scalar, scalar from cudf._lib.cpp.table.table cimport table from cudf._lib.cpp.table.table_view cimport table_view +from cudf._lib.cpp.types cimport type_id +from cudf._lib.cpp.wrappers.decimals cimport ( + decimal32, + decimal64, + decimal128, + scale_type, +) +from cudf._lib.scalar cimport DeviceScalar from cudf._lib.utils cimport columns_from_unique_ptr, table_view_from_columns from cudf.api.types import is_list_dtype, is_struct_dtype from cudf.core.buffer import acquire_spill_lock +from cudf.core.dtypes import Decimal32Dtype, Decimal64Dtype def from_dlpack(dlpack_capsule): @@ -182,3 +199,79 @@ def from_arrow(object input_table): c_result = move(cpp_from_arrow(cpp_arrow_table.get()[0])) return columns_from_unique_ptr(move(c_result)) + + +@acquire_spill_lock() +def 
to_arrow_scalar(DeviceScalar source_scalar):
+    """Convert a scalar to a PyArrow scalar.
+
+    Parameters
+    ----------
+    source_scalar : the scalar to convert
+
+    Returns
+    -------
+    pyarrow.lib.Scalar
+    """
+    cdef vector[column_metadata] cpp_metadata = gather_metadata(
+        [("", source_scalar.dtype)]
+    )
+    cdef const scalar* source_scalar_ptr = source_scalar.get_raw_ptr()
+
+    cdef shared_ptr[CScalar] cpp_arrow_scalar
+    with nogil:
+        cpp_arrow_scalar = cpp_to_arrow(
+            source_scalar_ptr[0], cpp_metadata[0]
+        )
+
+    return pyarrow_wrap_scalar(cpp_arrow_scalar)
+
+
+@acquire_spill_lock()
+def from_arrow_scalar(object input_scalar, output_dtype=None):
+    """Convert from PyArrow scalar to a cudf scalar.
+
+    Parameters
+    ----------
+    input_scalar : PyArrow scalar
+    output_dtype : output type to cast to, ignored except for decimals
+
+    Returns
+    -------
+    cudf._lib.DeviceScalar
+    """
+    cdef shared_ptr[CScalar] cpp_arrow_scalar = (
+        pyarrow_unwrap_scalar(input_scalar)
+    )
+    cdef unique_ptr[scalar] c_result
+
+    with nogil:
+        c_result = move(cpp_from_arrow(cpp_arrow_scalar.get()[0]))
+
+    cdef type_id ctype = c_result.get().type().id()
+    if ctype == type_id.DECIMAL128:
+        if output_dtype is None:
+            # Decimals must be cast to the cudf dtype of the right width
+            raise ValueError(
+                "Decimal scalars must be constructed with a dtype"
+            )
+
+        if isinstance(output_dtype, Decimal32Dtype):
+            c_result.reset(
+                new fixed_point_scalar[decimal32](
+                    (<fixed_point_scalar[decimal128]*> c_result.get()).value(),
+                    scale_type(-input_scalar.type.scale),
+                    c_result.get().is_valid()
+                )
+            )
+        elif isinstance(output_dtype, Decimal64Dtype):
+            c_result.reset(
+                new fixed_point_scalar[decimal64](
+                    (<fixed_point_scalar[decimal128]*> c_result.get()).value(),
+                    scale_type(-input_scalar.type.scale),
+                    c_result.get().is_valid()
+                )
+            )
+        # Decimal128Dtype is a no-op, no conversion needed.
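+        # Hypothetical usage sketch (values illustrative):
+        #   from_arrow_scalar(pa.scalar(Decimal("1.23"), pa.decimal128(9, 2)),
+        #                     cudf.Decimal32Dtype(9, 2))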
+ + return DeviceScalar.from_unique_ptr(move(c_result), output_dtype) diff --git a/python/cudf/cudf/_lib/scalar.pyx b/python/cudf/cudf/_lib/scalar.pyx index 0407785b2d8..5ab286c5701 100644 --- a/python/cudf/cudf/_lib/scalar.pyx +++ b/python/cudf/cudf/_lib/scalar.pyx @@ -2,22 +2,13 @@ cimport cython -import decimal +import copy import numpy as np import pandas as pd import pyarrow as pa -from libc.stdint cimport ( - int8_t, - int16_t, - int32_t, - int64_t, - uint8_t, - uint16_t, - uint32_t, - uint64_t, -) +from libc.stdint cimport int64_t from libcpp cimport bool from libcpp.memory cimport unique_ptr from libcpp.utility cimport move @@ -25,38 +16,22 @@ from libcpp.utility cimport move from rmm._lib.memory_resource cimport get_current_device_resource import cudf -from cudf._lib.types import ( - LIBCUDF_TO_SUPPORTED_NUMPY_TYPES, - datetime_unit_map, - duration_unit_map, -) +from cudf._lib.types import LIBCUDF_TO_SUPPORTED_NUMPY_TYPES from cudf.core.dtypes import ListDtype, StructDtype from cudf.core.missing import NA, NaT -from cudf._lib.column cimport Column -from cudf._lib.cpp.column.column_view cimport column_view -from cudf._lib.cpp.table.table_view cimport table_view from cudf._lib.types cimport dtype_from_column_view, underlying_type_t_type_id -from cudf._lib.interop import from_arrow, to_arrow +from cudf._lib.interop import from_arrow_scalar, to_arrow_scalar cimport cudf._lib.cpp.types as libcudf_types from cudf._lib.cpp.scalar.scalar cimport ( duration_scalar, - fixed_point_scalar, list_scalar, - numeric_scalar, scalar, - string_scalar, struct_scalar, timestamp_scalar, ) -from cudf._lib.cpp.wrappers.decimals cimport ( - decimal32, - decimal64, - decimal128, - scale_type, -) from cudf._lib.cpp.wrappers.durations cimport ( duration_ms, duration_ns, @@ -69,7 +44,21 @@ from cudf._lib.cpp.wrappers.timestamps cimport ( timestamp_s, timestamp_us, ) -from cudf._lib.utils cimport columns_from_table_view, table_view_from_columns + + +def _replace_nested(obj, check, replacement): + if isinstance(obj, list): + for i, item in enumerate(obj): + if check(item): + obj[i] = replacement + elif isinstance(item, (dict, list)): + _replace_nested(item, check, replacement) + elif isinstance(obj, dict): + for k, v in obj.items(): + if check(v): + obj[k] = replacement + elif isinstance(v, (dict, list)): + _replace_nested(v, check, replacement) # The DeviceMemoryResource attribute could be released prematurely @@ -97,61 +86,61 @@ cdef class DeviceScalar: A NumPy dtype. """ self._dtype = dtype if dtype.kind != 'U' else cudf.dtype('object') - self._set_value(value, self._dtype) - - def _set_value(self, value, dtype): - # IMPORTANT: this should only ever be called from __init__ - valid = not _is_null_host_scalar(value) - - if isinstance(dtype, cudf.core.dtypes.DecimalDtype): - _set_decimal_from_scalar( - self.c_value, value, dtype, valid) - elif isinstance(dtype, cudf.ListDtype): - _set_list_from_pylist( - self.c_value, value, dtype, valid) - elif isinstance(dtype, cudf.StructDtype): - _set_struct_from_pydict(self.c_value, value, dtype, valid) + + if cudf.utils.utils.is_na_like(value): + value = None + else: + # TODO: For now we always deepcopy the input value to avoid + # overwriting the input values when replacing nulls. Since it's + # just host values it's not that expensive, but we could consider + # alternatives. 
+ value = copy.deepcopy(value) + _replace_nested(value, cudf.utils.utils.is_na_like, None) + + if isinstance(dtype, cudf.core.dtypes._BaseDtype): + pa_type = dtype.to_arrow() elif pd.api.types.is_string_dtype(dtype): - _set_string_from_np_string(self.c_value, value, valid) - elif pd.api.types.is_numeric_dtype(dtype): - _set_numeric_from_np_scalar(self.c_value, - value, - dtype, - valid) - elif pd.api.types.is_datetime64_dtype(dtype): - _set_datetime64_from_np_scalar( - self.c_value, value, dtype, valid - ) - elif pd.api.types.is_timedelta64_dtype(dtype): - _set_timedelta64_from_np_scalar( - self.c_value, value, dtype, valid - ) + # Have to manually convert object types, which we use internally + # for strings but pyarrow only supports as unicode 'U' + pa_type = pa.string() else: - raise ValueError( - f"Cannot convert value of type " - f"{type(value).__name__} to cudf scalar" - ) + pa_type = pa.from_numpy_dtype(dtype) + + pa_scalar = pa.scalar(value, type=pa_type) + + # Note: This factory-like behavior in __init__ will be removed when + # migrating to pylibcudf. + cdef DeviceScalar obj = from_arrow_scalar(pa_scalar, self._dtype) + self.c_value.swap(obj.c_value) def _to_host_scalar(self): - if isinstance(self.dtype, cudf.core.dtypes.DecimalDtype): - result = _get_py_decimal_from_fixed_point(self.c_value) - elif cudf.api.types.is_struct_dtype(self.dtype): - result = _get_py_dict_from_struct(self.c_value, self.dtype) - elif cudf.api.types.is_list_dtype(self.dtype): - result = _get_py_list_from_list(self.c_value, self.dtype) - elif pd.api.types.is_string_dtype(self.dtype): - result = _get_py_string_from_string(self.c_value) - elif pd.api.types.is_numeric_dtype(self.dtype): - result = _get_np_scalar_from_numeric(self.c_value) - elif pd.api.types.is_datetime64_dtype(self.dtype): - result = _get_np_scalar_from_timestamp64(self.c_value) - elif pd.api.types.is_timedelta64_dtype(self.dtype): - result = _get_np_scalar_from_timedelta64(self.c_value) + is_datetime = self.dtype.kind == "M" + is_timedelta = self.dtype.kind == "m" + + null_type = NaT if is_datetime or is_timedelta else NA + + ps = to_arrow_scalar(self) + if not ps.is_valid: + return null_type + + # TODO: The special handling of specific types below does not currently + # extend to nested types containing those types (e.g. List[timedelta] + # where the timedelta would overflow). We should eventually account for + # those cases, but that will require more careful consideration of how + # to traverse the contents of the nested data. 
+ if is_datetime or is_timedelta: + time_unit, _ = np.datetime_data(self.dtype) + # Cast to int64 to avoid overflow + ps_cast = ps.cast('int64').as_py() + out_type = np.datetime64 if is_datetime else np.timedelta64 + ret = out_type(ps_cast, time_unit) + elif cudf.api.types.is_numeric_dtype(self.dtype): + ret = ps.type.to_pandas_dtype()(ps.as_py()) else: - raise ValueError( - "Could not convert cudf::scalar to a Python value" - ) - return result + ret = ps.as_py() + + _replace_nested(ret, lambda item: item is None, NA) + return ret @property def dtype(self): @@ -236,42 +225,9 @@ cdef class DeviceScalar: return s -cdef _set_string_from_np_string(unique_ptr[scalar]& s, value, bool valid=True): - value = value if valid else "" - s.reset(new string_scalar(value.encode(), valid)) - - -cdef _set_numeric_from_np_scalar(unique_ptr[scalar]& s, - object value, - object dtype, - bool valid=True): - value = value if valid else 0 - if dtype == "int8": - s.reset(new numeric_scalar[int8_t](value, valid)) - elif dtype == "int16": - s.reset(new numeric_scalar[int16_t](value, valid)) - elif dtype == "int32": - s.reset(new numeric_scalar[int32_t](value, valid)) - elif dtype == "int64": - s.reset(new numeric_scalar[int64_t](value, valid)) - elif dtype == "uint8": - s.reset(new numeric_scalar[uint8_t](value, valid)) - elif dtype == "uint16": - s.reset(new numeric_scalar[uint16_t](value, valid)) - elif dtype == "uint32": - s.reset(new numeric_scalar[uint32_t](value, valid)) - elif dtype == "uint64": - s.reset(new numeric_scalar[uint64_t](value, valid)) - elif dtype == "float32": - s.reset(new numeric_scalar[float](value, valid)) - elif dtype == "float64": - s.reset(new numeric_scalar[double](value, valid)) - elif dtype == "bool": - s.reset(new numeric_scalar[bool](value, valid)) - else: - raise ValueError(f"dtype not supported: {dtype}") - - +# TODO: Currently the only uses of this function and the one below are in +# _create_proxy_nat_scalar. See if that code path can be simplified to excise +# or at least simplify these implementations. 
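+# A rough sketch (hypothetical session) of the arrow-based round trip that
+# replaced the hand-written setters and getters removed in this change:
+#
+#     ds = DeviceScalar(1.5, np.dtype("float64"))  # pa.scalar + from_arrow_scalar
+#     ds._to_host_scalar()                         # to_arrow_scalar + as_py -> 1.5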
cdef _set_datetime64_from_np_scalar(unique_ptr[scalar]& s, object value, object dtype, @@ -324,253 +280,6 @@ cdef _set_timedelta64_from_np_scalar(unique_ptr[scalar]& s, else: raise ValueError(f"dtype not supported: {dtype}") -cdef _set_decimal_from_scalar(unique_ptr[scalar]& s, - object value, - object dtype, - bool valid=True): - value = cudf.utils.dtypes._decimal_to_int64(value) if valid else 0 - if isinstance(dtype, cudf.Decimal64Dtype): - s.reset( - new fixed_point_scalar[decimal64]( - np.int64(value), scale_type(-dtype.scale), valid - ) - ) - elif isinstance(dtype, cudf.Decimal32Dtype): - s.reset( - new fixed_point_scalar[decimal32]( - np.int32(value), scale_type(-dtype.scale), valid - ) - ) - elif isinstance(dtype, cudf.Decimal128Dtype): - s.reset( - new fixed_point_scalar[decimal128]( - value, scale_type(-dtype.scale), valid - ) - ) - else: - raise ValueError(f"dtype not supported: {dtype}") - -cdef _set_struct_from_pydict(unique_ptr[scalar]& s, - object value, - object dtype, - bool valid=True): - arrow_schema = dtype.to_arrow() - columns = [str(i) for i in range(len(arrow_schema))] - if valid: - pyarrow_table = pa.Table.from_arrays( - [ - pa.array([value[f.name]], from_pandas=True, type=f.type) - for f in arrow_schema - ], - names=columns - ) - else: - pyarrow_table = pa.Table.from_arrays( - [ - pa.array([NA], from_pandas=True, type=f.type) - for f in arrow_schema - ], - names=columns - ) - - data = from_arrow(pyarrow_table) - cdef table_view struct_view = table_view_from_columns(data) - - s.reset( - new struct_scalar(struct_view, valid) - ) - -cdef _get_py_dict_from_struct(unique_ptr[scalar]& s, dtype): - if not s.get()[0].is_valid(): - return NA - - cdef table_view struct_table_view = (s.get()).view() - columns = columns_from_table_view(struct_table_view, None) - struct_col = cudf.core.column.build_struct_column( - names=dtype.fields.keys(), - children=tuple(columns), - size=1, - ) - table = to_arrow([struct_col], [("None", dtype)]) - python_dict = table.to_pydict()["None"][0] - return {k: _nested_na_replace([python_dict[k]])[0] for k in python_dict} - -cdef _set_list_from_pylist(unique_ptr[scalar]& s, - object value, - object dtype, - bool valid=True): - - value = value if valid else [NA] - cdef Column col - if isinstance(dtype.element_type, ListDtype): - pa_type = dtype.element_type.to_arrow() - else: - pa_type = dtype.to_arrow().value_type - col = cudf.core.column.as_column( - pa.array(value, from_pandas=True, type=pa_type) - ) - cdef column_view col_view = col.view() - s.reset( - new list_scalar(col_view, valid) - ) - - -cdef _get_py_list_from_list(unique_ptr[scalar]& s, dtype): - - if not s.get()[0].is_valid(): - return NA - - cdef column_view list_col_view = (s.get()).view() - cdef Column element_col = Column.from_column_view(list_col_view, None) - - arrow_obj = to_arrow([element_col], [("None", dtype.element_type)])["None"] - - result = arrow_obj.to_pylist() - return _nested_na_replace(result) - - -cdef _get_py_string_from_string(unique_ptr[scalar]& s): - if not s.get()[0].is_valid(): - return NA - return (s.get())[0].to_string().decode() - - -cdef _get_np_scalar_from_numeric(unique_ptr[scalar]& s): - cdef scalar* s_ptr = s.get() - if not s_ptr[0].is_valid(): - return NA - - cdef libcudf_types.data_type cdtype = s_ptr[0].type() - - if cdtype.id() == libcudf_types.type_id.INT8: - return np.int8((s_ptr)[0].value()) - elif cdtype.id() == libcudf_types.type_id.INT16: - return np.int16((s_ptr)[0].value()) - elif cdtype.id() == libcudf_types.type_id.INT32: - return 
np.int32((s_ptr)[0].value()) - elif cdtype.id() == libcudf_types.type_id.INT64: - return np.int64((s_ptr)[0].value()) - elif cdtype.id() == libcudf_types.type_id.UINT8: - return np.uint8((s_ptr)[0].value()) - elif cdtype.id() == libcudf_types.type_id.UINT16: - return np.uint16((s_ptr)[0].value()) - elif cdtype.id() == libcudf_types.type_id.UINT32: - return np.uint32((s_ptr)[0].value()) - elif cdtype.id() == libcudf_types.type_id.UINT64: - return np.uint64((s_ptr)[0].value()) - elif cdtype.id() == libcudf_types.type_id.FLOAT32: - return np.float32((s_ptr)[0].value()) - elif cdtype.id() == libcudf_types.type_id.FLOAT64: - return np.float64((s_ptr)[0].value()) - elif cdtype.id() == libcudf_types.type_id.BOOL8: - return np.bool_((s_ptr)[0].value()) - else: - raise ValueError("Could not convert cudf::scalar to numpy scalar") - - -cdef _get_py_decimal_from_fixed_point(unique_ptr[scalar]& s): - cdef scalar* s_ptr = s.get() - if not s_ptr[0].is_valid(): - return NA - - cdef libcudf_types.data_type cdtype = s_ptr[0].type() - - if cdtype.id() == libcudf_types.type_id.DECIMAL64: - rep_val = int((s_ptr)[0].value()) - scale = int((s_ptr)[0].type().scale()) - return decimal.Decimal(rep_val).scaleb(scale) - elif cdtype.id() == libcudf_types.type_id.DECIMAL32: - rep_val = int((s_ptr)[0].value()) - scale = int((s_ptr)[0].type().scale()) - return decimal.Decimal(rep_val).scaleb(scale) - elif cdtype.id() == libcudf_types.type_id.DECIMAL128: - rep_val = int((s_ptr)[0].value()) - scale = int((s_ptr)[0].type().scale()) - return decimal.Decimal(rep_val).scaleb(scale) - else: - raise ValueError("Could not convert cudf::scalar to numpy scalar") - -cdef _get_np_scalar_from_timestamp64(unique_ptr[scalar]& s): - - cdef scalar* s_ptr = s.get() - - if not s_ptr[0].is_valid(): - return NaT - - cdef libcudf_types.data_type cdtype = s_ptr[0].type() - - if cdtype.id() == libcudf_types.type_id.TIMESTAMP_SECONDS: - return np.datetime64( - ( - s_ptr - )[0].ticks_since_epoch_64(), - datetime_unit_map[(cdtype.id())] - ) - elif cdtype.id() == libcudf_types.type_id.TIMESTAMP_MILLISECONDS: - return np.datetime64( - ( - s_ptr - )[0].ticks_since_epoch_64(), - datetime_unit_map[(cdtype.id())] - ) - elif cdtype.id() == libcudf_types.type_id.TIMESTAMP_MICROSECONDS: - return np.datetime64( - ( - s_ptr - )[0].ticks_since_epoch_64(), - datetime_unit_map[(cdtype.id())] - ) - elif cdtype.id() == libcudf_types.type_id.TIMESTAMP_NANOSECONDS: - return np.datetime64( - ( - s_ptr - )[0].ticks_since_epoch_64(), - datetime_unit_map[(cdtype.id())] - ) - else: - raise ValueError("Could not convert cudf::scalar to numpy scalar") - - -cdef _get_np_scalar_from_timedelta64(unique_ptr[scalar]& s): - - cdef scalar* s_ptr = s.get() - - if not s_ptr[0].is_valid(): - return NaT - - cdef libcudf_types.data_type cdtype = s_ptr[0].type() - - if cdtype.id() == libcudf_types.type_id.DURATION_SECONDS: - return np.timedelta64( - ( - s_ptr - )[0].ticks(), - duration_unit_map[(cdtype.id())] - ) - elif cdtype.id() == libcudf_types.type_id.DURATION_MILLISECONDS: - return np.timedelta64( - ( - s_ptr - )[0].ticks(), - duration_unit_map[(cdtype.id())] - ) - elif cdtype.id() == libcudf_types.type_id.DURATION_MICROSECONDS: - return np.timedelta64( - ( - s_ptr - )[0].ticks(), - duration_unit_map[(cdtype.id())] - ) - elif cdtype.id() == libcudf_types.type_id.DURATION_NANOSECONDS: - return np.timedelta64( - ( - s_ptr - )[0].ticks(), - duration_unit_map[(cdtype.id())] - ) - else: - raise ValueError("Could not convert cudf::scalar to numpy scalar") - def as_device_scalar(val, 
dtype=None): if isinstance(val, (cudf.Scalar, DeviceScalar)): @@ -607,16 +316,3 @@ def _create_proxy_nat_scalar(dtype): return result else: raise TypeError('NAT only valid for datetime and timedelta') - - -def _nested_na_replace(input_list): - ''' - Replace `None` with `cudf.NA` in the result of - `__getitem__` calls to list type columns - ''' - for idx, value in enumerate(input_list): - if isinstance(value, list): - _nested_na_replace(value) - elif value is None: - input_list[idx] = NA - return input_list diff --git a/python/cudf/cudf/tests/test_list.py b/python/cudf/cudf/tests/test_list.py index 5dd58d8a875..ac10dd97c56 100644 --- a/python/cudf/cudf/tests/test_list.py +++ b/python/cudf/cudf/tests/test_list.py @@ -895,14 +895,14 @@ def test_memory_usage(): "data, idx", [ ( - [[{"f2": {"a": 100}, "f1": "a"}, {"f1": "sf12", "f2": None}]], + [[{"f2": {"a": 100}, "f1": "a"}, {"f1": "sf12", "f2": NA}]], 0, ), ( [ [ {"f2": {"a": 100, "c": 90, "f2": 10}, "f1": "a"}, - {"f1": "sf12", "f2": None}, + {"f1": "sf12", "f2": NA}, ] ], 0, diff --git a/python/cudf/cudf/tests/test_struct.py b/python/cudf/cudf/tests/test_struct.py index a3593e55b97..ce6dc587320 100644 --- a/python/cudf/cudf/tests/test_struct.py +++ b/python/cudf/cudf/tests/test_struct.py @@ -150,9 +150,7 @@ def test_struct_setitem(data, item): "data", [ {"a": 1, "b": "rapids", "c": [1, 2, 3, 4]}, - {"a": 1, "b": "rapids", "c": [1, 2, 3, 4], "d": cudf.NA}, {"a": "Hello"}, - {"b": [], "c": [1, 2, 3]}, ], ) def test_struct_scalar_host_construction(data): @@ -161,6 +159,39 @@ def test_struct_scalar_host_construction(data): assert list(slr.device_value.value.values()) == list(data.values()) +@pytest.mark.parametrize( + ("data", "dtype"), + [ + ( + {"a": 1, "b": "rapids", "c": [1, 2, 3, 4], "d": cudf.NA}, + cudf.StructDtype( + { + "a": np.dtype(np.int64), + "b": np.dtype(np.str_), + "c": cudf.ListDtype(np.dtype(np.int64)), + "d": np.dtype(np.int64), + } + ), + ), + ( + {"b": [], "c": [1, 2, 3]}, + cudf.StructDtype( + { + "b": cudf.ListDtype(np.dtype(np.int64)), + "c": cudf.ListDtype(np.dtype(np.int64)), + } + ), + ), + ], +) +def test_struct_scalar_host_construction_no_dtype_inference(data, dtype): + # cudf cannot infer the dtype of the scalar when it contains only nulls or + # is empty. + slr = cudf.Scalar(data, dtype=dtype) + assert slr.value == data + assert list(slr.device_value.value.values()) == list(data.values()) + + def test_struct_scalar_null(): slr = cudf.Scalar(cudf.NA, dtype=StructDtype) assert slr.device_value.value is cudf.NA diff --git a/python/cudf/cudf/utils/dtypes.py b/python/cudf/cudf/utils/dtypes.py index 1b94db75340..73ea8e2cfc4 100644 --- a/python/cudf/cudf/utils/dtypes.py +++ b/python/cudf/cudf/utils/dtypes.py @@ -463,24 +463,6 @@ def _get_nan_for_dtype(dtype): return np.float64("nan") -def _decimal_to_int64(decimal: Decimal) -> int: - """ - Scale a Decimal such that the result is the integer - that would result from removing the decimal point. 
- - Examples - -------- - >>> _decimal_to_int64(Decimal('1.42')) - 142 - >>> _decimal_to_int64(Decimal('0.0042')) - 42 - >>> _decimal_to_int64(Decimal('-1.004201')) - -1004201 - - """ - return int(f"{decimal:0f}".replace(".", "")) - - def get_allowed_combinations_for_operator(dtype_l, dtype_r, op): error = TypeError( f"{op} not supported between {dtype_l} and {dtype_r} scalars" From daea8c8bc37ec53b7347857a3b6795bcb0ad86ff Mon Sep 17 00:00:00 2001 From: AJ Schmidt Date: Tue, 26 Sep 2023 09:11:31 -0400 Subject: [PATCH 108/150] Disable `Recently Updated` Check (#14193) This check occasionally hangs for `cudf` for unknown reasons. Upon checking the application logs, the GitHub API seems to be returning responses that aren't helpful in troubleshooting the problem. Therefore, it's probably best to just remove the check to avoid confusion. [skip ci] Authors: - AJ Schmidt (https://github.com/ajschmidt8) --- .github/ops-bot.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/ops-bot.yaml b/.github/ops-bot.yaml index 9a0b4155035..d2ca78924e1 100644 --- a/.github/ops-bot.yaml +++ b/.github/ops-bot.yaml @@ -5,4 +5,3 @@ auto_merger: true branch_checker: true label_checker: true release_drafter: true -recently_updated: true From 3196f6c36140962818aa8d12fe4fbd0dc522e31e Mon Sep 17 00:00:00 2001 From: Jake Awe <50372925+AyodeAwe@users.noreply.github.com> Date: Tue, 26 Sep 2023 11:54:18 -0500 Subject: [PATCH 109/150] update rmm tag path (#14195) PR updates the download path of the `rmm` tag used in `build_docs.sh` following the re-arrangement of the docs directories. Authors: - Jake Awe (https://github.com/AyodeAwe) Approvers: - AJ Schmidt (https://github.com/ajschmidt8) URL: https://github.com/rapidsai/cudf/pull/14195 --- ci/build_docs.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/build_docs.sh b/ci/build_docs.sh index 1ed047a500b..9149b5e6bfe 100755 --- a/ci/build_docs.sh +++ b/ci/build_docs.sh @@ -30,7 +30,7 @@ export RAPIDS_DOCS_DIR="$(mktemp -d)" rapids-logger "Build CPP docs" pushd cpp/doxygen -aws s3 cp s3://rapidsai-docs/librmm/${RAPIDS_VERSION_NUMBER}/html/rmm.tag . || echo "Failed to download rmm Doxygen tag" +aws s3 cp s3://rapidsai-docs/librmm/html/${RAPIDS_VERSION_NUMBER}/rmm.tag . || echo "Failed to download rmm Doxygen tag" doxygen Doxyfile mkdir -p "${RAPIDS_DOCS_DIR}/libcudf/html" mv html/* "${RAPIDS_DOCS_DIR}/libcudf/html" From a9ec350217331979359c50ea1da9457e9973f719 Mon Sep 17 00:00:00 2001 From: GALI PREM SAGAR Date: Tue, 26 Sep 2023 14:32:04 -0500 Subject: [PATCH 110/150] Fix pytorch related pytest (#14198) Calling `cudf.Index([])` results in `str` dtype `Index`. This PR fixes an issue with a pytorch related pytest by explicitly passing a `float64` dtype. xref: https://github.com/rapidsai/cudf/pull/14116 Authors: - GALI PREM SAGAR (https://github.com/galipremsagar) Approvers: - https://github.com/brandon-b-miller URL: https://github.com/rapidsai/cudf/pull/14198 --- python/cudf/cudf/tests/test_cuda_array_interface.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/cudf/cudf/tests/test_cuda_array_interface.py b/python/cudf/cudf/tests/test_cuda_array_interface.py index e81f4ec795a..848c77206b2 100644 --- a/python/cudf/cudf/tests/test_cuda_array_interface.py +++ b/python/cudf/cudf/tests/test_cuda_array_interface.py @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2022, NVIDIA CORPORATION. +# Copyright (c) 2019-2023, NVIDIA CORPORATION. 
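+# A hypothetical REPL sketch of the failure mode this patch addresses: with no
+# dtype given, an empty Index is inferred as str (object), which torch cannot
+# convert to a tensor:
+#
+#     >>> cudf.Index([]).dtype
+#     dtype('O')
+#     >>> torch.tensor(cudf.Index([], dtype="float64"))  # convertible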
import types from contextlib import ExitStack as does_not_raise @@ -193,7 +193,7 @@ def test_cuda_array_interface_pytorch(): assert_eq(got, cudf.Series(buffer, dtype=np.bool_)) - index = cudf.Index([]) + index = cudf.Index([], dtype="float64") tensor = torch.tensor(index) got = cudf.Index(tensor) assert_eq(got, index) From 030c0f4995ec458fcfc00a4ebb3aa8bccb2b27a0 Mon Sep 17 00:00:00 2001 From: Yunsong Wang Date: Tue, 26 Sep 2023 12:42:12 -0700 Subject: [PATCH 111/150] Refactor `contains_table` with cuco::static_set (#14064) Contributes to #12261 This PR refactors `contains_table` to use the new `cuco::static_set` data structure. It also adds a `contains_table` benchmark to track the performance before and after this work. Authors: - Yunsong Wang (https://github.com/PointKernel) Approvers: - Nghia Truong (https://github.com/ttnghia) - Bradley Dice (https://github.com/bdice) URL: https://github.com/rapidsai/cudf/pull/14064 --- cpp/benchmarks/CMakeLists.txt | 2 +- .../{contains.cpp => contains_scalar.cpp} | 0 cpp/benchmarks/search/contains_table.cpp | 73 ++++ cpp/include/cudf/detail/search.hpp | 2 + cpp/src/search/contains_table.cu | 319 +++++++++--------- 5 files changed, 229 insertions(+), 167 deletions(-) rename cpp/benchmarks/search/{contains.cpp => contains_scalar.cpp} (100%) create mode 100644 cpp/benchmarks/search/contains_table.cpp diff --git a/cpp/benchmarks/CMakeLists.txt b/cpp/benchmarks/CMakeLists.txt index 5e7862f4b3b..cd6b3cfdc03 100644 --- a/cpp/benchmarks/CMakeLists.txt +++ b/cpp/benchmarks/CMakeLists.txt @@ -173,7 +173,7 @@ ConfigureBench(ITERATOR_BENCH iterator/iterator.cu) # ################################################################################################## # * search benchmark ------------------------------------------------------------------------------ ConfigureBench(SEARCH_BENCH search/search.cpp) -ConfigureNVBench(SEARCH_NVBENCH search/contains.cpp) +ConfigureNVBench(SEARCH_NVBENCH search/contains_scalar.cpp search/contains_table.cpp) # ################################################################################################## # * sort benchmark -------------------------------------------------------------------------------- diff --git a/cpp/benchmarks/search/contains.cpp b/cpp/benchmarks/search/contains_scalar.cpp similarity index 100% rename from cpp/benchmarks/search/contains.cpp rename to cpp/benchmarks/search/contains_scalar.cpp diff --git a/cpp/benchmarks/search/contains_table.cpp b/cpp/benchmarks/search/contains_table.cpp new file mode 100644 index 00000000000..17702d0741c --- /dev/null +++ b/cpp/benchmarks/search/contains_table.cpp @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +#include +#include +#include + +#include + +#include + +auto constexpr num_unique_elements = 1000; + +template +static void nvbench_contains_table(nvbench::state& state, nvbench::type_list) +{ + auto const size = state.get_int64("table_size"); + auto const dtype = cudf::type_to_id(); + double const null_probability = state.get_float64("null_probability"); + + auto builder = data_profile_builder().null_probability(null_probability); + if (dtype == cudf::type_id::LIST) { + builder.distribution(dtype, distribution_id::UNIFORM, 0, num_unique_elements) + .distribution(cudf::type_id::INT32, distribution_id::UNIFORM, 0, num_unique_elements) + .list_depth(1); + } else { + builder.distribution(dtype, distribution_id::UNIFORM, 0, num_unique_elements); + } + + auto const haystack = create_random_table( + {dtype}, table_size_bytes{static_cast(size)}, data_profile{builder}, 0); + auto const needles = create_random_table( + {dtype}, table_size_bytes{static_cast(size)}, data_profile{builder}, 1); + + auto mem_stats_logger = cudf::memory_stats_logger(); + + state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) { + auto const stream_view = rmm::cuda_stream_view{launch.get_stream()}; + [[maybe_unused]] auto const result = + cudf::detail::contains(haystack->view(), + needles->view(), + cudf::null_equality::EQUAL, + cudf::nan_equality::ALL_EQUAL, + stream_view, + rmm::mr::get_current_device_resource()); + }); + + state.add_buffer_size( + mem_stats_logger.peak_memory_usage(), "peak_memory_usage", "peak_memory_usage"); +} + +NVBENCH_BENCH_TYPES(nvbench_contains_table, + NVBENCH_TYPE_AXES(nvbench::type_list)) + .set_name("contains_table") + .set_type_axes_names({"type"}) + .add_float64_axis("null_probability", {0.0, 0.1}) + .add_int64_axis("table_size", {10'000, 100'000, 1'000'000, 10'000'000}); diff --git a/cpp/include/cudf/detail/search.hpp b/cpp/include/cudf/detail/search.hpp index 4c4ad7834f4..4277baf3edd 100644 --- a/cpp/include/cudf/detail/search.hpp +++ b/cpp/include/cudf/detail/search.hpp @@ -81,6 +81,8 @@ std::unique_ptr contains(column_view const& haystack, * output = { false, true, true } * @endcode * + * @throws cudf::logic_error If column types of haystack and needles don't match + * * @param haystack The table containing the search space * @param needles A table of rows whose existence to check in the search space * @param compare_nulls Control whether nulls should be compared as equal or not diff --git a/cpp/src/search/contains_table.cu b/cpp/src/search/contains_table.cu index e37f0686ac3..43624ba691d 100644 --- a/cpp/src/search/contains_table.cu +++ b/cpp/src/search/contains_table.cu @@ -26,7 +26,7 @@ #include -#include +#include #include @@ -37,69 +37,59 @@ namespace { using cudf::experimental::row::lhs_index_type; using cudf::experimental::row::rhs_index_type; -using static_map = cuco::static_map>>; - /** - * @brief Check if the given type `T` is a strong index type (i.e., `lhs_index_type` or - * `rhs_index_type`). 
- * - * @return A boolean value indicating if `T` is a strong index type + * @brief An hasher adapter wrapping both haystack hasher and needles hasher */ -template -constexpr auto is_strong_index_type() -{ - return std::is_same_v || std::is_same_v; -} +template +struct hasher_adapter { + hasher_adapter(HaystackHasher const& haystack_hasher, NeedleHasher const& needle_hasher) + : _haystack_hasher{haystack_hasher}, _needle_hasher{needle_hasher} + { + } -/** - * @brief An adapter functor to support strong index types for row hasher that must be operating on - * `cudf::size_type`. - */ -template -struct strong_index_hasher_adapter { - strong_index_hasher_adapter(Hasher const& hasher) : _hasher{hasher} {} + __device__ constexpr auto operator()(lhs_index_type idx) const noexcept + { + return _haystack_hasher(static_cast(idx)); + } - template ())> - __device__ constexpr auto operator()(T const idx) const noexcept + __device__ constexpr auto operator()(rhs_index_type idx) const noexcept { - return _hasher(static_cast(idx)); + return _needle_hasher(static_cast(idx)); } private: - Hasher const _hasher; + HaystackHasher const _haystack_hasher; + NeedleHasher const _needle_hasher; }; /** - * @brief An adapter functor to support strong index type for table row comparator that must be - * operating on `cudf::size_type`. + * @brief An comparator adapter wrapping both self comparator and two table comparator */ -template -struct strong_index_comparator_adapter { - strong_index_comparator_adapter(Comparator const& comparator) : _comparator{comparator} {} - - template () && is_strong_index_type())> - __device__ constexpr auto operator()(T const lhs_index, U const rhs_index) const noexcept +template +struct comparator_adapter { + comparator_adapter(SelfEqual const& self_equal, TwoTableEqual const& two_table_equal) + : _self_equal{self_equal}, _two_table_equal{two_table_equal} + { + } + + __device__ constexpr auto operator()(lhs_index_type lhs_index, + lhs_index_type rhs_index) const noexcept { auto const lhs = static_cast(lhs_index); auto const rhs = static_cast(rhs_index); - if constexpr (std::is_same_v || std::is_same_v) { - return _comparator(lhs, rhs); - } else { - // Here we have T == rhs_index_type. - // This is when the indices are provided in wrong order for two table comparator, so we need - // to switch them back to the right order before calling the underlying comparator. - return _comparator(rhs, lhs); - } + return _self_equal(lhs, rhs); + } + + __device__ constexpr auto operator()(lhs_index_type lhs_index, + rhs_index_type rhs_index) const noexcept + { + return _two_table_equal(lhs_index, rhs_index); } private: - Comparator const _comparator; + SelfEqual const _self_equal; + TwoTableEqual const _two_table_equal; }; /** @@ -134,38 +124,62 @@ std::pair build_row_bitmask(table_view } /** - * @brief Invoke an `operator()` template with a row equality comparator based on the specified - * `compare_nans` parameter. 
+ * @brief Invokes the given `func` with desired comparators based on the specified `compare_nans` + * parameter + * + * @tparam HasNested Flag indicating whether there are nested columns in haystack or needles + * @tparam Hasher Type of device hash function + * @tparam Func Type of the helper function doing `contains` check * - * @param compare_nans The flag to specify whether NaNs should be compared equal or not + * @param compare_nulls Control whether nulls should be compared as equal or not + * @param compare_nans Control whether floating-point NaNs values should be compared as equal or not + * @param haystack_has_nulls Flag indicating whether haystack has nulls or not + * @param has_any_nulls Flag indicating whether there are nested nulls is either haystack or needles + * @param self_equal Self table comparator + * @param two_table_equal Two table comparator + * @param d_hasher Device hash functor * @param func The input functor to invoke */ -template -void dispatch_nan_comparator(nan_equality compare_nans, Func&& func) +template +void dispatch_nan_comparator( + null_equality compare_nulls, + nan_equality compare_nans, + bool haystack_has_nulls, + bool has_any_nulls, + cudf::experimental::row::equality::self_comparator self_equal, + cudf::experimental::row::equality::two_table_comparator two_table_equal, + Hasher const& d_hasher, + Func&& func) { + // Distinguish probing scheme CG sizes between nested and flat types for better performance + auto const probing_scheme = [&]() { + if constexpr (HasNested) { + return cuco::experimental::linear_probing<4, Hasher>{d_hasher}; + } else { + return cuco::experimental::linear_probing<1, Hasher>{d_hasher}; + } + }(); + if (compare_nans == nan_equality::ALL_EQUAL) { using nan_equal_comparator = cudf::experimental::row::equality::nan_equal_physical_equality_comparator; - func(nan_equal_comparator{}); + auto const d_self_equal = self_equal.equal_to( + nullate::DYNAMIC{haystack_has_nulls}, compare_nulls, nan_equal_comparator{}); + auto const d_two_table_equal = two_table_equal.equal_to( + nullate::DYNAMIC{has_any_nulls}, compare_nulls, nan_equal_comparator{}); + func(d_self_equal, d_two_table_equal, probing_scheme); } else { using nan_unequal_comparator = cudf::experimental::row::equality::physical_equality_comparator; - func(nan_unequal_comparator{}); + auto const d_self_equal = self_equal.equal_to( + nullate::DYNAMIC{haystack_has_nulls}, compare_nulls, nan_unequal_comparator{}); + auto const d_two_table_equal = two_table_equal.equal_to( + nullate::DYNAMIC{has_any_nulls}, compare_nulls, nan_unequal_comparator{}); + func(d_self_equal, d_two_table_equal, probing_scheme); } } } // namespace -/** - * @brief Check if rows in the given `needles` table exist in the `haystack` table. 
- * - * @param haystack The table containing the search space - * @param needles A table of rows whose existence to check in the search space - * @param compare_nulls Control whether nulls should be compared as equal or not - * @param compare_nans Control whether floating-point NaNs values should be compared as equal or not - * @param stream CUDA stream used for device memory operations and kernel launches - * @param mr Device memory resource used to allocate the returned vector - * @return A vector of bools indicating if each row in `needles` has matching rows in `haystack` - */ rmm::device_uvector contains(table_view const& haystack, table_view const& needles, null_equality compare_nulls, @@ -173,124 +187,97 @@ rmm::device_uvector contains(table_view const& haystack, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { - auto map = static_map(compute_hash_table_size(haystack.num_rows()), - cuco::empty_key{lhs_index_type{std::numeric_limits::max()}}, - cuco::empty_value{detail::JoinNoneValue}, - detail::hash_table_allocator_type{default_allocator{}, stream}, - stream.value()); + CUDF_EXPECTS(cudf::have_same_types(haystack, needles), "Column types mismatch"); auto const haystack_has_nulls = has_nested_nulls(haystack); auto const needles_has_nulls = has_nested_nulls(needles); auto const has_any_nulls = haystack_has_nulls || needles_has_nulls; + auto const preprocessed_needles = + cudf::experimental::row::equality::preprocessed_table::create(needles, stream); auto const preprocessed_haystack = cudf::experimental::row::equality::preprocessed_table::create(haystack, stream); - // Insert row indices of the haystack table as map keys. - { - auto const haystack_it = cudf::detail::make_counting_transform_iterator( - size_type{0}, - [] __device__(auto const idx) { return cuco::make_pair(lhs_index_type{idx}, 0); }); - - auto const hasher = cudf::experimental::row::hash::row_hasher(preprocessed_haystack); - auto const d_hasher = - strong_index_hasher_adapter{hasher.device_hasher(nullate::DYNAMIC{has_any_nulls})}; - - auto const comparator = - cudf::experimental::row::equality::self_comparator(preprocessed_haystack); - - // If the haystack table has nulls but they are compared unequal, don't insert them. - // Otherwise, it was known to cause performance issue: - // - https://github.com/rapidsai/cudf/pull/6943 - // - https://github.com/rapidsai/cudf/pull/8277 - if (haystack_has_nulls && compare_nulls == null_equality::UNEQUAL) { - auto const bitmask_buffer_and_ptr = build_row_bitmask(haystack, stream); - auto const row_bitmask_ptr = bitmask_buffer_and_ptr.second; - - auto const insert_map = [&](auto const value_comp) { - if (cudf::detail::has_nested_columns(haystack)) { - auto const d_eqcomp = strong_index_comparator_adapter{comparator.equal_to( - nullate::DYNAMIC{haystack_has_nulls}, compare_nulls, value_comp)}; - map.insert_if(haystack_it, - haystack_it + haystack.num_rows(), - thrust::counting_iterator(0), // stencil - row_is_valid{row_bitmask_ptr}, - d_hasher, - d_eqcomp, - stream.value()); - } else { - auto const d_eqcomp = strong_index_comparator_adapter{comparator.equal_to( - nullate::DYNAMIC{haystack_has_nulls}, compare_nulls, value_comp)}; - map.insert_if(haystack_it, - haystack_it + haystack.num_rows(), - thrust::counting_iterator(0), // stencil - row_is_valid{row_bitmask_ptr}, - d_hasher, - d_eqcomp, - stream.value()); - } - }; - - // Insert only rows that do not have any null at any level. 
- dispatch_nan_comparator(compare_nans, insert_map); - } else { // haystack_doesn't_have_nulls || compare_nulls == null_equality::EQUAL - auto const insert_map = [&](auto const value_comp) { - if (cudf::detail::has_nested_columns(haystack)) { - auto const d_eqcomp = strong_index_comparator_adapter{comparator.equal_to( - nullate::DYNAMIC{haystack_has_nulls}, compare_nulls, value_comp)}; - map.insert( - haystack_it, haystack_it + haystack.num_rows(), d_hasher, d_eqcomp, stream.value()); - } else { - auto const d_eqcomp = strong_index_comparator_adapter{comparator.equal_to( - nullate::DYNAMIC{haystack_has_nulls}, compare_nulls, value_comp)}; - map.insert( - haystack_it, haystack_it + haystack.num_rows(), d_hasher, d_eqcomp, stream.value()); - } - }; - - dispatch_nan_comparator(compare_nans, insert_map); - } - } + + auto const haystack_hasher = cudf::experimental::row::hash::row_hasher(preprocessed_haystack); + auto const d_haystack_hasher = haystack_hasher.device_hasher(nullate::DYNAMIC{has_any_nulls}); + auto const needle_hasher = cudf::experimental::row::hash::row_hasher(preprocessed_needles); + auto const d_needle_hasher = needle_hasher.device_hasher(nullate::DYNAMIC{has_any_nulls}); + auto const d_hasher = hasher_adapter{d_haystack_hasher, d_needle_hasher}; + + auto const self_equal = cudf::experimental::row::equality::self_comparator(preprocessed_haystack); + auto const two_table_equal = cudf::experimental::row::equality::two_table_comparator( + preprocessed_haystack, preprocessed_needles); // The output vector. auto contained = rmm::device_uvector(needles.num_rows(), stream, mr); - auto const preprocessed_needles = - cudf::experimental::row::equality::preprocessed_table::create(needles, stream); - // Check existence for each row of the needles table in the haystack table. 
- { - auto const needles_it = cudf::detail::make_counting_transform_iterator( - size_type{0}, [] __device__(auto const idx) { return rhs_index_type{idx}; }); - - auto const hasher = cudf::experimental::row::hash::row_hasher(preprocessed_needles); - auto const d_hasher = - strong_index_hasher_adapter{hasher.device_hasher(nullate::DYNAMIC{has_any_nulls})}; - - auto const comparator = cudf::experimental::row::equality::two_table_comparator( - preprocessed_haystack, preprocessed_needles); - - auto const check_contains = [&](auto const value_comp) { - if (cudf::detail::has_nested_columns(haystack) or cudf::detail::has_nested_columns(needles)) { - auto const d_eqcomp = - comparator.equal_to(nullate::DYNAMIC{has_any_nulls}, compare_nulls, value_comp); - map.contains(needles_it, - needles_it + needles.num_rows(), - contained.begin(), - d_hasher, - d_eqcomp, - stream.value()); + auto const haystack_iter = cudf::detail::make_counting_transform_iterator( + size_type{0}, [] __device__(auto idx) { return lhs_index_type{idx}; }); + auto const needles_iter = cudf::detail::make_counting_transform_iterator( + size_type{0}, [] __device__(auto idx) { return rhs_index_type{idx}; }); + + auto const helper_func = + [&](auto const& d_self_equal, auto const& d_two_table_equal, auto const& probing_scheme) { + auto const d_equal = comparator_adapter{d_self_equal, d_two_table_equal}; + + auto set = cuco::experimental::static_set{ + cuco::experimental::extent{compute_hash_table_size(haystack.num_rows())}, + cuco::empty_key{lhs_index_type{-1}}, + d_equal, + probing_scheme, + detail::hash_table_allocator_type{default_allocator{}, stream}, + stream.value()}; + + if (haystack_has_nulls && compare_nulls == null_equality::UNEQUAL) { + auto const bitmask_buffer_and_ptr = build_row_bitmask(haystack, stream); + auto const row_bitmask_ptr = bitmask_buffer_and_ptr.second; + + // If the haystack table has nulls but they are compared unequal, don't insert them. 
+ // Otherwise, it was known to cause performance issue: + // - https://github.com/rapidsai/cudf/pull/6943 + // - https://github.com/rapidsai/cudf/pull/8277 + set.insert_if_async(haystack_iter, + haystack_iter + haystack.num_rows(), + thrust::counting_iterator(0), // stencil + row_is_valid{row_bitmask_ptr}, + stream.value()); } else { - auto const d_eqcomp = - comparator.equal_to(nullate::DYNAMIC{has_any_nulls}, compare_nulls, value_comp); - map.contains(needles_it, - needles_it + needles.num_rows(), - contained.begin(), - d_hasher, - d_eqcomp, - stream.value()); + set.insert_async(haystack_iter, haystack_iter + haystack.num_rows(), stream.value()); + } + + if (needles_has_nulls && compare_nulls == null_equality::UNEQUAL) { + auto const bitmask_buffer_and_ptr = build_row_bitmask(needles, stream); + auto const row_bitmask_ptr = bitmask_buffer_and_ptr.second; + set.contains_if_async(needles_iter, + needles_iter + needles.num_rows(), + thrust::counting_iterator(0), // stencil + row_is_valid{row_bitmask_ptr}, + contained.begin(), + stream.value()); + } else { + set.contains_async( + needles_iter, needles_iter + needles.num_rows(), contained.begin(), stream.value()); } }; - dispatch_nan_comparator(compare_nans, check_contains); + if (cudf::detail::has_nested_columns(haystack)) { + dispatch_nan_comparator(compare_nulls, + compare_nans, + haystack_has_nulls, + has_any_nulls, + self_equal, + two_table_equal, + d_hasher, + helper_func); + } else { + dispatch_nan_comparator(compare_nulls, + compare_nans, + haystack_has_nulls, + has_any_nulls, + self_equal, + two_table_equal, + d_hasher, + helper_func); } return contained; From b25b292f7f97cbb681f0244e1a20b30a925145a1 Mon Sep 17 00:00:00 2001 From: David Wendt <45795991+davidwendt@users.noreply.github.com> Date: Tue, 26 Sep 2023 18:53:43 -0400 Subject: [PATCH 112/150] Add nvtext::tokenize_with_vocabulary API (#13930) Adds tokenize with vocabulary APIs to libcudf. ``` struct tokenize_vocabulary{ ... }; std::unique_ptr load_vocabulary( cudf::strings_column_view const& input, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); std::unique_ptr tokenize_with_vocabulary( cudf::strings_column_view const& input, tokenize_vocabulary const& vocabulary, cudf::string_scalar const& delimiter, cudf::size_type default_id, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); ``` Returns an integer lists column replacing individual tokens as resolved from the `input` using `delimiter` with id values which are the row indices of the input `vocabulary` column. If a token is not found in the `vocabulary` it is assigned `default_id`. The vocabulary can be loaded once using the `nvtext::load_vocabulary()` API and then used in repeated calls to `nvtext::tokenize_with_vocabulary()` with different input columns. 
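A minimal reuse sketch (illustrative only; `vocab_col` and `inputs` are
placeholder names, and the defaulted stream/memory-resource arguments of the
declarations above are assumed):

```
auto vocab = nvtext::load_vocabulary(cudf::strings_column_view(vocab_col));
// hypothetical loop over several input columns sharing one loaded vocabulary
for (auto const& input : inputs) {
  auto ids = nvtext::tokenize_with_vocabulary(
    cudf::strings_column_view(input), *vocab, cudf::string_scalar(" "), -1);
}
```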
The Python interface is a new class, `TokenizeVocabulary`, which can be used like the following:
```
>>> import cudf
>>> from cudf.core.tokenize_vocabulary import TokenizeVocabulary
>>> words = cudf.Series( ['brown', 'the', 'dog', 'jumps'] )
>>> vocab = TokenizeVocabulary(words)
>>> s = cudf.Series( ['the brown dog jumps over the brown cat'] )
>>> print(vocab.tokenize(s))
0    [1, 0, 2, 3, -1, 1, 0, -1]
dtype: list
```

Authors:
  - David Wendt (https://github.com/davidwendt)

Approvers:
  - Robert Maynard (https://github.com/robertmaynard)
  - https://github.com/nvdbaranec
  - Bradley Dice (https://github.com/bdice)

URL: https://github.com/rapidsai/cudf/pull/13930
---
 cpp/CMakeLists.txt                            |   1 +
 cpp/include/nvtext/tokenize.hpp               |  78 ++++
 cpp/src/text/vocabulary_tokenize.cu           | 257 ++++++++++++++++++
 cpp/tests/text/tokenize_tests.cpp             |  93 +++++--
 python/cudf/cudf/_lib/cpp/nvtext/tokenize.pxd |  17 +-
 python/cudf/cudf/_lib/nvtext/tokenize.pyx     |  40 ++-
 python/cudf/cudf/_lib/strings/__init__.py     |   1 +
 python/cudf/cudf/core/tokenize_vocabulary.py  |  48 ++++
 .../cudf/cudf/tests/text/test_text_methods.py |  59 ++++
 9 files changed, 574 insertions(+), 20 deletions(-)
 create mode 100644 python/cudf/cudf/core/tokenize_vocabulary.py

diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt
index a84f7bd5224..9656bc40fd7 100644
--- a/cpp/CMakeLists.txt
+++ b/cpp/CMakeLists.txt
@@ -618,6 +618,7 @@ add_library(
   src/text/subword/subword_tokenize.cu
   src/text/subword/wordpiece_tokenizer.cu
   src/text/tokenize.cu
+  src/text/vocabulary_tokenize.cu
   src/transform/bools_to_mask.cu
   src/transform/compute_column.cu
   src/transform/encode.cu
diff --git a/cpp/include/nvtext/tokenize.hpp b/cpp/include/nvtext/tokenize.hpp
index a72f7dcfa59..44f8f44557c 100644
--- a/cpp/include/nvtext/tokenize.hpp
+++ b/cpp/include/nvtext/tokenize.hpp
@@ -215,5 +215,83 @@ std::unique_ptr<cudf::column> detokenize(
   cudf::string_scalar const& separator = cudf::string_scalar(" "),
   rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());

+/**
+ * @brief Vocabulary object to be used with nvtext::tokenize_with_vocabulary
+ *
+ * Use nvtext::load_vocabulary to create this object.
+ */
+struct tokenize_vocabulary {
+  /**
+   * @brief Vocabulary object constructor
+   *
+   * Token ids are the row indices within the vocabulary column.
+   * Each vocabulary entry is expected to be unique otherwise the behavior is undefined.
+   *
+   * @throw cudf::logic_error if `vocabulary` contains nulls or is empty
+   *
+   * @param input Strings for the vocabulary
+   * @param stream CUDA stream used for device memory operations and kernel launches
+   * @param mr Device memory resource used to allocate the returned column's device memory
+   */
+  tokenize_vocabulary(cudf::strings_column_view const& input,
+                      rmm::cuda_stream_view stream = cudf::get_default_stream(),
+                      rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
+  ~tokenize_vocabulary();
+
+  struct tokenize_vocabulary_impl;
+  tokenize_vocabulary_impl* _impl{};
+};
+
+/**
+ * @brief Create a tokenize_vocabulary object from a strings column
+ *
+ * Token ids are the row indices within the vocabulary column.
+ * Each vocabulary entry is expected to be unique otherwise the behavior is undefined.
+ * + * @throw cudf::logic_error if `vocabulary` contains nulls or is empty + * + * @param input Strings for the vocabulary + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return Object to be used with nvtext::tokenize_with_vocabulary + */ +std::unique_ptr load_vocabulary( + cudf::strings_column_view const& input, + rmm::cuda_stream_view stream = cudf::get_default_stream(), + rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); + +/** + * @brief Returns the token ids for the input string by looking up each delimited + * token in the given vocabulary + * + * @code{.pseudo} + * Example: + * s = ["hello world", "hello there", "there there world", "watch out world"] + * v = load_vocabulary(["hello", "there", "world"]) + * r = tokenize_with_vocabulary(s,v) + * r is now [[0,2], [0,1], [1,1,2], [-1,-1,2]] + * @endcode + * + * Any null row entry results in a corresponding null entry in the output + * + * @throw cudf::logic_error if `delimiter` is invalid + * + * @param input Strings column to tokenize + * @param vocabulary Used to lookup tokens within + * @param delimiter Used to identify tokens within `input` + * @param default_id The token id to be used for tokens not found in the `vocabulary`; + * Default is -1 + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return Lists column of token ids + */ +std::unique_ptr tokenize_with_vocabulary( + cudf::strings_column_view const& input, + tokenize_vocabulary const& vocabulary, + cudf::string_scalar const& delimiter, + cudf::size_type default_id = -1, + rmm::cuda_stream_view stream = cudf::get_default_stream(), + rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); + /** @} */ // end of tokenize group } // namespace nvtext diff --git a/cpp/src/text/vocabulary_tokenize.cu b/cpp/src/text/vocabulary_tokenize.cu new file mode 100644 index 00000000000..f998c9ec239 --- /dev/null +++ b/cpp/src/text/vocabulary_tokenize.cu @@ -0,0 +1,257 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +namespace nvtext { +namespace detail { +namespace { + +using string_hasher_type = cudf::hashing::detail::MurmurHash3_x86_32; +using hash_value_type = string_hasher_type::result_type; + +/** + * @brief Hasher function used for building and using the cuco static-map + * + * This takes advantage of heterogeneous lookup feature in cuco static-map which + * allows inserting with one type (index) and looking up with a different type (string). 
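+ *
+ * For example (illustrative), given a hasher h{d_strings} where row 3 of the
+ * vocabulary holds "dog", both call paths produce the same hash value:
+ *
+ *   h(cudf::size_type{3});           // insert path: hashes d_strings row 3
+ *   h(cudf::string_view{"dog", 3});  // find path: hashes the probe string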
+ */ +struct vocab_hasher { + cudf::column_device_view const d_strings; + string_hasher_type hasher{}; + // used by insert + __device__ hash_value_type operator()(cudf::size_type index) const + { + return hasher(d_strings.element(index)); + } + // used by find + __device__ hash_value_type operator()(cudf::string_view const& s) const { return hasher(s); } +}; + +/** + * @brief Equal function used for building and using the cuco static-map + * + * This takes advantage of heterogeneous lookup feature in cuco static-map which + * allows inserting with one type (index) and looking up with a different type (string). + */ +struct vocab_equal { + cudf::column_device_view const d_strings; + // used by insert + __device__ bool operator()(cudf::size_type lhs, cudf::size_type rhs) const noexcept + { + return lhs == rhs; // all rows are expected to be unique + } + // used by find + __device__ bool operator()(cudf::size_type lhs, cudf::string_view const& rhs) const noexcept + { + return d_strings.element(lhs) == rhs; + } +}; + +using hash_table_allocator_type = rmm::mr::stream_allocator_adaptor>; +using probe_scheme = cuco::experimental::linear_probing<1, vocab_hasher>; +using vocabulary_map_type = cuco::experimental::static_map, + cuda::thread_scope_device, + vocab_equal, + probe_scheme, + hash_table_allocator_type>; +} // namespace +} // namespace detail + +// since column_device_view::create returns is a little more than +// std::unique_ptr this helper simplifies the return type in a maintainable way +using col_device_view = std::invoke_result_t; + +struct tokenize_vocabulary::tokenize_vocabulary_impl { + std::unique_ptr const vocabulary; + col_device_view const d_vocabulary; + std::unique_ptr vocabulary_map; + + auto get_map_ref() const { return vocabulary_map->ref(cuco::experimental::op::find); } + + tokenize_vocabulary_impl(std::unique_ptr&& vocab, + col_device_view&& d_vocab, + std::unique_ptr&& map) + : vocabulary(std::move(vocab)), d_vocabulary(std::move(d_vocab)), vocabulary_map(std::move(map)) + { + } +}; + +struct key_pair { + __device__ auto operator()(cudf::size_type idx) const noexcept + { + return cuco::make_pair(idx, idx); + } +}; + +tokenize_vocabulary::tokenize_vocabulary(cudf::strings_column_view const& input, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + CUDF_EXPECTS(not input.is_empty(), "vocabulary must not be empty"); + CUDF_EXPECTS(not input.has_nulls(), "vocabulary must not have nulls"); + + // need to hold a copy of the input + auto vocabulary = std::make_unique(input.parent(), stream, mr); + auto d_vocabulary = cudf::column_device_view::create(vocabulary->view(), stream); + + auto vocab_map = std::make_unique( + static_cast(vocabulary->size() * 2), + cuco::empty_key{-1}, + cuco::empty_value{-1}, + detail::vocab_equal{*d_vocabulary}, + detail::probe_scheme{detail::vocab_hasher{*d_vocabulary}}, + detail::hash_table_allocator_type{default_allocator{}, stream}, + stream.value()); + + // the row index is the token id (value for each key in the map) + auto iter = cudf::detail::make_counting_transform_iterator(0, key_pair{}); + vocab_map->insert_async(iter, iter + vocabulary->size(), stream.value()); + + _impl = new tokenize_vocabulary_impl( + std::move(vocabulary), std::move(d_vocabulary), std::move(vocab_map)); +} +tokenize_vocabulary::~tokenize_vocabulary() { delete _impl; } + +std::unique_ptr load_vocabulary(cudf::strings_column_view const& input, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + CUDF_FUNC_RANGE(); + return 
std::make_unique<tokenize_vocabulary>(input, stream, mr);
+}
+
+namespace detail {
+namespace {
+
+/**
+ * @brief Tokenizes each string and uses the map to assign token id values
+ *
+ * @tparam MapRefType Type of the static_map reference for calling find()
+ */
+template <typename MapRefType>
+struct vocabulary_tokenizer_fn {
+  cudf::column_device_view const d_strings;
+  cudf::string_view const d_delimiter;
+  MapRefType d_map;
+  cudf::size_type const default_id;
+  cudf::size_type const* d_offsets;
+  cudf::size_type* d_results;
+
+  __device__ void operator()(cudf::size_type idx) const
+  {
+    if (d_strings.is_null(idx)) { return; }
+
+    auto const d_str = d_strings.element<cudf::string_view>(idx);
+    characters_tokenizer tokenizer(d_str, d_delimiter);
+    auto d_tokens = d_results + d_offsets[idx];
+
+    cudf::size_type token_idx = 0;
+    while (tokenizer.next_token()) {
+      auto const pos = tokenizer.token_byte_positions();
+      auto const token = cudf::string_view{d_str.data() + pos.first, (pos.second - pos.first)};
+      // lookup token in map
+      auto const itr = d_map.find(token);
+      auto const id = (itr != d_map.end()) ? itr->second : default_id;
+      // set value into the output
+      d_tokens[token_idx++] = id;
+    }
+  }
+};
+
+} // namespace
+
+std::unique_ptr<cudf::column> tokenize_with_vocabulary(cudf::strings_column_view const& input,
+                                                       tokenize_vocabulary const& vocabulary,
+                                                       cudf::string_scalar const& delimiter,
+                                                       cudf::size_type default_id,
+                                                       rmm::cuda_stream_view stream,
+                                                       rmm::mr::device_memory_resource* mr)
+{
+  CUDF_EXPECTS(delimiter.is_valid(stream), "Parameter delimiter must be valid");
+
+  auto const output_type = cudf::data_type{cudf::type_to_id<cudf::size_type>()};
+  if (input.is_empty()) { return cudf::make_empty_column(output_type); }
+
+  // count the tokens per string and build the offsets from the counts
+  auto const d_strings = cudf::column_device_view::create(input.parent(), stream);
+  auto const d_delimiter = delimiter.value(stream);
+  auto const sizes_itr =
+    cudf::detail::make_counting_transform_iterator(0, strings_tokenizer{*d_strings, d_delimiter});
+  auto [token_offsets, total_count] =
+    cudf::detail::make_offsets_child_column(sizes_itr, sizes_itr + input.size(), stream, mr);
+
+  // build the output column to hold all the token ids
+  auto tokens =
+    cudf::make_numeric_column(output_type, total_count, cudf::mask_state::UNALLOCATED, stream, mr);
+  auto map_ref = vocabulary._impl->get_map_ref();
+  auto d_offsets = token_offsets->view().data<cudf::size_type>();
+  auto d_tokens = tokens->mutable_view().data<cudf::size_type>();
+  vocabulary_tokenizer_fn<decltype(map_ref)> tokenizer{
+    *d_strings, d_delimiter, map_ref, default_id, d_offsets, d_tokens};
+  thrust::for_each_n(rmm::exec_policy(stream),
+                     thrust::make_counting_iterator<cudf::size_type>(0),
+                     input.size(),
+                     tokenizer);
+
+  return cudf::make_lists_column(input.size(),
+                                 std::move(token_offsets),
+                                 std::move(tokens),
+                                 input.null_count(),
+                                 cudf::detail::copy_bitmask(input.parent(), stream, mr),
+                                 stream,
+                                 mr);
+}
+
+} // namespace detail
+
+std::unique_ptr<cudf::column> tokenize_with_vocabulary(cudf::strings_column_view const& input,
+                                                       tokenize_vocabulary const& vocabulary,
+                                                       cudf::string_scalar const& delimiter,
+                                                       cudf::size_type default_id,
+                                                       rmm::cuda_stream_view stream,
+                                                       rmm::mr::device_memory_resource* mr)
+{
+  CUDF_FUNC_RANGE();
+  return detail::tokenize_with_vocabulary(input, vocabulary, delimiter, default_id, stream, mr);
+}
+
+} // namespace nvtext
diff --git a/cpp/tests/text/tokenize_tests.cpp b/cpp/tests/text/tokenize_tests.cpp
index 14fc4f8c6db..d78f2dfbdf3 100644
--- a/cpp/tests/text/tokenize_tests.cpp
+++ b/cpp/tests/text/tokenize_tests.cpp
@@ -14,14 +14,16 @@
  * limitations under the
License. */ -#include -#include -#include -#include - #include #include #include +#include + +#include + +#include +#include +#include #include @@ -125,29 +127,37 @@ TEST_F(TextTokenizeTest, CharacterTokenize) TEST_F(TextTokenizeTest, TokenizeEmptyTest) { - auto strings = cudf::make_empty_column(cudf::data_type{cudf::type_id::STRING}); - cudf::test::strings_column_wrapper all_empty({"", "", ""}); - cudf::test::strings_column_wrapper all_null({"", "", ""}, {0, 0, 0}); - cudf::test::fixed_width_column_wrapper expected({0, 0, 0}); - - auto results = nvtext::tokenize(cudf::strings_column_view(strings->view())); + auto input = cudf::make_empty_column(cudf::data_type{cudf::type_id::STRING}); + auto view = cudf::strings_column_view(input->view()); + cudf::test::strings_column_wrapper all_empty_wrapper({"", "", ""}); + auto all_empty = cudf::strings_column_view(all_empty_wrapper); + cudf::test::strings_column_wrapper all_null_wrapper({"", "", ""}, {0, 0, 0}); + auto all_null = cudf::strings_column_view(all_null_wrapper); + cudf::test::fixed_width_column_wrapper expected({0, 0, 0}); + + auto results = nvtext::tokenize(view); EXPECT_EQ(results->size(), 0); - results = nvtext::tokenize(cudf::strings_column_view(all_empty)); + results = nvtext::tokenize(all_empty); EXPECT_EQ(results->size(), 0); - results = nvtext::tokenize(cudf::strings_column_view(all_null)); + results = nvtext::tokenize(all_null); EXPECT_EQ(results->size(), 0); - results = nvtext::count_tokens(cudf::strings_column_view(strings->view())); + results = nvtext::count_tokens(view); EXPECT_EQ(results->size(), 0); - results = nvtext::count_tokens(cudf::strings_column_view(all_empty)); + results = nvtext::count_tokens(all_empty); CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected); results = nvtext::count_tokens(cudf::strings_column_view(all_null)); CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected); - results = nvtext::character_tokenize(cudf::strings_column_view(strings->view())); + results = nvtext::character_tokenize(view); EXPECT_EQ(results->size(), 0); - results = nvtext::character_tokenize(cudf::strings_column_view(all_empty)); + results = nvtext::character_tokenize(all_empty); EXPECT_EQ(results->size(), 0); - results = nvtext::character_tokenize(cudf::strings_column_view(all_null)); + results = nvtext::character_tokenize(all_null); EXPECT_EQ(results->size(), 0); + auto const delimiter = cudf::string_scalar{""}; + results = nvtext::tokenize_with_vocabulary(view, all_empty, delimiter); + EXPECT_EQ(results->size(), 0); + results = nvtext::tokenize_with_vocabulary(all_null, all_empty, delimiter); + EXPECT_EQ(results->size(), results->null_count()); } TEST_F(TextTokenizeTest, Detokenize) @@ -191,3 +201,50 @@ TEST_F(TextTokenizeTest, DetokenizeErrors) EXPECT_THROW(nvtext::detokenize(strings_view, one, cudf::string_scalar("", false)), cudf::logic_error); } + +TEST_F(TextTokenizeTest, Vocabulary) +{ + cudf::test::strings_column_wrapper vocabulary( // leaving out 'cat' on purpose + {"ate", "chased", "cheese", "dog", "fox", "jumped", "mouse", "mousé", "over", "the"}); + auto vocab = nvtext::load_vocabulary(cudf::strings_column_view(vocabulary)); + + auto validity = cudf::test::iterators::null_at(1); + cudf::test::strings_column_wrapper input({"the fox jumped over the dog", + "the dog chased the cat", + "the cat chased the mouse", + "the mousé ate cheese", + "", + ""}, + validity); + auto input_view = cudf::strings_column_view(input); + auto delimiter = cudf::string_scalar(" "); + auto default_id = -7; // should be the token for the missing 
'cat' + auto results = nvtext::tokenize_with_vocabulary(input_view, *vocab, delimiter, default_id); + + using LCW = cudf::test::lists_column_wrapper; + // clang-format off + LCW expected({LCW{ 9, 4, 5, 8, 9, 3}, + LCW{ 9, 3, 1, 9,-7}, + LCW{ 9,-7, 1, 9, 6}, + LCW{ 9, 7, 0, 2}, + LCW{}, LCW{}}, + validity); + // clang-format on + CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected); +} + +TEST_F(TextTokenizeTest, TokenizeErrors) +{ + cudf::test::strings_column_wrapper empty{}; + cudf::strings_column_view view(empty); + EXPECT_THROW(nvtext::load_vocabulary(view), cudf::logic_error); + + cudf::test::strings_column_wrapper vocab_nulls({""}, {0}); + cudf::strings_column_view nulls(vocab_nulls); + EXPECT_THROW(nvtext::load_vocabulary(nulls), cudf::logic_error); + + cudf::test::strings_column_wrapper some{"hello"}; + auto vocab = nvtext::load_vocabulary(cudf::strings_column_view(some)); + EXPECT_THROW(nvtext::tokenize_with_vocabulary(view, *vocab, cudf::string_scalar("", false)), + cudf::logic_error); +} diff --git a/python/cudf/cudf/_lib/cpp/nvtext/tokenize.pxd b/python/cudf/cudf/_lib/cpp/nvtext/tokenize.pxd index 8b80f50e381..3cc3fd6251a 100644 --- a/python/cudf/cudf/_lib/cpp/nvtext/tokenize.pxd +++ b/python/cudf/cudf/_lib/cpp/nvtext/tokenize.pxd @@ -1,10 +1,11 @@ -# Copyright (c) 2020, NVIDIA CORPORATION. +# Copyright (c) 2020-2023, NVIDIA CORPORATION. from libcpp.memory cimport unique_ptr from cudf._lib.cpp.column.column cimport column from cudf._lib.cpp.column.column_view cimport column_view from cudf._lib.cpp.scalar.scalar cimport string_scalar +from cudf._lib.cpp.types cimport size_type cdef extern from "nvtext/tokenize.hpp" namespace "nvtext" nogil: @@ -38,3 +39,17 @@ cdef extern from "nvtext/tokenize.hpp" namespace "nvtext" nogil: const column_view & row_indices, const string_scalar & separator ) except + + + cdef struct tokenize_vocabulary "nvtext::tokenize_vocabulary": + pass + + cdef unique_ptr[tokenize_vocabulary] load_vocabulary( + const column_view & strings + ) except + + + cdef unique_ptr[column] tokenize_with_vocabulary( + const column_view & strings, + const tokenize_vocabulary & vocabulary, + const string_scalar & delimiter, + size_type default_id + ) except + diff --git a/python/cudf/cudf/_lib/nvtext/tokenize.pyx b/python/cudf/cudf/_lib/nvtext/tokenize.pyx index 2bb4fa8e108..bee9d6f6c4d 100644 --- a/python/cudf/cudf/_lib/nvtext/tokenize.pyx +++ b/python/cudf/cudf/_lib/nvtext/tokenize.pyx @@ -1,4 +1,4 @@ -# Copyright (c) 2018-2022, NVIDIA CORPORATION. +# Copyright (c) 2018-2023, NVIDIA CORPORATION. 
from cudf.core.buffer import acquire_spill_lock

@@ -12,9 +12,13 @@
 from cudf._lib.cpp.nvtext.tokenize cimport (
     character_tokenize as cpp_character_tokenize,
     count_tokens as cpp_count_tokens,
     detokenize as cpp_detokenize,
+    load_vocabulary as cpp_load_vocabulary,
     tokenize as cpp_tokenize,
+    tokenize_vocabulary as cpp_tokenize_vocabulary,
+    tokenize_with_vocabulary as cpp_tokenize_with_vocabulary,
 )
 from cudf._lib.cpp.scalar.scalar cimport string_scalar
+from cudf._lib.cpp.types cimport size_type
 from cudf._lib.scalar cimport DeviceScalar

@@ -122,3 +126,37 @@ def detokenize(Column strings, Column indices, object py_separator):
         )
     return Column.from_unique_ptr(move(c_result))
+
+
+cdef class TokenizeVocabulary:
+    cdef unique_ptr[cpp_tokenize_vocabulary] c_obj
+
+    def __cinit__(self, Column vocab):
+        cdef column_view c_vocab = vocab.view()
+        with nogil:
+            self.c_obj = move(cpp_load_vocabulary(c_vocab))
+
+
+@acquire_spill_lock()
+def tokenize_with_vocabulary(Column strings,
+                             TokenizeVocabulary vocabulary,
+                             object py_delimiter,
+                             size_type default_id):
+
+    cdef DeviceScalar delimiter = py_delimiter.device_value
+    cdef column_view c_strings = strings.view()
+    cdef const string_scalar* c_delimiter = delimiter\
+        .get_raw_ptr()
+    cdef unique_ptr[column] c_result
+
+    with nogil:
+        c_result = move(
+            cpp_tokenize_with_vocabulary(
+                c_strings,
+                vocabulary.c_obj.get()[0],
+                c_delimiter[0],
+                default_id
+            )
+        )
+
+    return Column.from_unique_ptr(move(c_result))
diff --git a/python/cudf/cudf/_lib/strings/__init__.py b/python/cudf/cudf/_lib/strings/__init__.py
index 16875e4397e..47a194c4fda 100644
--- a/python/cudf/cudf/_lib/strings/__init__.py
+++ b/python/cudf/cudf/_lib/strings/__init__.py
@@ -23,6 +23,7 @@
     _tokenize_scalar,
     character_tokenize,
     detokenize,
+    tokenize_with_vocabulary,
 )
 from cudf._lib.strings.attributes import (
     code_points,
diff --git a/python/cudf/cudf/core/tokenize_vocabulary.py b/python/cudf/cudf/core/tokenize_vocabulary.py
new file mode 100644
index 00000000000..afb3496311b
--- /dev/null
+++ b/python/cudf/cudf/core/tokenize_vocabulary.py
@@ -0,0 +1,48 @@
+# Copyright (c) 2023, NVIDIA CORPORATION.
+
+from __future__ import annotations
+
+import cudf
+from cudf._lib.nvtext.tokenize import (
+    TokenizeVocabulary as cpp_tokenize_vocabulary,
+    tokenize_with_vocabulary as cpp_tokenize_with_vocabulary,
+)
+
+
+class TokenizeVocabulary:
+    """
+    A vocabulary object used to tokenize input text.
+
+    Parameters
+    ----------
+    vocabulary : cudf.Series
+        Strings column of vocabulary terms
+    """
+
+    def __init__(self, vocabulary: "cudf.Series"):
+        self.vocabulary = cpp_tokenize_vocabulary(vocabulary._column)
+
+    def tokenize(self, text, delimiter: str = "", default_id: int = -1):
+        """
+        Parameters
+        ----------
+        text : cudf string series
+            The strings to be tokenized.
+        delimiter : str
+            Delimiter to identify tokens. Default is whitespace.
+        default_id : int
+            Value to use for tokens not found in the vocabulary.
+            Default is -1.
+
+        Returns
+        -------
+        Tokenized strings
+        """
+        if delimiter is None:
+            delimiter = ""
+        delim = cudf.Scalar(delimiter, dtype="str")
+        result = cpp_tokenize_with_vocabulary(
+            text._column, self.vocabulary, delim, default_id
+        )
+
+        return cudf.Series(result)
diff --git a/python/cudf/cudf/tests/text/test_text_methods.py b/python/cudf/cudf/tests/text/test_text_methods.py
index 8cda15e4acc..2241390a531 100644
--- a/python/cudf/cudf/tests/text/test_text_methods.py
+++ b/python/cudf/cudf/tests/text/test_text_methods.py
@@ -7,6 +7,7 @@
 import pytest

 import cudf
+from cudf.core.tokenize_vocabulary import TokenizeVocabulary
 from cudf.testing._utils import assert_eq

@@ -156,6 +157,64 @@ def test_token_count(delimiter, expected_token_counts):
     assert_eq(expected, actual, check_dtype=False)

+@pytest.mark.parametrize(
+    "delimiter, input, default_id, results",
+    [
+        (
+            "",
+            "the quick brown fox jumps over the lazy brown dog",
+            99,
+            [0, 1, 2, 3, 4, 5, 0, 99, 2, 6],
+        ),
+        (
+            " ",
+            " the sable siamésé cat jumps under the brown sofa ",
+            -1,
+            [0, 7, 8, 9, 4, 10, 0, 2, 11],
+        ),
+        (
+            "_",
+            "the_quick_brown_fox_jumped__over_the_lazy_brown_dog",
+            -99,
+            [0, 1, 2, 3, -99, 5, 0, -99, 2, 6],
+        ),
+    ],
+)
+def test_tokenize_with_vocabulary(delimiter, input, default_id, results):
+    vocabulary = cudf.Series(
+        [
+            "the",
+            "quick",
+            "brown",
+            "fox",
+            "jumps",
+            "over",
+            "dog",
+            "sable",
+            "siamésé",
+            "cat",
+            "under",
+            "sofa",
+        ]
+    )
+    tokenizer = TokenizeVocabulary(vocabulary)
+
+    strings = cudf.Series([input, None, "", input])
+
+    expected = cudf.Series(
+        [
+            cudf.Series(results, dtype=np.int32),
+            None,
+            cudf.Series([], dtype=np.int32),
+            cudf.Series(results, dtype=np.int32),
+        ]
+    )
+
+    actual = tokenizer.tokenize(strings, delimiter, default_id)
+    assert type(expected) == type(actual)
+    assert_eq(expected, actual)
+
+
 def test_normalize_spaces():
     strings = cudf.Series(
         [

From 31e56702fe15f44b3e849207d31d0bb79c307367 Mon Sep 17 00:00:00 2001
From: Karthikeyan <6488848+karthikeyann@users.noreply.github.com>
Date: Wed, 27 Sep 2023 09:29:35 +0530
Subject: [PATCH 113/150] Workaround for illegal instruction error in sm90 for warp intrinsics with mask (#14201)

Workaround for an illegal instruction error on sm90 for warp intrinsics with a non-`0xffffffff` mask.

Removed the mask, and used ~0u (`0xffffffff`) as MASK because
- all threads in the warp have correct data on error, since only threads with is_within_bounds==true update the error flag.
- init_state is not required in the last iteration, which is the only place where MASK is not ~0u.

Fixes #14183

Authors:
  - Karthikeyan (https://github.com/karthikeyann)

Approvers:
  - Divye Gala (https://github.com/divyegala)
  - Elias Stehle (https://github.com/elstehle)
  - Mark Harris (https://github.com/harrism)

URL: https://github.com/rapidsai/cudf/pull/14201
---
 cpp/src/io/utilities/data_casting.cu | 16 +++++-----------
 1 file changed, 5 insertions(+), 11 deletions(-)

diff --git a/cpp/src/io/utilities/data_casting.cu b/cpp/src/io/utilities/data_casting.cu
index d16237d7afe..9e5c5c76392 100644
--- a/cpp/src/io/utilities/data_casting.cu
+++ b/cpp/src/io/utilities/data_casting.cu
@@ -534,8 +534,7 @@ __global__ void parse_fn_string_parallel(str_tuple_it str_tuples,
          char_index < cudf::util::round_up_safe(in_end - in_begin, static_cast(BLOCK_SIZE));
          char_index += BLOCK_SIZE) {
       bool const is_within_bounds = char_index < (in_end - in_begin);
-      auto const MASK = is_warp ? __ballot_sync(0xffffffff, is_within_bounds) : 0xffffffff;
-      auto const c = is_within_bounds ?
in_begin[char_index] : '\0'; + auto const c = is_within_bounds ? in_begin[char_index] : '\0'; auto const prev_c = (char_index > 0 and is_within_bounds) ? in_begin[char_index - 1] : '\0'; auto const escaped_char = get_escape_char(c); @@ -571,7 +570,7 @@ __global__ void parse_fn_string_parallel(str_tuple_it str_tuples, __shared__ typename SlashScan::TempStorage temp_slash[num_warps]; SlashScan(temp_slash[warp_id]).InclusiveScan(curr, scanned, composite_op); is_escaping_backslash = scanned.get(init_state); - init_state = __shfl_sync(MASK, is_escaping_backslash, BLOCK_SIZE - 1); + init_state = __shfl_sync(~0u, is_escaping_backslash, BLOCK_SIZE - 1); __syncwarp(); is_slash.shift(warp_id); is_slash.set_bits(warp_id, is_escaping_backslash); @@ -604,7 +603,7 @@ __global__ void parse_fn_string_parallel(str_tuple_it str_tuples, } // Make sure all threads have no errors before continuing if constexpr (is_warp) { - error = __any_sync(MASK, error); + error = __any_sync(~0u, error); } else { using ErrorReduce = cub::BlockReduce; __shared__ typename ErrorReduce::TempStorage temp_storage_error; @@ -932,13 +931,8 @@ std::unique_ptr parse_data( auto str_tuples = thrust::make_transform_iterator(offset_length_begin, to_string_view_pair{data}); if (col_type == cudf::data_type{cudf::type_id::STRING}) { - return parse_string(str_tuples, - col_size, - std::forward(null_mask), - d_null_count, - options, - stream, - mr); + return parse_string( + str_tuples, col_size, std::move(null_mask), d_null_count, options, stream, mr); } auto out_col = From cdc03a73db880e294f8c4916d942a4568a64d5db Mon Sep 17 00:00:00 2001 From: Lawrence Mitchell Date: Wed, 27 Sep 2023 14:15:33 +0100 Subject: [PATCH 114/150] Marginally reduce memory footprint of joins (#14197) If we drop the gather maps as soon as we are done with them, we have a little more headroom for joins that are close to hitting the device memory limit. 
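Concretely, the fix is to `del` each gather map as soon as the corresponding side of the join has been materialized, so the map's device buffer is released before the other side is gathered. A minimal sketch of the pattern, with hypothetical names (`take` standing in for the gather step), not the actual cudf internals:

def materialize_join(lhs, rhs, left_rows, right_rows):
    # Gather the left side, then immediately drop its gather map so the
    # device memory backing the map is freed before the next gather runs.
    left_result = lhs.take(left_rows)
    del left_rows
    right_result = rhs.take(right_rows)
    del right_rows
    return left_result, right_result

With both maps held until the end, peak usage includes two gather maps plus both gathered tables; dropping each map right after use trims the peak by roughly one map's worth of device memory.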
Authors: - Lawrence Mitchell (https://github.com/wence-) Approvers: - GALI PREM SAGAR (https://github.com/galipremsagar) - Bradley Dice (https://github.com/bdice) - Matthew Roeschke (https://github.com/mroeschke) URL: https://github.com/rapidsai/cudf/pull/14197 --- python/cudf/cudf/core/join/join.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/python/cudf/cudf/core/join/join.py b/python/cudf/cudf/core/join/join.py index 6a6e37180ca..b94f8f583f4 100644 --- a/python/cudf/cudf/core/join/join.py +++ b/python/cudf/cudf/core/join/join.py @@ -203,6 +203,7 @@ def perform_merge(self) -> cudf.DataFrame: if left_rows is not None else cudf.DataFrame._from_data({}) ) + del left_rows right_result = ( self.rhs._gather( GatherMap.from_column_unchecked( @@ -213,7 +214,7 @@ def perform_merge(self) -> cudf.DataFrame: if right_rows is not None else cudf.DataFrame._from_data({}) ) - + del right_rows result = cudf.DataFrame._from_data( *self._merge_results(left_result, right_result) ) From ce247961216dd70f389763dc086f137c11ad7346 Mon Sep 17 00:00:00 2001 From: Nghia Truong <7416935+ttnghia@users.noreply.github.com> Date: Wed, 27 Sep 2023 10:10:31 -0700 Subject: [PATCH 115/150] Implement `HISTOGRAM` and `MERGE_HISTOGRAM` aggregations (#14045) This adds two more aggregations for groupby and reduction: * `HISTOGRAM`: Count the number of occurrences (aka frequency) for each element, and * `MERGE_HISTOGRAM`: Merge different outputs generated by `HISTOGRAM` aggregations This is the prerequisite for implementing the exact distributed percentile aggregation (https://github.com/rapidsai/cudf/issues/13885). However, these two new aggregations may be useful in other use-cases that need to do frequency counting. Closes https://github.com/rapidsai/cudf/issues/13885. Merging checklist: * [X] Working prototypes. * [X] Cleanup and docs. * [X] Unit test. * [ ] Test with spark-rapids integration tests. 
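For reference, the semantics of the two aggregations can be sketched in a few lines of pure Python (an illustration only, not the libcudf implementation, which is a hash-based reduction on the GPU; the order of distinct elements in the output is unspecified):

from collections import Counter

def histogram(values):
    # HISTOGRAM: frequency of each distinct element, as (element, count) pairs.
    return list(Counter(values).items())

def merge_histogram(partial_histograms):
    # MERGE_HISTOGRAM: combine partial histograms, summing counts per element.
    merged = Counter()
    for hist in partial_histograms:
        for element, count in hist:
            merged[element] += count
    return list(merged.items())

# histogram([2, 1, 1, 3, 2])                    -> [(2, 2), (1, 2), (3, 1)]
# merge_histogram([[(1, 2), (2, 1)], [(2, 2)]]) -> [(1, 2), (2, 3)]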
Authors: - Nghia Truong (https://github.com/ttnghia) Approvers: - Robert Maynard (https://github.com/robertmaynard) - Yunsong Wang (https://github.com/PointKernel) - Vukasin Milovanovic (https://github.com/vuule) URL: https://github.com/rapidsai/cudf/pull/14045 --- cpp/CMakeLists.txt | 2 + cpp/include/cudf/aggregation.hpp | 22 +- .../cudf/detail/aggregation/aggregation.hpp | 60 +++ .../cudf/detail/hash_reduce_by_row.cuh | 4 + .../cudf/reduction/detail/histogram.hpp | 57 +++ .../reduction/detail/reduction_functions.hpp | 27 ++ cpp/src/aggregation/aggregation.cpp | 42 ++ cpp/src/groupby/groupby.cu | 10 + cpp/src/groupby/sort/aggregate.cpp | 30 ++ cpp/src/groupby/sort/group_histogram.cu | 152 +++++++ cpp/src/groupby/sort/group_reductions.hpp | 57 ++- cpp/src/reductions/histogram.cu | 273 ++++++++++++ cpp/src/reductions/reductions.cpp | 12 + cpp/tests/CMakeLists.txt | 1 + cpp/tests/groupby/histogram_tests.cpp | 396 ++++++++++++++++++ cpp/tests/reductions/reduction_tests.cpp | 207 +++++++++ 16 files changed, 1349 insertions(+), 3 deletions(-) create mode 100644 cpp/include/cudf/reduction/detail/histogram.hpp create mode 100644 cpp/src/groupby/sort/group_histogram.cu create mode 100644 cpp/src/reductions/histogram.cu create mode 100644 cpp/tests/groupby/histogram_tests.cpp diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index 9656bc40fd7..ec58c391001 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -323,6 +323,7 @@ add_library( src/groupby/sort/group_collect.cu src/groupby/sort/group_correlation.cu src/groupby/sort/group_count.cu + src/groupby/sort/group_histogram.cu src/groupby/sort/group_m2.cu src/groupby/sort/group_max.cu src/groupby/sort/group_min.cu @@ -471,6 +472,7 @@ add_library( src/reductions/all.cu src/reductions/any.cu src/reductions/collect_ops.cu + src/reductions/histogram.cu src/reductions/max.cu src/reductions/mean.cu src/reductions/min.cu diff --git a/cpp/include/cudf/aggregation.hpp b/cpp/include/cudf/aggregation.hpp index d319041f8b1..d458c831f19 100644 --- a/cpp/include/cudf/aggregation.hpp +++ b/cpp/include/cudf/aggregation.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022, NVIDIA CORPORATION. + * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -116,7 +116,9 @@ class aggregation { COVARIANCE, ///< covariance between two sets of elements CORRELATION, ///< correlation between two sets of elements TDIGEST, ///< create a tdigest from a set of input values - MERGE_TDIGEST ///< create a tdigest by merging multiple tdigests together + MERGE_TDIGEST, ///< create a tdigest by merging multiple tdigests together + HISTOGRAM, ///< compute frequency of each element + MERGE_HISTOGRAM ///< merge partial values of HISTOGRAM aggregation, }; aggregation() = delete; @@ -288,6 +290,11 @@ std::unique_ptr make_any_aggregation(); template std::unique_ptr make_all_aggregation(); +/// Factory to create a HISTOGRAM aggregation +/// @return A HISTOGRAM aggregation object +template +std::unique_ptr make_histogram_aggregation(); + /// Factory to create a SUM_OF_SQUARES aggregation /// @return A SUM_OF_SQUARES aggregation object template @@ -610,6 +617,17 @@ std::unique_ptr make_merge_sets_aggregation( template std::unique_ptr make_merge_m2_aggregation(); +/** + * @brief Factory to create a MERGE_HISTOGRAM aggregation + * + * Merges the results of `HISTOGRAM` aggregations on independent sets into a new `HISTOGRAM` value + * equivalent to if a single `HISTOGRAM` aggregation was done across all of the sets at once. + * + * @return A MERGE_HISTOGRAM aggregation object + */ +template +std::unique_ptr make_merge_histogram_aggregation(); + /** * @brief Factory to create a COVARIANCE aggregation * diff --git a/cpp/include/cudf/detail/aggregation/aggregation.hpp b/cpp/include/cudf/detail/aggregation/aggregation.hpp index 4d3984cab93..784f05a964e 100644 --- a/cpp/include/cudf/detail/aggregation/aggregation.hpp +++ b/cpp/include/cudf/detail/aggregation/aggregation.hpp @@ -45,6 +45,8 @@ class simple_aggregations_collector { // Declares the interface for the simple class max_aggregation const& agg); virtual std::vector> visit(data_type col_type, class count_aggregation const& agg); + virtual std::vector> visit(data_type col_type, + class histogram_aggregation const& agg); virtual std::vector> visit(data_type col_type, class any_aggregation const& agg); virtual std::vector> visit(data_type col_type, @@ -89,6 +91,8 @@ class simple_aggregations_collector { // Declares the interface for the simple class merge_sets_aggregation const& agg); virtual std::vector> visit(data_type col_type, class merge_m2_aggregation const& agg); + virtual std::vector> visit( + data_type col_type, class merge_histogram_aggregation const& agg); virtual std::vector> visit(data_type col_type, class covariance_aggregation const& agg); virtual std::vector> visit(data_type col_type, @@ -108,6 +112,7 @@ class aggregation_finalizer { // Declares the interface for the finalizer virtual void visit(class min_aggregation const& agg); virtual void visit(class max_aggregation const& agg); virtual void visit(class count_aggregation const& agg); + virtual void visit(class histogram_aggregation const& agg); virtual void visit(class any_aggregation const& agg); virtual void visit(class all_aggregation const& agg); virtual void visit(class sum_of_squares_aggregation const& agg); @@ -130,6 +135,7 @@ class aggregation_finalizer { // Declares the interface for the finalizer virtual void visit(class merge_lists_aggregation const& agg); virtual void visit(class merge_sets_aggregation const& agg); virtual void visit(class merge_m2_aggregation const& agg); + virtual void visit(class merge_histogram_aggregation const& agg); virtual void visit(class covariance_aggregation const& agg); virtual void visit(class 
correlation_aggregation const& agg); virtual void visit(class tdigest_aggregation const& agg); @@ -251,6 +257,25 @@ class count_aggregation final : public rolling_aggregation, void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); } }; +/** + * @brief Derived class for specifying a histogram aggregation + */ +class histogram_aggregation final : public groupby_aggregation, public reduce_aggregation { + public: + histogram_aggregation() : aggregation(HISTOGRAM) {} + + [[nodiscard]] std::unique_ptr clone() const override + { + return std::make_unique(*this); + } + std::vector> get_simple_aggregations( + data_type col_type, simple_aggregations_collector& collector) const override + { + return collector.visit(col_type, *this); + } + void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); } +}; + /** * @brief Derived class for specifying an any aggregation */ @@ -972,6 +997,25 @@ class merge_m2_aggregation final : public groupby_aggregation { void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); } }; +/** + * @brief Derived aggregation class for specifying MERGE_HISTOGRAM aggregation + */ +class merge_histogram_aggregation final : public groupby_aggregation, public reduce_aggregation { + public: + explicit merge_histogram_aggregation() : aggregation{MERGE_HISTOGRAM} {} + + [[nodiscard]] std::unique_ptr clone() const override + { + return std::make_unique(*this); + } + std::vector> get_simple_aggregations( + data_type col_type, simple_aggregations_collector& collector) const override + { + return collector.visit(col_type, *this); + } + void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); } +}; + /** * @brief Derived aggregation class for specifying COVARIANCE aggregation */ @@ -1148,6 +1192,12 @@ struct target_type_impl { using type = size_type; }; +// Use list for HISTOGRAM +template +struct target_type_impl { + using type = list_view; +}; + // Computing ANY of any type, use bool accumulator template struct target_type_impl { @@ -1326,6 +1376,12 @@ struct target_type_impl { using type = struct_view; }; +// Use list for MERGE_HISTOGRAM +template +struct target_type_impl { + using type = list_view; +}; + // Always use double for COVARIANCE template struct target_type_impl { @@ -1417,6 +1473,8 @@ CUDF_HOST_DEVICE inline decltype(auto) aggregation_dispatcher(aggregation::Kind return f.template operator()(std::forward(args)...); case aggregation::COUNT_ALL: return f.template operator()(std::forward(args)...); + case aggregation::HISTOGRAM: + return f.template operator()(std::forward(args)...); case aggregation::ANY: return f.template operator()(std::forward(args)...); case aggregation::ALL: @@ -1460,6 +1518,8 @@ CUDF_HOST_DEVICE inline decltype(auto) aggregation_dispatcher(aggregation::Kind return f.template operator()(std::forward(args)...); case aggregation::MERGE_M2: return f.template operator()(std::forward(args)...); + case aggregation::MERGE_HISTOGRAM: + return f.template operator()(std::forward(args)...); case aggregation::COVARIANCE: return f.template operator()(std::forward(args)...); case aggregation::CORRELATION: diff --git a/cpp/include/cudf/detail/hash_reduce_by_row.cuh b/cpp/include/cudf/detail/hash_reduce_by_row.cuh index 2d2b43f1d4a..f63d1922950 100644 --- a/cpp/include/cudf/detail/hash_reduce_by_row.cuh +++ b/cpp/include/cudf/detail/hash_reduce_by_row.cuh @@ -14,12 +14,15 @@ * limitations under the License. 
*/

+#include
+#include
 #include
 #include
 #include
 #include
+
 #include
 #include

@@ -29,6 +32,7 @@ namespace cudf::detail {

+using hash_table_allocator_type = rmm::mr::stream_allocator_adaptor>;
 using hash_map_type = cuco::static_map;

diff --git a/cpp/include/cudf/reduction/detail/histogram.hpp b/cpp/include/cudf/reduction/detail/histogram.hpp
new file mode 100644
index 00000000000..97c711fda4e
--- /dev/null
+++ b/cpp/include/cudf/reduction/detail/histogram.hpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2023, NVIDIA CORPORATION.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include
+#include
+#include
+
+#include
+#include
+
+#include
+#include
+
+namespace cudf::reduction::detail {
+
+/**
+ * @brief Compute the frequency for each distinct row in the input table.
+ *
+ * @param input The input table to compute histogram
+ * @param partial_counts An optional column containing count for each row
+ * @param stream CUDA stream used for device memory operations and kernel launches
+ * @param mr Device memory resource used to allocate memory of the returned objects
+ * @return A pair of arrays containing the (stable-order) indices of the distinct rows in the input
+ * table, and their corresponding distinct counts
+ */
+[[nodiscard]] std::pair>, std::unique_ptr>
+compute_row_frequencies(table_view const& input,
+                        std::optional const& partial_counts,
+                        rmm::cuda_stream_view stream,
+                        rmm::mr::device_memory_resource* mr);
+
+/**
+ * @brief Create an empty histogram column.
+ *
+ * A histogram column is a structs column `STRUCT` where T is the type of the input
+ * values.
+ *
+ * @returns An empty histogram column
+ */
+[[nodiscard]] std::unique_ptr make_empty_histogram_like(column_view const& values);
+
+}  // namespace cudf::reduction::detail
diff --git a/cpp/include/cudf/reduction/detail/reduction_functions.hpp b/cpp/include/cudf/reduction/detail/reduction_functions.hpp
index 014a6ba70eb..704332c8e1d 100644
--- a/cpp/include/cudf/reduction/detail/reduction_functions.hpp
+++ b/cpp/include/cudf/reduction/detail/reduction_functions.hpp
@@ -131,6 +131,33 @@ std::unique_ptr all(column_view const& col,
                     rmm::cuda_stream_view stream,
                     rmm::mr::device_memory_resource* mr);

+/**
+ * @brief Compute frequency for each unique element in the input column.
+ *
+ * The result histogram is stored in a structs column having two children. The first child contains
+ * unique elements from the input, and the second child contains their corresponding frequencies.
+ *
+ * @param input The column to compute histogram
+ * @param stream CUDA stream used for device memory operations and kernel launches
+ * @param mr Device memory resource used to allocate the returned scalar's device memory
+ * @return A list_scalar storing a structs column as the result histogram
+ */
+std::unique_ptr histogram(column_view const& input,
+                          rmm::cuda_stream_view stream,
+                          rmm::mr::device_memory_resource* mr);
+
+/**
+ * @brief Merge multiple histograms together.
+ * + * @param input The input given as multiple histograms concatenated together + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned scalar's device memory + * @return A list_scalar storing the result histogram + */ +std::unique_ptr merge_histogram(column_view const& input, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr); + /** * @brief Computes product of elements in input column * diff --git a/cpp/src/aggregation/aggregation.cpp b/cpp/src/aggregation/aggregation.cpp index 2e6a643484e..b3f2a774a60 100644 --- a/cpp/src/aggregation/aggregation.cpp +++ b/cpp/src/aggregation/aggregation.cpp @@ -64,6 +64,12 @@ std::vector> simple_aggregations_collector::visit( return visit(col_type, static_cast(agg)); } +std::vector> simple_aggregations_collector::visit( + data_type col_type, histogram_aggregation const& agg) +{ + return visit(col_type, static_cast(agg)); +} + std::vector> simple_aggregations_collector::visit( data_type col_type, any_aggregation const& agg) { @@ -196,6 +202,12 @@ std::vector> simple_aggregations_collector::visit( return visit(col_type, static_cast(agg)); } +std::vector> simple_aggregations_collector::visit( + data_type col_type, merge_histogram_aggregation const& agg) +{ + return visit(col_type, static_cast(agg)); +} + std::vector> simple_aggregations_collector::visit( data_type col_type, covariance_aggregation const& agg) { @@ -246,6 +258,10 @@ void aggregation_finalizer::visit(count_aggregation const& agg) { visit(static_cast(agg)); } +void aggregation_finalizer::visit(histogram_aggregation const& agg) +{ + visit(static_cast(agg)); +} void aggregation_finalizer::visit(any_aggregation const& agg) { @@ -357,6 +373,11 @@ void aggregation_finalizer::visit(merge_m2_aggregation const& agg) visit(static_cast(agg)); } +void aggregation_finalizer::visit(merge_histogram_aggregation const& agg) +{ + visit(static_cast(agg)); +} + void aggregation_finalizer::visit(covariance_aggregation const& agg) { visit(static_cast(agg)); @@ -460,6 +481,16 @@ template std::unique_ptr make_count_aggregation make_count_aggregation( null_policy null_handling); +/// Factory to create a HISTOGRAM aggregation +template +std::unique_ptr make_histogram_aggregation() +{ + return std::make_unique(); +} +template std::unique_ptr make_histogram_aggregation(); +template std::unique_ptr make_histogram_aggregation(); +template std::unique_ptr make_histogram_aggregation(); + /// Factory to create a ANY aggregation template std::unique_ptr make_any_aggregation() @@ -764,6 +795,17 @@ std::unique_ptr make_merge_m2_aggregation() template std::unique_ptr make_merge_m2_aggregation(); template std::unique_ptr make_merge_m2_aggregation(); +/// Factory to create a MERGE_HISTOGRAM aggregation +template +std::unique_ptr make_merge_histogram_aggregation() +{ + return std::make_unique(); +} +template std::unique_ptr make_merge_histogram_aggregation(); +template std::unique_ptr +make_merge_histogram_aggregation(); +template std::unique_ptr make_merge_histogram_aggregation(); + /// Factory to create a COVARIANCE aggregation template std::unique_ptr make_covariance_aggregation(size_type min_periods, size_type ddof) diff --git a/cpp/src/groupby/groupby.cu b/cpp/src/groupby/groupby.cu index ce1fc71968f..e3c021eb66a 100644 --- a/cpp/src/groupby/groupby.cu +++ b/cpp/src/groupby/groupby.cu @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -110,6 +111,15 @@ struct 
empty_column_constructor { 0, make_empty_column(type_to_id()), empty_like(values), 0, {}); } + if constexpr (k == aggregation::Kind::HISTOGRAM) { + return make_lists_column(0, + make_empty_column(type_to_id()), + cudf::reduction::detail::make_empty_histogram_like(values), + 0, + {}); + } + if constexpr (k == aggregation::Kind::MERGE_HISTOGRAM) { return empty_like(values); } + if constexpr (k == aggregation::Kind::RANK) { auto const& rank_agg = dynamic_cast(agg); if (rank_agg._method == cudf::rank_method::AVERAGE or diff --git a/cpp/src/groupby/sort/aggregate.cpp b/cpp/src/groupby/sort/aggregate.cpp index 3f977dc81d7..10c271f76f9 100644 --- a/cpp/src/groupby/sort/aggregate.cpp +++ b/cpp/src/groupby/sort/aggregate.cpp @@ -89,6 +89,18 @@ void aggregate_result_functor::operator()(aggregation co detail::group_count_all(helper.group_offsets(stream), helper.num_groups(stream), stream, mr)); } +template <> +void aggregate_result_functor::operator()(aggregation const& agg) +{ + if (cache.has_result(values, agg)) return; + + cache.add_result( + values, + agg, + detail::group_histogram( + get_grouped_values(), helper.group_labels(stream), helper.num_groups(stream), stream, mr)); +} + template <> void aggregate_result_functor::operator()(aggregation const& agg) { @@ -534,6 +546,24 @@ void aggregate_result_functor::operator()(aggregation con get_grouped_values(), helper.group_offsets(stream), helper.num_groups(stream), stream, mr)); } +/** + * @brief Perform merging for multiple histograms that correspond to the same key value. + * + * The partial results input to this aggregation is a structs column that is concatenated from + * multiple outputs of HISTOGRAM aggregations. + */ +template <> +void aggregate_result_functor::operator()(aggregation const& agg) +{ + if (cache.has_result(values, agg)) { return; } + + cache.add_result( + values, + agg, + detail::group_merge_histogram( + get_grouped_values(), helper.group_offsets(stream), helper.num_groups(stream), stream, mr)); +} + /** * @brief Creates column views with only valid elements in both input column views * diff --git a/cpp/src/groupby/sort/group_histogram.cu b/cpp/src/groupby/sort/group_histogram.cu new file mode 100644 index 00000000000..bb70037aaef --- /dev/null +++ b/cpp/src/groupby/sort/group_histogram.cu @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +namespace cudf::groupby::detail { + +namespace { + +std::unique_ptr build_histogram(column_view const& values, + cudf::device_span group_labels, + std::optional const& partial_counts, + size_type num_groups, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + CUDF_EXPECTS(static_cast(values.size()) == group_labels.size(), + "Size of values column should be the same as that of group labels.", + std::invalid_argument); + + // Attach group labels to the input values. 
+  auto const labels_cv = column_view{data_type{type_to_id()},
+                                     static_cast(group_labels.size()),
+                                     group_labels.data(),
+                                     nullptr,
+                                     0};
+  auto const labeled_values = table_view{{labels_cv, values}};
+
+  // Build histogram for the labeled values.
+  auto [distinct_indices, distinct_counts] =
+    cudf::reduction::detail::compute_row_frequencies(labeled_values, partial_counts, stream, mr);
+
+  // Gather the distinct rows for the output histogram.
+  auto out_table = cudf::detail::gather(labeled_values,
+                                        *distinct_indices,
+                                        out_of_bounds_policy::DONT_CHECK,
+                                        cudf::detail::negative_index_policy::NOT_ALLOWED,
+                                        stream,
+                                        mr);
+
+  // Build offsets for the output lists column containing output histograms.
+  // Each list will be a histogram corresponding to one value group.
+  auto out_offsets = cudf::lists::detail::reconstruct_offsets(
+    out_table->get_column(0).view(), num_groups, stream, mr);
+
+  std::vector> struct_children;
+  struct_children.emplace_back(std::move(out_table->release().back()));
+  struct_children.emplace_back(std::move(distinct_counts));
+  auto out_structs = make_structs_column(static_cast(distinct_indices->size()),
+                                         std::move(struct_children),
+                                         0,
+                                         {},
+                                         stream,
+                                         mr);
+
+  return make_lists_column(
+    num_groups, std::move(out_offsets), std::move(out_structs), 0, {}, stream, mr);
+}
+
+}  // namespace
+
+std::unique_ptr group_histogram(column_view const& values,
+                                cudf::device_span group_labels,
+                                size_type num_groups,
+                                rmm::cuda_stream_view stream,
+                                rmm::mr::device_memory_resource* mr)
+{
+  // Empty group should be handled before reaching here.
+  CUDF_EXPECTS(num_groups > 0, "Group should not be empty.", std::invalid_argument);
+
+  return build_histogram(values, group_labels, std::nullopt, num_groups, stream, mr);
+}
+
+std::unique_ptr group_merge_histogram(column_view const& values,
+                                      cudf::device_span group_offsets,
+                                      size_type num_groups,
+                                      rmm::cuda_stream_view stream,
+                                      rmm::mr::device_memory_resource* mr)
+{
+  // Empty group should be handled before reaching here.
+  CUDF_EXPECTS(num_groups > 0, "Group should not be empty.", std::invalid_argument);
+
+  // The input must be a lists column without nulls.
+  CUDF_EXPECTS(!values.has_nulls(), "The input column must not have nulls.", std::invalid_argument);
+  CUDF_EXPECTS(values.type().id() == type_id::LIST,
+               "The input of MERGE_HISTOGRAM aggregation must be a lists column.",
+               std::invalid_argument);
+
+  // Child of the input lists column must be a structs column without nulls,
+  // and its second child must be a column of integer type having no nulls.
+  auto const lists_cv     = lists_column_view{values};
+  auto const histogram_cv = lists_cv.get_sliced_child(stream);
+  CUDF_EXPECTS(!histogram_cv.has_nulls(),
+               "Child of the input lists column must not have nulls.",
+               std::invalid_argument);
+  CUDF_EXPECTS(histogram_cv.type().id() == type_id::STRUCT && histogram_cv.num_children() == 2,
+               "The input column has invalid histograms structure.",
+               std::invalid_argument);
+  CUDF_EXPECTS(
+    cudf::is_integral(histogram_cv.child(1).type()) && !histogram_cv.child(1).has_nulls(),
+    "The input column has invalid histograms structure.",
+    std::invalid_argument);
+
+  // Concatenate the histograms corresponding to the same key values.
+  // That is equivalent to creating a new lists column (view) from the input lists column
+  // with new offsets gathered as below.
+  auto new_offsets = rmm::device_uvector(num_groups + 1, stream);
+  thrust::gather(rmm::exec_policy(stream),
+                 group_offsets.begin(),
+                 group_offsets.end(),
+                 lists_cv.offsets_begin(),
+                 new_offsets.begin());
+
+  // Generate labels for the new lists.
+  auto key_labels = rmm::device_uvector(histogram_cv.size(), stream);
+  cudf::detail::label_segments(
+    new_offsets.begin(), new_offsets.end(), key_labels.begin(), key_labels.end(), stream);
+
+  auto const structs_cv   = structs_column_view{histogram_cv};
+  auto const input_values = structs_cv.get_sliced_child(0, stream);
+  auto const input_counts = structs_cv.get_sliced_child(1, stream);
+
+  return build_histogram(input_values, key_labels, input_counts, num_groups, stream, mr);
+}
+
+}  // namespace cudf::groupby::detail
diff --git a/cpp/src/groupby/sort/group_reductions.hpp b/cpp/src/groupby/sort/group_reductions.hpp
index fc24b679db5..3aa79f226a3 100644
--- a/cpp/src/groupby/sort/group_reductions.hpp
+++ b/cpp/src/groupby/sort/group_reductions.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2022, NVIDIA CORPORATION.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -216,6 +216,33 @@ std::unique_ptr group_count_all(cudf::device_span group
                                 size_type num_groups,
                                 rmm::cuda_stream_view stream,
                                 rmm::mr::device_memory_resource* mr);

+/**
+ * @brief Internal API to compute histogram for each group in @p values.
+ *
+ * The returned column is a lists column; each list corresponds to one input group and stores the
+ * histogram of the distinct elements in that group in the form of `STRUCT`.
+ *
+ * Note that the order of distinct elements in each output list is not specified.
+ *
+ * @code{.pseudo}
+ * values = [2, 1, 1, 3, 5, 2, 2, 3, 1, 4]
+ * group_labels = [0, 0, 0, 1, 1, 1, 1, 1, 2, 2]
+ * num_groups = 3
+ *
+ * output = [[<1, 2>, <2, 1>], [<2, 2>, <3, 2>, <5, 1>], [<1, 1>, <4, 1>]]
+ * @endcode
+ *
+ * @param values Grouped values to compute histogram
+ * @param group_labels ID of group that the corresponding value belongs to
+ * @param num_groups Number of groups
+ * @param stream CUDA stream used for device memory operations and kernel launches
+ * @param mr Device memory resource used to allocate the returned column's device memory
+ */
+std::unique_ptr group_histogram(column_view const& values,
+                                cudf::device_span group_labels,
+                                size_type num_groups,
+                                rmm::cuda_stream_view stream,
+                                rmm::mr::device_memory_resource* mr);

 /**
  * @brief Internal API to calculate sum of squares of differences from means.
  *
@@ -441,6 +468,34 @@ std::unique_ptr group_merge_m2(column_view const& values,
                                size_type num_groups,
                                rmm::cuda_stream_view stream,
                                rmm::mr::device_memory_resource* mr);
+
+/**
+ * @brief Internal API to merge multiple outputs of HISTOGRAM aggregation.
+ *
+ * The input values column should be given as a lists column in the form of
+ * `LIST>`.
+ * After merging, the order of distinct elements in each output list is not specified.
+ *
+ * @code{.pseudo}
+ * values = [ [<1, 2>, <2, 1>], [<2, 2>], [<3, 2>, <2, 1>], [<1, 1>, <2, 1>] ]
+ * group_offsets = [ 0, 2, 4]
+ * num_groups = 2
+ *
+ * output = [[<1, 2>, <2, 3>], [<1, 1>, <2, 2>, <3, 2>]]
+ * @endcode
+ *
+ * @param values Grouped histograms to be merged
+ * @param group_offsets Offsets of groups' starting points within @p values
+ * @param num_groups Number of groups
+ * @param stream CUDA stream used for device memory operations and kernel launches
+ * @param mr Device memory resource used to allocate the returned column's device memory
+ */
+std::unique_ptr group_merge_histogram(column_view const& values,
+                                      cudf::device_span group_offsets,
+                                      size_type num_groups,
+                                      rmm::cuda_stream_view stream,
+                                      rmm::mr::device_memory_resource* mr);

 /**
  * @brief Internal API to find covariance of child columns of a non-nullable struct column.
  *
diff --git a/cpp/src/reductions/histogram.cu b/cpp/src/reductions/histogram.cu
new file mode 100644
index 00000000000..fa84bbeb25d
--- /dev/null
+++ b/cpp/src/reductions/histogram.cu
@@ -0,0 +1,273 @@
+/*
+ * Copyright (c) 2023, NVIDIA CORPORATION.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#include
+
+#include
+
+namespace cudf::reduction::detail {
+
+namespace {
+
+// Always use 64-bit signed integer for storing count.
+using histogram_count_type = int64_t;
+
+/**
+ * @brief The functor to accumulate the frequency of each distinct row in the input table.
+ */
+template
+struct reduce_fn : cudf::detail::reduce_by_row_fn_base {
+  CountType const* d_partial_output;
+
+  reduce_fn(MapView const& d_map,
+            KeyHasher const& d_hasher,
+            KeyEqual const& d_equal,
+            CountType* const d_output,
+            CountType const* const d_partial_output)
+    : cudf::detail::reduce_by_row_fn_base{d_map,
+                                          d_hasher,
+                                          d_equal,
+                                          d_output},
+      d_partial_output{d_partial_output}
+  {
+  }
+
+  // Count the number of rows in each group of rows that are compared equal.
+  __device__ void operator()(size_type const idx) const
+  {
+    auto const increment = d_partial_output ? d_partial_output[idx] : CountType{1};
+    auto const count =
+      cuda::atomic_ref(*this->get_output_ptr(idx));
+    count.fetch_add(increment, cuda::std::memory_order_relaxed);
+  }
+};
+
+/**
+ * @brief The builder to construct an instance of `reduce_fn` functor.
+ */
+template
+struct reduce_func_builder {
+  CountType const* const d_partial_output;
+
+  reduce_func_builder(CountType const* const d_partial_output) : d_partial_output{d_partial_output}
+  {
+  }
+
+  template
+  auto build(MapView const& d_map,
+             KeyHasher const& d_hasher,
+             KeyEqual const& d_equal,
+             CountType* const d_output)
+  {
+    return reduce_fn{
+      d_map, d_hasher, d_equal, d_output, d_partial_output};
+  }
+};
+
+/**
+ * @brief Specialized functor to check whether the second component of the input pair is non-zero.
+ */ +struct is_not_zero { + template + __device__ bool operator()(Pair const input) const + { + return thrust::get<1>(input) != 0; + } +}; + +/** + * @brief Building a histogram by gathering distinct rows from the input table and their + * corresponding distinct counts. + * + * @param input The input table + * @param distinct_indices Indices of the distinct rows + * @param distinct_counts Distinct counts corresponding to the distinct rows + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned object's device memory + * @return A list_scalar storing the output histogram + */ +auto gather_histogram(table_view const& input, + device_span distinct_indices, + std::unique_ptr&& distinct_counts, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + auto distinct_rows = cudf::detail::gather(input, + distinct_indices, + out_of_bounds_policy::DONT_CHECK, + cudf::detail::negative_index_policy::NOT_ALLOWED, + stream, + mr); + + std::vector> struct_children; + struct_children.emplace_back(std::move(distinct_rows->release().front())); + struct_children.emplace_back(std::move(distinct_counts)); + auto output_structs = make_structs_column( + static_cast(distinct_indices.size()), std::move(struct_children), 0, {}, stream, mr); + + return std::make_unique( + std::move(*output_structs.release()), true, stream, mr); +} + +} // namespace + +std::unique_ptr make_empty_histogram_like(column_view const& values) +{ + std::vector> struct_children; + struct_children.emplace_back(empty_like(values)); + struct_children.emplace_back(make_numeric_column(data_type{type_id::INT64}, 0)); + return std::make_unique(data_type{type_id::STRUCT}, + 0, + rmm::device_buffer{}, + rmm::device_buffer{}, + 0, + std::move(struct_children)); +} + +std::pair>, std::unique_ptr> +compute_row_frequencies(table_view const& input, + std::optional const& partial_counts, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + auto const has_nested_columns = cudf::detail::has_nested_columns(input); + + // Nested types are not tested, thus we just throw exception if we see such input for now. + // We should remove this check after having enough tests. + CUDF_EXPECTS(!has_nested_columns, + "Nested types are not yet supported in histogram aggregation.", + std::invalid_argument); + + auto map = cudf::detail::hash_map_type{ + compute_hash_table_size(input.num_rows()), + cuco::empty_key{-1}, + cuco::empty_value{std::numeric_limits::min()}, + cudf::detail::hash_table_allocator_type{default_allocator{}, stream}, + stream.value()}; + + auto const preprocessed_input = + cudf::experimental::row::hash::preprocessed_table::create(input, stream); + auto const has_nulls = nullate::DYNAMIC{cudf::has_nested_nulls(input)}; + + auto const row_hasher = cudf::experimental::row::hash::row_hasher(preprocessed_input); + auto const key_hasher = row_hasher.device_hasher(has_nulls); + auto const row_comp = cudf::experimental::row::equality::self_comparator(preprocessed_input); + + auto const pair_iter = cudf::detail::make_counting_transform_iterator( + size_type{0}, [] __device__(size_type const i) { return cuco::make_pair(i, i); }); + + // Always compare NaNs as equal. 
+ using nan_equal_comparator = + cudf::experimental::row::equality::nan_equal_physical_equality_comparator; + auto const value_comp = nan_equal_comparator{}; + + if (has_nested_columns) { + auto const key_equal = row_comp.equal_to(has_nulls, null_equality::EQUAL, value_comp); + map.insert(pair_iter, pair_iter + input.num_rows(), key_hasher, key_equal, stream.value()); + } else { + auto const key_equal = row_comp.equal_to(has_nulls, null_equality::EQUAL, value_comp); + map.insert(pair_iter, pair_iter + input.num_rows(), key_hasher, key_equal, stream.value()); + } + + // Gather the indices of distinct rows. + auto distinct_indices = std::make_unique>( + static_cast(map.get_size()), stream, mr); + + // Store the number of occurrences of each distinct row. + auto distinct_counts = make_numeric_column(data_type{type_to_id()}, + static_cast(map.get_size()), + mask_state::UNALLOCATED, + stream, + mr); + + // Compute frequencies (aka distinct counts) for the input rows. + // Note that we consider null and NaNs as always equal. + auto const reduction_results = cudf::detail::hash_reduce_by_row( + map, + preprocessed_input, + input.num_rows(), + has_nulls, + has_nested_columns, + null_equality::EQUAL, + nan_equality::ALL_EQUAL, + reduce_func_builder{ + partial_counts ? partial_counts.value().begin() : nullptr}, + histogram_count_type{0}, + stream, + rmm::mr::get_current_device_resource()); + + auto const input_it = thrust::make_zip_iterator( + thrust::make_tuple(thrust::make_counting_iterator(0), reduction_results.begin())); + auto const output_it = thrust::make_zip_iterator(thrust::make_tuple( + distinct_indices->begin(), distinct_counts->mutable_view().begin())); + + // Reduction results above are either group sizes of equal rows, or `0`. + // The final output is non-zero group sizes only. + thrust::copy_if( + rmm::exec_policy(stream), input_it, input_it + input.num_rows(), output_it, is_not_zero{}); + + return {std::move(distinct_indices), std::move(distinct_counts)}; +} + +std::unique_ptr histogram(column_view const& input, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + // Empty group should be handled before reaching here. + CUDF_EXPECTS(input.size() > 0, "Input should not be empty.", std::invalid_argument); + + auto const input_tv = table_view{{input}}; + auto [distinct_indices, distinct_counts] = + compute_row_frequencies(input_tv, std::nullopt, stream, mr); + return gather_histogram(input_tv, *distinct_indices, std::move(distinct_counts), stream, mr); +} + +std::unique_ptr merge_histogram(column_view const& input, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + // Empty group should be handled before reaching here. 
+ CUDF_EXPECTS(input.size() > 0, "Input should not be empty.", std::invalid_argument); + CUDF_EXPECTS(!input.has_nulls(), "The input column must not have nulls.", std::invalid_argument); + CUDF_EXPECTS(input.type().id() == type_id::STRUCT && input.num_children() == 2, + "The input must be a structs column having two children.", + std::invalid_argument); + CUDF_EXPECTS(cudf::is_integral(input.child(1).type()) && !input.child(1).has_nulls(), + "The second child of the input column must be of integral type and without nulls.", + std::invalid_argument); + + auto const structs_cv = structs_column_view{input}; + auto const input_values = structs_cv.get_sliced_child(0, stream); + auto const input_counts = structs_cv.get_sliced_child(1, stream); + + auto const values_tv = table_view{{input_values}}; + auto [distinct_indices, distinct_counts] = + compute_row_frequencies(values_tv, input_counts, stream, mr); + return gather_histogram(values_tv, *distinct_indices, std::move(distinct_counts), stream, mr); +} + +} // namespace cudf::reduction::detail diff --git a/cpp/src/reductions/reductions.cpp b/cpp/src/reductions/reductions.cpp index 2fef8aa8785..23171baaa45 100644 --- a/cpp/src/reductions/reductions.cpp +++ b/cpp/src/reductions/reductions.cpp @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -59,6 +60,8 @@ struct reduce_dispatch_functor { case aggregation::MAX: return max(col, output_dtype, init, stream, mr); case aggregation::ANY: return any(col, output_dtype, init, stream, mr); case aggregation::ALL: return all(col, output_dtype, init, stream, mr); + case aggregation::HISTOGRAM: return histogram(col, stream, mr); + case aggregation::MERGE_HISTOGRAM: return merge_histogram(col, stream, mr); case aggregation::SUM_OF_SQUARES: return sum_of_squares(col, output_dtype, stream, mr); case aggregation::MEAN: return mean(col, output_dtype, stream, mr); case aggregation::VARIANCE: { @@ -165,6 +168,15 @@ std::unique_ptr reduce(column_view const& col, return tdigest::detail::make_empty_tdigest_scalar(stream, mr); } + if (agg.kind == aggregation::HISTOGRAM) { + return std::make_unique( + std::move(*reduction::detail::make_empty_histogram_like(col)), true, stream, mr); + } + if (agg.kind == aggregation::MERGE_HISTOGRAM) { + return std::make_unique( + std::move(*reduction::detail::make_empty_histogram_like(col.child(0))), true, stream, mr); + } + if (output_dtype.id() == type_id::LIST) { if (col.type() == output_dtype) { return make_empty_scalar_like(col, stream, mr); } // Under some circumstance, the output type will become the List of input type, diff --git a/cpp/tests/CMakeLists.txt b/cpp/tests/CMakeLists.txt index 68ff6c54c99..04939f3cd6d 100644 --- a/cpp/tests/CMakeLists.txt +++ b/cpp/tests/CMakeLists.txt @@ -116,6 +116,7 @@ ConfigureTest( groupby/covariance_tests.cpp groupby/groupby_test_util.cpp groupby/groups_tests.cpp + groupby/histogram_tests.cpp groupby/keys_tests.cpp groupby/lists_tests.cpp groupby/m2_tests.cpp diff --git a/cpp/tests/groupby/histogram_tests.cpp b/cpp/tests/groupby/histogram_tests.cpp new file mode 100644 index 00000000000..c5833f40cf2 --- /dev/null +++ b/cpp/tests/groupby/histogram_tests.cpp @@ -0,0 +1,396 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +using int32s_col = cudf::test::fixed_width_column_wrapper; +using int64s_col = cudf::test::fixed_width_column_wrapper; +using structs_col = cudf::test::structs_column_wrapper; + +auto groupby_histogram(cudf::column_view const& keys, + cudf::column_view const& values, + cudf::aggregation::Kind agg_kind) +{ + CUDF_EXPECTS( + agg_kind == cudf::aggregation::HISTOGRAM || agg_kind == cudf::aggregation::MERGE_HISTOGRAM, + "Aggregation must be either HISTOGRAM or MERGE_HISTOGRAM."); + + std::vector requests; + requests.emplace_back(); + requests[0].values = values; + if (agg_kind == cudf::aggregation::HISTOGRAM) { + requests[0].aggregations.push_back( + cudf::make_histogram_aggregation()); + } else { + requests[0].aggregations.push_back( + cudf::make_merge_histogram_aggregation()); + } + + auto gb_obj = cudf::groupby::groupby(cudf::table_view({keys})); + auto const agg_results = gb_obj.aggregate(requests, cudf::test::get_default_stream()); + auto const agg_histogram = agg_results.second[0].results[0]->view(); + EXPECT_EQ(agg_histogram.type().id(), cudf::type_id::LIST); + EXPECT_EQ(agg_histogram.null_count(), 0); + + auto const histograms = cudf::lists_column_view{agg_histogram}.child(); + EXPECT_EQ(histograms.num_children(), 2); + EXPECT_EQ(histograms.null_count(), 0); + EXPECT_EQ(histograms.child(1).null_count(), 0); + + auto const key_sort_order = cudf::sorted_order(agg_results.first->view(), {}, {}); + auto sorted_keys = + std::move(cudf::gather(agg_results.first->view(), *key_sort_order)->release().front()); + auto const sorted_vals = + std::move(cudf::gather(cudf::table_view{{agg_histogram}}, *key_sort_order)->release().front()); + auto sorted_histograms = cudf::lists::sort_lists(cudf::lists_column_view{*sorted_vals}, + cudf::order::ASCENDING, + cudf::null_order::BEFORE, + rmm::mr::get_current_device_resource()); + + return std::pair{std::move(sorted_keys), std::move(sorted_histograms)}; +} + +template +struct GroupbyHistogramTest : public cudf::test::BaseFixture {}; + +template +struct GroupbyMergeHistogramTest : public cudf::test::BaseFixture {}; + +// Avoid unsigned types, as the tests below have negative values in their input. +using HistogramTestTypes = cudf::test::Concat, + cudf::test::FloatingPointTypes, + cudf::test::FixedPointTypes, + cudf::test::ChronoTypes>; +TYPED_TEST_SUITE(GroupbyHistogramTest, HistogramTestTypes); +TYPED_TEST_SUITE(GroupbyMergeHistogramTest, HistogramTestTypes); + +TYPED_TEST(GroupbyHistogramTest, EmptyInput) +{ + using col_data = cudf::test::fixed_width_column_wrapper; + + auto const keys = int32s_col{}; + auto const values = col_data{}; + auto const [res_keys, res_histogram] = + groupby_histogram(keys, values, cudf::aggregation::HISTOGRAM); + + // The structure of the output is already verified in the function `groupby_histogram`. 
+ ASSERT_EQ(res_histogram->size(), 0); +} + +TYPED_TEST(GroupbyHistogramTest, SimpleInputNoNull) +{ + using col_data = cudf::test::fixed_width_column_wrapper; + + // key = 0: values = [2, 2, -3, -2, 2] + // key = 1: values = [2, 0, 5, 2, 1] + // key = 2: values = [-3, 1, 1, 2, 2] + auto const keys = int32s_col{2, 0, 2, 1, 1, 1, 0, 0, 0, 1, 2, 2, 1, 0, 2}; + auto const values = col_data{-3, 2, 1, 2, 0, 5, 2, -3, -2, 2, 1, 2, 1, 2, 2}; + + auto const expected_keys = int32s_col{0, 1, 2}; + auto const expected_histogram = [] { + auto structs = [] { + auto values = col_data{-3, -2, 2, 0, 1, 2, 5, -3, 1, 2}; + auto counts = int64s_col{1, 1, 3, 1, 1, 2, 1, 1, 2, 2}; + return structs_col{{values, counts}}; + }(); + return cudf::make_lists_column( + 3, int32s_col{0, 3, 7, 10}.release(), structs.release(), 0, rmm::device_buffer{}); + }(); + + auto const [res_keys, res_histogram] = + groupby_histogram(keys, values, cudf::aggregation::HISTOGRAM); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected_keys, *res_keys); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(*expected_histogram, *res_histogram); +} + +TYPED_TEST(GroupbyHistogramTest, SlicedInputNoNull) +{ + using col_data = cudf::test::fixed_width_column_wrapper; + + auto const keys_original = int32s_col{2, 0, 2, 1, 0, 2, 0, 2, 1, 1, 1, 0, 0, 0, 1, 2, 2, 1, 0, 2}; + auto const values_original = + col_data{1, 2, 0, 2, 1, -3, 2, 1, 2, 0, 5, 2, -3, -2, 2, 1, 2, 1, 2, 2}; + // key = 0: values = [2, 2, -3, -2, 2] + // key = 1: values = [2, 0, 5, 2, 1] + // key = 2: values = [-3, 1, 1, 2, 2] + auto const keys = cudf::slice(keys_original, {5, 20})[0]; + auto const values = cudf::slice(values_original, {5, 20})[0]; + + auto const expected_keys = int32s_col{0, 1, 2}; + auto const expected_histogram = [] { + auto structs = [] { + auto values = col_data{-3, -2, 2, 0, 1, 2, 5, -3, 1, 2}; + auto counts = int64s_col{1, 1, 3, 1, 1, 2, 1, 1, 2, 2}; + return structs_col{{values, counts}}; + }(); + return cudf::make_lists_column( + 3, int32s_col{0, 3, 7, 10}.release(), structs.release(), 0, rmm::device_buffer{}); + }(); + + auto const [res_keys, res_histogram] = + groupby_histogram(keys, values, cudf::aggregation::HISTOGRAM); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected_keys, *res_keys); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(*expected_histogram, *res_histogram); +} + +TYPED_TEST(GroupbyHistogramTest, InputWithNulls) +{ + using col_data = cudf::test::fixed_width_column_wrapper; + using namespace cudf::test::iterators; + auto constexpr null{0}; + + // key = 0: values = [-3, null, 2, null, 2] + // key = 1: values = [1, 2, null, 5, 2, -3, 1, 1] + // key = 2: values = [null, 2, 0, -2, 2, null, 2] + auto const keys = int32s_col{2, 0, 2, 1, 1, 1, 2, 1, 1, 0, 1, 2, 0, 0, 1, 2, 2, 1, 0, 2}; + auto const values = + col_data{{null, -3, 2, 1, 2, null, 0, 5, 2, null, -3, -2, 2, null, 1, 2, null, 1, 2, 2}, + nulls_at({0, 5, 9, 13, 16})}; + + auto const expected_keys = int32s_col{0, 1, 2}; + auto const expected_histogram = [] { + auto structs = [] { + auto values = col_data{{null, -3, 2, null, -3, 1, 2, 5, null, -2, 0, 2}, nulls_at({0, 3, 8})}; + auto counts = int64s_col{2, 1, 2, 1, 1, 3, 2, 1, 2, 1, 1, 3}; + return structs_col{{values, counts}}; + }(); + return cudf::make_lists_column( + 3, int32s_col{0, 3, 8, 12}.release(), structs.release(), 0, rmm::device_buffer{}); + }(); + + auto const [res_keys, res_histogram] = + groupby_histogram(keys, values, cudf::aggregation::HISTOGRAM); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected_keys, *res_keys); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(*expected_histogram, *res_histogram); 
+} + +TYPED_TEST(GroupbyHistogramTest, SlicedInputWithNulls) +{ + using col_data = cudf::test::fixed_width_column_wrapper; + using namespace cudf::test::iterators; + auto constexpr null{0}; + + auto const keys_original = + int32s_col{1, 0, 2, 2, 0, 2, 0, 2, 1, 1, 1, 2, 1, 1, 0, 1, 2, 0, 0, 1, 2, 2, 1, 0, 2, 0, 1, 2}; + auto const values_original = + col_data{{null, 1, 1, 2, 1, null, -3, 2, 1, 2, null, 0, 5, 2, + null, -3, -2, 2, null, 1, 2, null, 1, 2, 2, null, 1, 2}, + nulls_at({0, 5, 10, 14, 18, 21, 25})}; + + // key = 0: values = [-3, null, 2, null, 2] + // key = 1: values = [1, 2, null, 5, 2, -3, 1, 1] + // key = 2: values = [null, 2, 0, -2, 2, null, 2] + auto const keys = cudf::slice(keys_original, {5, 25})[0]; + auto const values = cudf::slice(values_original, {5, 25})[0]; + + auto const expected_keys = int32s_col{0, 1, 2}; + auto const expected_histogram = [] { + auto structs = [] { + auto values = col_data{{null, -3, 2, null, -3, 1, 2, 5, null, -2, 0, 2}, nulls_at({0, 3, 8})}; + auto counts = int64s_col{2, 1, 2, 1, 1, 3, 2, 1, 2, 1, 1, 3}; + return structs_col{{values, counts}}; + }(); + return cudf::make_lists_column( + 3, int32s_col{0, 3, 8, 12}.release(), structs.release(), 0, rmm::device_buffer{}); + }(); + + auto const [res_keys, res_histogram] = + groupby_histogram(keys, values, cudf::aggregation::HISTOGRAM); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected_keys, *res_keys); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(*expected_histogram, *res_histogram); +} + +TYPED_TEST(GroupbyMergeHistogramTest, EmptyInput) +{ + using col_data = cudf::test::fixed_width_column_wrapper; + + auto const keys = int32s_col{}; + auto const values = [] { + auto structs = [] { + auto values = col_data{}; + auto counts = int64s_col{}; + return structs_col{{values, counts}}; + }(); + return cudf::make_lists_column( + 0, int32s_col{}.release(), structs.release(), 0, rmm::device_buffer{}); + }(); + auto const [res_keys, res_histogram] = + groupby_histogram(keys, *values, cudf::aggregation::MERGE_HISTOGRAM); + + // The structure of the output is already verified in the function `groupby_histogram`. 
+ ASSERT_EQ(res_histogram->size(), 0); +} + +TYPED_TEST(GroupbyMergeHistogramTest, SimpleInputNoNull) +{ + using col_data = cudf::test::fixed_width_column_wrapper; + + // key = 0: histograms = [[<-3, 1>, <-2, 1>, <2, 3>], [<0, 1>, <1, 1>], [<-3, 3>, <0, 1>, <1, 2>]] + // key = 1: histograms = [[<-2, 1>, <1, 3>, <2, 2>], [<0, 2>, <1, 1>, <2, 2>]] + auto const keys = int32s_col{0, 1, 0, 1, 0}; + auto const values = [] { + auto structs = [] { + auto values = col_data{-3, -2, 2, -2, 1, 2, 0, 1, 0, 1, 2, -3, 0, 1}; + auto counts = int64s_col{1, 1, 3, 1, 3, 2, 1, 1, 2, 1, 2, 3, 1, 2}; + return structs_col{{values, counts}}; + }(); + return cudf::make_lists_column( + 5, int32s_col{0, 3, 6, 8, 11, 14}.release(), structs.release(), 0, rmm::device_buffer{}); + }(); + + auto const expected_keys = int32s_col{0, 1}; + auto const expected_histogram = [] { + auto structs = [] { + auto values = col_data{-3, -2, 0, 1, 2, -2, 0, 1, 2}; + auto counts = int64s_col{4, 1, 2, 3, 3, 1, 2, 4, 4}; + return structs_col{{values, counts}}; + }(); + return cudf::make_lists_column( + 2, int32s_col{0, 5, 9}.release(), structs.release(), 0, rmm::device_buffer{}); + }(); + + auto const [res_keys, res_histogram] = + groupby_histogram(keys, *values, cudf::aggregation::MERGE_HISTOGRAM); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected_keys, *res_keys); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(*expected_histogram, *res_histogram); +} + +TYPED_TEST(GroupbyMergeHistogramTest, SlicedInputNoNull) +{ + using col_data = cudf::test::fixed_width_column_wrapper; + + // key = 0: histograms = [[<-3, 1>, <-2, 1>, <2, 3>], [<0, 1>, <1, 1>], [<-3, 3>, <0, 1>, <1, 2>]] + // key = 1: histograms = [[<-2, 1>, <1, 3>, <2, 2>], [<0, 2>, <1, 1>, <2, 2>]] + auto const keys_original = int32s_col{0, 1, 0, 1, 0, 1, 0}; + auto const values_original = [] { + auto structs = [] { + auto values = col_data{0, 2, -3, 1, -3, -2, 2, -2, 1, 2, 0, 1, 0, 1, 2, -3, 0, 1}; + auto counts = int64s_col{1, 2, 3, 1, 1, 1, 3, 1, 3, 2, 1, 1, 2, 1, 2, 3, 1, 2}; + return structs_col{{values, counts}}; + }(); + return cudf::make_lists_column(7, + int32s_col{0, 2, 4, 7, 10, 12, 15, 18}.release(), + structs.release(), + 0, + rmm::device_buffer{}); + }(); + auto const keys = cudf::slice(keys_original, {2, 7})[0]; + auto const values = cudf::slice(*values_original, {2, 7})[0]; + + auto const expected_keys = int32s_col{0, 1}; + auto const expected_histogram = [] { + auto structs = [] { + auto values = col_data{-3, -2, 0, 1, 2, -2, 0, 1, 2}; + auto counts = int64s_col{4, 1, 2, 3, 3, 1, 2, 4, 4}; + return structs_col{{values, counts}}; + }(); + return cudf::make_lists_column( + 2, int32s_col{0, 5, 9}.release(), structs.release(), 0, rmm::device_buffer{}); + }(); + + auto const [res_keys, res_histogram] = + groupby_histogram(keys, values, cudf::aggregation::MERGE_HISTOGRAM); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected_keys, *res_keys); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(*expected_histogram, *res_histogram); +} + +TYPED_TEST(GroupbyMergeHistogramTest, InputWithNulls) +{ + using col_data = cudf::test::fixed_width_column_wrapper; + using namespace cudf::test::iterators; + auto constexpr null{0}; + + // key = 0: histograms = [[, <2, 3>], [, <1, 1>], [<0, 1>, <1, 2>]] + // key = 1: histograms = [[, <1, 3>, <2, 2>], [<0, 2>, <1, 1>, <2, 2>]] + auto const keys = int32s_col{0, 1, 1, 0, 0}; + auto const values = [] { + auto structs = [] { + auto values = col_data{{null, 2, null, 1, 2, 0, 1, 2, null, 1, 0, 1}, nulls_at({0, 2, 8})}; + auto counts = int64s_col{1, 3, 1, 3, 2, 2, 1, 2, 2, 1, 1, 2}; + return 
structs_col{{values, counts}}; + }(); + return cudf::make_lists_column( + 5, int32s_col{0, 2, 5, 8, 10, 12}.release(), structs.release(), 0, rmm::device_buffer{}); + }(); + + auto const expected_keys = int32s_col{0, 1}; + auto const expected_histogram = [] { + auto structs = [] { + auto values = col_data{{null, 0, 1, 2, null, 0, 1, 2}, nulls_at({0, 4})}; + auto counts = int64s_col{3, 1, 3, 3, 1, 2, 4, 4}; + return structs_col{{values, counts}}; + }(); + return cudf::make_lists_column( + 2, int32s_col{0, 4, 8}.release(), structs.release(), 0, rmm::device_buffer{}); + }(); + + auto const [res_keys, res_histogram] = + groupby_histogram(keys, *values, cudf::aggregation::MERGE_HISTOGRAM); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected_keys, *res_keys); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(*expected_histogram, *res_histogram); +} + +TYPED_TEST(GroupbyMergeHistogramTest, SlicedInputWithNulls) +{ + using col_data = cudf::test::fixed_width_column_wrapper; + using namespace cudf::test::iterators; + auto constexpr null{0}; + + // key = 0: histograms = [[, <2, 3>], [, <1, 1>], [<0, 1>, <1, 2>]] + // key = 1: histograms = [[, <1, 3>, <2, 2>], [<0, 2>, <1, 1>, <2, 2>]] + auto const keys_original = int32s_col{0, 1, 0, 1, 1, 0, 0}; + auto const values_original = [] { + auto structs = [] { + auto values = col_data{{null, 2, null, 1, null, 2, null, 1, 2, 0, 1, 2, null, 1, 0, 1}, + nulls_at({0, 2, 4, 6, 12})}; + auto counts = int64s_col{1, 3, 2, 1, 1, 3, 1, 3, 2, 2, 1, 2, 2, 1, 1, 2}; + return structs_col{{values, counts}}; + }(); + return cudf::make_lists_column(7, + int32s_col{0, 2, 4, 6, 9, 12, 14, 16}.release(), + structs.release(), + 0, + rmm::device_buffer{}); + }(); + auto const keys = cudf::slice(keys_original, {2, 7})[0]; + auto const values = cudf::slice(*values_original, {2, 7})[0]; + + auto const expected_keys = int32s_col{0, 1}; + auto const expected_histogram = [] { + auto structs = [] { + auto values = col_data{{null, 0, 1, 2, null, 0, 1, 2}, nulls_at({0, 4})}; + auto counts = int64s_col{3, 1, 3, 3, 1, 2, 4, 4}; + return structs_col{{values, counts}}; + }(); + return cudf::make_lists_column( + 2, int32s_col{0, 4, 8}.release(), structs.release(), 0, rmm::device_buffer{}); + }(); + + auto const [res_keys, res_histogram] = + groupby_histogram(keys, values, cudf::aggregation::MERGE_HISTOGRAM); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected_keys, *res_keys); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(*expected_histogram, *res_histogram); +} diff --git a/cpp/tests/reductions/reduction_tests.cpp b/cpp/tests/reductions/reduction_tests.cpp index 2561f3f9886..7644ac48892 100644 --- a/cpp/tests/reductions/reduction_tests.cpp +++ b/cpp/tests/reductions/reduction_tests.cpp @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -379,6 +380,212 @@ TYPED_TEST(ReductionTest, SumOfSquare) expected_null_value); } +auto histogram_reduction(cudf::column_view const& input, + std::unique_ptr const& agg) +{ + CUDF_EXPECTS( + agg->kind == cudf::aggregation::HISTOGRAM || agg->kind == cudf::aggregation::MERGE_HISTOGRAM, + "Aggregation must be either HISTOGRAM or MERGE_HISTOGRAM."); + + auto const result_scalar = cudf::reduce(input, *agg, cudf::data_type{cudf::type_id::INT64}); + EXPECT_EQ(result_scalar->is_valid(), true); + + auto const result_list_scalar = dynamic_cast(result_scalar.get()); + EXPECT_NE(result_list_scalar, nullptr); + + auto const histogram = result_list_scalar->view(); + EXPECT_EQ(histogram.num_children(), 2); + EXPECT_EQ(histogram.null_count(), 0); + EXPECT_EQ(histogram.child(1).null_count(), 0); + + // 
Sort the histogram based on the first column (unique input values). + auto const sort_order = cudf::sorted_order(cudf::table_view{{histogram.child(0)}}, {}, {}); + return std::move(cudf::gather(cudf::table_view{{histogram}}, *sort_order)->release().front()); +} + +template +struct ReductionHistogramTest : public cudf::test::BaseFixture {}; + +// Avoid unsigned types, as the tests below have negative values in their input. +using HistogramTestTypes = cudf::test::Concat, + cudf::test::FloatingPointTypes, + cudf::test::FixedPointTypes, + cudf::test::ChronoTypes>; +TYPED_TEST_SUITE(ReductionHistogramTest, HistogramTestTypes); + +TYPED_TEST(ReductionHistogramTest, Histogram) +{ + using data_col = cudf::test::fixed_width_column_wrapper; + using int64_col = cudf::test::fixed_width_column_wrapper; + using structs_col = cudf::test::structs_column_wrapper; + + auto const agg = cudf::make_histogram_aggregation(); + + // Empty input. + { + auto const input = data_col{}; + auto const expected = [] { + auto child1 = data_col{}; + auto child2 = int64_col{}; + return structs_col{{child1, child2}}; + }(); + auto const result = histogram_reduction(input, agg); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, *result); + } + + { + auto const input = data_col{-3, 2, 1, 2, 0, 5, 2, -3, -2, 2, 1}; + auto const expected = [] { + auto child1 = data_col{-3, -2, 0, 1, 2, 5}; + auto child2 = int64_col{2, 1, 1, 2, 4, 1}; + return structs_col{{child1, child2}}; + }(); + auto const result = histogram_reduction(input, agg); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, *result); + } + + // Test without nulls, sliced input. + { + auto const input_original = data_col{-3, 2, 1, 2, 0, 5, 2, -3, -2, 2, 1}; + auto const input = cudf::slice(input_original, {0, 7})[0]; + auto const expected = [] { + auto child1 = data_col{-3, 0, 1, 2, 5}; + auto child2 = int64_col{1, 1, 1, 3, 1}; + return structs_col{{child1, child2}}; + }(); + auto const result = histogram_reduction(input, agg); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, *result); + } + + // Test with nulls. + using namespace cudf::test::iterators; + auto constexpr null{0}; + { + auto const input = data_col{{null, -3, 2, 1, 2, 0, null, 5, 2, null, -3, -2, null, 2, 1}, + nulls_at({0, 6, 9, 12})}; + auto const expected = [] { + auto child1 = data_col{{null, -3, -2, 0, 1, 2, 5}, null_at(0)}; + auto child2 = int64_col{4, 2, 1, 1, 2, 4, 1}; + return structs_col{{child1, child2}}; + }(); + auto const result = histogram_reduction(input, agg); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, *result); + } + + // Test with nulls, sliced input. + { + auto const input_original = data_col{ + {null, -3, 2, 1, 2, 0, null, 5, 2, null, -3, -2, null, 2, 1}, nulls_at({0, 6, 9, 12})}; + auto const input = cudf::slice(input_original, {0, 9})[0]; + auto const expected = [] { + auto child1 = data_col{{null, -3, 0, 1, 2, 5}, null_at(0)}; + auto child2 = int64_col{2, 1, 1, 1, 3, 1}; + return structs_col{{child1, child2}}; + }(); + auto const result = histogram_reduction(input, agg); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, *result); + } +} + +TYPED_TEST(ReductionHistogramTest, MergeHistogram) +{ + using data_col = cudf::test::fixed_width_column_wrapper; + using int64_col = cudf::test::fixed_width_column_wrapper; + using structs_col = cudf::test::structs_column_wrapper; + + auto const agg = cudf::make_merge_histogram_aggregation(); + + // Empty input. 
+ { + auto const input = [] { + auto child1 = data_col{}; + auto child2 = int64_col{}; + return structs_col{{child1, child2}}; + }(); + auto const expected = [] { + auto child1 = data_col{}; + auto child2 = int64_col{}; + return structs_col{{child1, child2}}; + }(); + auto const result = histogram_reduction(input, agg); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, *result); + } + + // Test without nulls. + { + auto const input = [] { + auto child1 = data_col{-3, 2, 1, 2, 0, 5, 2, -3, -2, 2, 1}; + auto child2 = int64_col{2, 1, 1, 2, 4, 1, 2, 3, 5, 3, 4}; + return structs_col{{child1, child2}}; + }(); + + auto const expected = [] { + auto child1 = data_col{-3, -2, 0, 1, 2, 5}; + auto child2 = int64_col{5, 5, 4, 5, 8, 1}; + return structs_col{{child1, child2}}; + }(); + auto const result = histogram_reduction(input, agg); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, *result); + } + + // Test without nulls, sliced input. + { + auto const input_original = [] { + auto child1 = data_col{-3, 2, 1, 2, 0, 5, 2, -3, -2, 2, 1}; + auto child2 = int64_col{2, 1, 1, 2, 4, 1, 2, 3, 5, 3, 4}; + return structs_col{{child1, child2}}; + }(); + auto const input = cudf::slice(input_original, {0, 7})[0]; + + auto const expected = [] { + auto child1 = data_col{-3, 0, 1, 2, 5}; + auto child2 = int64_col{2, 4, 1, 5, 1}; + return structs_col{{child1, child2}}; + }(); + auto const result = histogram_reduction(input, agg); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, *result); + } + + // Test with nulls. + using namespace cudf::test::iterators; + auto constexpr null{0}; + { + auto const input = [] { + auto child1 = data_col{{-3, 2, null, 1, 2, null, 0, 5, null, 2, -3, null, -2, 2, 1, null}, + nulls_at({2, 5, 8, 11, 15})}; + auto child2 = int64_col{2, 1, 12, 1, 2, 11, 4, 1, 10, 2, 3, 15, 5, 3, 4, 19}; + return structs_col{{child1, child2}}; + }(); + + auto const expected = [] { + auto child1 = data_col{{null, -3, -2, 0, 1, 2, 5}, null_at(0)}; + auto child2 = int64_col{67, 5, 5, 4, 5, 8, 1}; + return structs_col{{child1, child2}}; + }(); + auto const result = histogram_reduction(input, agg); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, *result); + } + + // Test with nulls, sliced input. + { + auto const input_original = [] { + auto child1 = data_col{{-3, 2, null, 1, 2, null, 0, 5, null, 2, -3, null, -2, 2, 1, null}, + nulls_at({2, 5, 8, 11, 15})}; + auto child2 = int64_col{2, 1, 12, 1, 2, 11, 4, 1, 10, 2, 3, 15, 5, 3, 4, 19}; + return structs_col{{child1, child2}}; + }(); + auto const input = cudf::slice(input_original, {0, 9})[0]; + + auto const expected = [] { + auto child1 = data_col{{null, -3, 0, 1, 2, 5}, null_at(0)}; + auto child2 = int64_col{33, 2, 4, 1, 3, 1}; + return structs_col{{child1, child2}}; + }(); + auto const result = histogram_reduction(input, agg); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, *result); + } +} + template struct ReductionAnyAllTest : public ReductionTest {}; using AnyAllTypes = cudf::test::Types; From a97020f9c7e4e2be86788b5f7d83608839d3207b Mon Sep 17 00:00:00 2001 From: Robert Maynard Date: Wed, 27 Sep 2023 13:33:48 -0400 Subject: [PATCH 116/150] Correct numerous 20054-D: dynamic initialization errors found on arm+12.2 (#14108) Compile issues found by compiling libcudf with the `rapidsai/devcontainers:23.10-cpp-gcc9-cuda12.2-ubuntu20.04` docker container. 
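
The fix is the same in every file touched: give the device-side structs in-class `{}` default member initializers and, where a constructor must be spelled out, make it `constexpr` or `= default`, so that a `__shared__` instance of the type no longer requires dynamic initialization. A minimal sketch of the pattern (names are illustrative, not code from this diff):

```cpp
// Before: no member initializers and a user-provided, non-constexpr default
// constructor. nvcc 12.2 on aarch64 rejects a __shared__ instance of this
// type with 20054-D (dynamic initialization not supported).
struct state_s {
  int error;
  state_s() {}
};

// After: the pattern applied throughout this patch -- a constexpr (or
// defaulted) default constructor plus {} member initializers, making the
// type constant-initializable.
struct state_s_fixed {
  constexpr state_s_fixed() noexcept {}
  int error{};
};

__global__ void decode_kernel()
{
  __shared__ state_s_fixed state;  // compiles cleanly: no dynamic initialization
  if (threadIdx.x == 0) { state.error = 0; }
}
```
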
Authors: - Robert Maynard (https://github.com/robertmaynard) Approvers: - Mark Harris (https://github.com/harrism) - David Wendt (https://github.com/davidwendt) - Bradley Dice (https://github.com/bdice) - Vyas Ramasubramani (https://github.com/vyasr) - Mike Wilson (https://github.com/hyperbolic2346) URL: https://github.com/rapidsai/cudf/pull/14108 --- cpp/src/io/avro/avro_common.hpp | 3 +- cpp/src/io/comp/unsnap.cu | 18 +++--- cpp/src/io/orc/orc_gpu.hpp | 39 +++++------- cpp/src/io/orc/stats_enc.cu | 10 +-- cpp/src/io/orc/stripe_init.cu | 29 ++++----- cpp/src/io/parquet/page_decode.cuh | 67 +++++++++++---------- cpp/src/io/parquet/page_hdr.cu | 12 ++-- cpp/src/io/parquet/parquet_gpu.hpp | 56 ++++++++--------- cpp/src/io/statistics/column_statistics.cuh | 12 ++-- cpp/src/io/statistics/statistics.cuh | 30 ++++----- 10 files changed, 138 insertions(+), 138 deletions(-) diff --git a/cpp/src/io/avro/avro_common.hpp b/cpp/src/io/avro/avro_common.hpp index ff8ee206dd4..0058d236d8c 100644 --- a/cpp/src/io/avro/avro_common.hpp +++ b/cpp/src/io/avro/avro_common.hpp @@ -25,7 +25,8 @@ namespace cudf { namespace io { namespace avro { struct block_desc_s { - block_desc_s() {} + block_desc_s() = default; // required to compile on ctk-12.2 + aarch64 + explicit constexpr block_desc_s( size_t offset_, uint32_t size_, uint32_t row_offset_, uint32_t first_row_, uint32_t num_rows_) : offset(offset_), diff --git a/cpp/src/io/comp/unsnap.cu b/cpp/src/io/comp/unsnap.cu index c699502317f..504a2fe377c 100644 --- a/cpp/src/io/comp/unsnap.cu +++ b/cpp/src/io/comp/unsnap.cu @@ -52,6 +52,8 @@ struct unsnap_batch_s { * @brief Queue structure used to exchange data between warps */ struct unsnap_queue_s { + unsnap_queue_s() = default; // required to compile on ctk-12.2 + aarch64 + uint32_t prefetch_wrpos; ///< Prefetcher write position uint32_t prefetch_rdpos; ///< Prefetch consumer read position int32_t prefetch_end; ///< Prefetch enable flag (nonzero stops prefetcher) @@ -64,13 +66,15 @@ struct unsnap_queue_s { * @brief snappy decompression state */ struct unsnap_state_s { - uint8_t const* base; ///< base ptr of compressed stream - uint8_t const* end; ///< end of compressed stream - uint32_t uncompressed_size; ///< uncompressed stream size - uint32_t bytes_left; ///< remaining bytes to decompress - int32_t error; ///< current error status - uint32_t tstart; ///< start time for perf logging - volatile unsnap_queue_s q; ///< queue for cross-warp communication + constexpr unsnap_state_s() noexcept {} // required to compile on ctk-12.2 + aarch64 + + uint8_t const* base{}; ///< base ptr of compressed stream + uint8_t const* end{}; ///< end of compressed stream + uint32_t uncompressed_size{}; ///< uncompressed stream size + uint32_t bytes_left{}; ///< remaining bytes to decompress + int32_t error{}; ///< current error status + uint32_t tstart{}; ///< start time for perf logging + volatile unsnap_queue_s q{}; ///< queue for cross-warp communication device_span src; ///< input for current block device_span dst; ///< output for current block }; diff --git a/cpp/src/io/orc/orc_gpu.hpp b/cpp/src/io/orc/orc_gpu.hpp index 9b8df50a22a..dba7a9ffda5 100644 --- a/cpp/src/io/orc/orc_gpu.hpp +++ b/cpp/src/io/orc/orc_gpu.hpp @@ -59,31 +59,24 @@ struct CompressedStreamInfo { explicit constexpr CompressedStreamInfo(uint8_t const* compressed_data_, size_t compressed_size_) : compressed_data(compressed_data_), uncompressed_data(nullptr), - compressed_data_size(compressed_size_), - 
dec_in_ctl(nullptr), - dec_out_ctl(nullptr), - copy_in_ctl(nullptr), - copy_out_ctl(nullptr), - num_compressed_blocks(0), - num_uncompressed_blocks(0), - max_uncompressed_size(0), - max_uncompressed_block_size(0) + compressed_data_size(compressed_size_) { } - uint8_t const* compressed_data; // [in] base ptr to compressed stream data - uint8_t* uncompressed_data; // [in] base ptr to uncompressed stream data or NULL if not known yet - size_t compressed_data_size; // [in] compressed data size for this stream - device_span* dec_in_ctl; // [in] input buffer to decompress - device_span* dec_out_ctl; // [in] output buffer to decompress into - device_span dec_res; // [in] results of decompression - device_span* copy_in_ctl; // [out] input buffer to copy - device_span* copy_out_ctl; // [out] output buffer to copy to - uint32_t num_compressed_blocks; // [in,out] number of entries in decctl(in), number of compressed - // blocks(out) - uint32_t num_uncompressed_blocks; // [in,out] number of entries in dec_in_ctl(in), number of - // uncompressed blocks(out) - uint64_t max_uncompressed_size; // [out] maximum uncompressed data size of stream - uint32_t max_uncompressed_block_size; // [out] maximum uncompressed size of any block in stream + uint8_t const* compressed_data{}; // [in] base ptr to compressed stream data + uint8_t* + uncompressed_data{}; // [in] base ptr to uncompressed stream data or NULL if not known yet + size_t compressed_data_size{}; // [in] compressed data size for this stream + device_span* dec_in_ctl{}; // [in] input buffer to decompress + device_span* dec_out_ctl{}; // [in] output buffer to decompress into + device_span dec_res{}; // [in] results of decompression + device_span* copy_in_ctl{}; // [out] input buffer to copy + device_span* copy_out_ctl{}; // [out] output buffer to copy to + uint32_t num_compressed_blocks{}; // [in,out] number of entries in decctl(in), number of + // compressed blocks(out) + uint32_t num_uncompressed_blocks{}; // [in,out] number of entries in dec_in_ctl(in), number of + // uncompressed blocks(out) + uint64_t max_uncompressed_size{}; // [out] maximum uncompressed data size of stream + uint32_t max_uncompressed_block_size{}; // [out] maximum uncompressed size of any block in stream }; enum StreamIndexType { diff --git a/cpp/src/io/orc/stats_enc.cu b/cpp/src/io/orc/stats_enc.cu index 69d7ec95acd..95f1db5bfd1 100644 --- a/cpp/src/io/orc/stats_enc.cu +++ b/cpp/src/io/orc/stats_enc.cu @@ -134,11 +134,11 @@ __global__ void __launch_bounds__(block_size, 1) } struct stats_state_s { - uint8_t* base; ///< Output buffer start - uint8_t* end; ///< Output buffer end - statistics_chunk chunk; - statistics_merge_group group; - statistics_dtype stats_dtype; //!< Statistics data type for this column + uint8_t* base{}; ///< Output buffer start + uint8_t* end{}; ///< Output buffer end + statistics_chunk chunk{}; + statistics_merge_group group{}; + statistics_dtype stats_dtype{}; //!< Statistics data type for this column }; /* diff --git a/cpp/src/io/orc/stripe_init.cu b/cpp/src/io/orc/stripe_init.cu index d8a60350356..8eeca504121 100644 --- a/cpp/src/io/orc/stripe_init.cu +++ b/cpp/src/io/orc/stripe_init.cu @@ -30,14 +30,14 @@ namespace orc { namespace gpu { struct comp_in_out { - uint8_t const* in_ptr; - size_t in_size; - uint8_t* out_ptr; - size_t out_size; + uint8_t const* in_ptr{}; + size_t in_size{}; + uint8_t* out_ptr{}; + size_t out_size{}; }; struct compressed_stream_s { - CompressedStreamInfo info; - comp_in_out ctl; + CompressedStreamInfo info{}; + comp_in_out 
ctl{}; }; // blockDim {128,1,1} @@ -208,14 +208,15 @@ __global__ void __launch_bounds__(128, 8) * @brief Shared mem state for gpuParseRowGroupIndex */ struct rowindex_state_s { - ColumnDesc chunk; - uint32_t rowgroup_start; - uint32_t rowgroup_end; - int is_compressed; - uint32_t row_index_entry[3][CI_PRESENT]; // NOTE: Assumes CI_PRESENT follows CI_DATA and CI_DATA2 - CompressedStreamInfo strm_info[2]; - RowGroup rowgroups[128]; - uint32_t compressed_offset[128][2]; + ColumnDesc chunk{}; + uint32_t rowgroup_start{}; + uint32_t rowgroup_end{}; + int is_compressed{}; + uint32_t row_index_entry[3] + [CI_PRESENT]{}; // NOTE: Assumes CI_PRESENT follows CI_DATA and CI_DATA2 + CompressedStreamInfo strm_info[2]{}; + RowGroup rowgroups[128]{}; + uint32_t compressed_offset[128][2]{}; }; enum row_entry_state_e { diff --git a/cpp/src/io/parquet/page_decode.cuh b/cpp/src/io/parquet/page_decode.cuh index 26e3c951b2e..5e66885d746 100644 --- a/cpp/src/io/parquet/page_decode.cuh +++ b/cpp/src/io/parquet/page_decode.cuh @@ -26,48 +26,49 @@ namespace cudf::io::parquet::gpu { struct page_state_s { - uint8_t const* data_start; - uint8_t const* data_end; - uint8_t const* lvl_end; - uint8_t const* dict_base; // ptr to dictionary page data - int32_t dict_size; // size of dictionary data - int32_t first_row; // First row in page to output - int32_t num_rows; // Rows in page to decode (including rows to be skipped) - int32_t first_output_value; // First value in page to output - int32_t num_input_values; // total # of input/level values in the page - int32_t dtype_len; // Output data type length - int32_t dtype_len_in; // Can be larger than dtype_len if truncating 32-bit into 8-bit - int32_t dict_bits; // # of bits to store dictionary indices - uint32_t dict_run; - int32_t dict_val; - uint32_t initial_rle_run[NUM_LEVEL_TYPES]; // [def,rep] - int32_t initial_rle_value[NUM_LEVEL_TYPES]; // [def,rep] - int32_t error; - PageInfo page; - ColumnChunkDesc col; + constexpr page_state_s() noexcept {} + uint8_t const* data_start{}; + uint8_t const* data_end{}; + uint8_t const* lvl_end{}; + uint8_t const* dict_base{}; // ptr to dictionary page data + int32_t dict_size{}; // size of dictionary data + int32_t first_row{}; // First row in page to output + int32_t num_rows{}; // Rows in page to decode (including rows to be skipped) + int32_t first_output_value{}; // First value in page to output + int32_t num_input_values{}; // total # of input/level values in the page + int32_t dtype_len{}; // Output data type length + int32_t dtype_len_in{}; // Can be larger than dtype_len if truncating 32-bit into 8-bit + int32_t dict_bits{}; // # of bits to store dictionary indices + uint32_t dict_run{}; + int32_t dict_val{}; + uint32_t initial_rle_run[NUM_LEVEL_TYPES]{}; // [def,rep] + int32_t initial_rle_value[NUM_LEVEL_TYPES]{}; // [def,rep] + int32_t error{}; + PageInfo page{}; + ColumnChunkDesc col{}; // (leaf) value decoding - int32_t nz_count; // number of valid entries in nz_idx (write position in circular buffer) - int32_t dict_pos; // write position of dictionary indices - int32_t src_pos; // input read position of final output value - int32_t ts_scale; // timestamp scale: <0: divide by -ts_scale, >0: multiply by ts_scale + int32_t nz_count{}; // number of valid entries in nz_idx (write position in circular buffer) + int32_t dict_pos{}; // write position of dictionary indices + int32_t src_pos{}; // input read position of final output value + int32_t ts_scale{}; // timestamp scale: <0: divide by -ts_scale, >0: multiply by ts_scale 
// repetition/definition level decoding - int32_t input_value_count; // how many values of the input we've processed - int32_t input_row_count; // how many rows of the input we've processed - int32_t input_leaf_count; // how many leaf values of the input we've processed - uint8_t const* lvl_start[NUM_LEVEL_TYPES]; // [def,rep] - uint8_t const* abs_lvl_start[NUM_LEVEL_TYPES]; // [def,rep] - uint8_t const* abs_lvl_end[NUM_LEVEL_TYPES]; // [def,rep] - int32_t lvl_count[NUM_LEVEL_TYPES]; // how many of each of the streams we've decoded - int32_t row_index_lower_bound; // lower bound of row indices we should process + int32_t input_value_count{}; // how many values of the input we've processed + int32_t input_row_count{}; // how many rows of the input we've processed + int32_t input_leaf_count{}; // how many leaf values of the input we've processed + uint8_t const* lvl_start[NUM_LEVEL_TYPES]{}; // [def,rep] + uint8_t const* abs_lvl_start[NUM_LEVEL_TYPES]{}; // [def,rep] + uint8_t const* abs_lvl_end[NUM_LEVEL_TYPES]{}; // [def,rep] + int32_t lvl_count[NUM_LEVEL_TYPES]{}; // how many of each of the streams we've decoded + int32_t row_index_lower_bound{}; // lower bound of row indices we should process // a shared-memory cache of frequently used data when decoding. The source of this data is // normally stored in global memory which can yield poor performance. So, when possible // we copy that info here prior to decoding - PageNestingDecodeInfo nesting_decode_cache[max_cacheable_nesting_decode_info]; + PageNestingDecodeInfo nesting_decode_cache[max_cacheable_nesting_decode_info]{}; // points to either nesting_decode_cache above when possible, or to the global source otherwise - PageNestingDecodeInfo* nesting_info; + PageNestingDecodeInfo* nesting_info{}; }; // buffers only used in the decode kernel. 
separated from page_state_s to keep diff --git a/cpp/src/io/parquet/page_hdr.cu b/cpp/src/io/parquet/page_hdr.cu index 0d611643b46..6f8b2f50443 100644 --- a/cpp/src/io/parquet/page_hdr.cu +++ b/cpp/src/io/parquet/page_hdr.cu @@ -45,13 +45,13 @@ static const __device__ __constant__ uint8_t g_list2struct[16] = {0, ST_FLD_LIST}; struct byte_stream_s { - uint8_t const* cur; - uint8_t const* end; - uint8_t const* base; + uint8_t const* cur{}; + uint8_t const* end{}; + uint8_t const* base{}; // Parsed symbols - PageType page_type; - PageInfo page; - ColumnChunkDesc ck; + PageType page_type{}; + PageInfo page{}; + ColumnChunkDesc ck{}; }; /** diff --git a/cpp/src/io/parquet/parquet_gpu.hpp b/cpp/src/io/parquet/parquet_gpu.hpp index a3cc37dee4f..a760c2448dc 100644 --- a/cpp/src/io/parquet/parquet_gpu.hpp +++ b/cpp/src/io/parquet/parquet_gpu.hpp @@ -228,7 +228,7 @@ struct PageInfo { * @brief Struct describing a particular chunk of column data */ struct ColumnChunkDesc { - ColumnChunkDesc() = default; + constexpr ColumnChunkDesc() noexcept {}; explicit ColumnChunkDesc(size_t compressed_size_, uint8_t* compressed_data_, size_t num_values_, @@ -275,34 +275,34 @@ struct ColumnChunkDesc { { } - uint8_t const* compressed_data; // pointer to compressed column chunk data - size_t compressed_size; // total compressed data size for this chunk - size_t num_values; // total number of values in this column - size_t start_row; // starting row of this chunk - uint32_t num_rows; // number of rows in this chunk - int16_t max_level[level_type::NUM_LEVEL_TYPES]; // max definition/repetition level - int16_t max_nesting_depth; // max nesting depth of the output - uint16_t data_type; // basic column data type, ((type_length << 3) | - // parquet::Type) + uint8_t const* compressed_data{}; // pointer to compressed column chunk data + size_t compressed_size{}; // total compressed data size for this chunk + size_t num_values{}; // total number of values in this column + size_t start_row{}; // starting row of this chunk + uint32_t num_rows{}; // number of rows in this chunk + int16_t max_level[level_type::NUM_LEVEL_TYPES]{}; // max definition/repetition level + int16_t max_nesting_depth{}; // max nesting depth of the output + uint16_t data_type{}; // basic column data type, ((type_length << 3) | + // parquet::Type) uint8_t - level_bits[level_type::NUM_LEVEL_TYPES]; // bits to encode max definition/repetition levels - int32_t num_data_pages; // number of data pages - int32_t num_dict_pages; // number of dictionary pages - int32_t max_num_pages; // size of page_info array - PageInfo* page_info; // output page info for up to num_dict_pages + - // num_data_pages (dictionary pages first) - string_index_pair* str_dict_index; // index for string dictionary - bitmask_type** valid_map_base; // base pointers of valid bit map for this column - void** column_data_base; // base pointers of column data - void** column_string_base; // base pointers of column string data - int8_t codec; // compressed codec enum - int8_t converted_type; // converted type enum - LogicalType logical_type; // logical type - int8_t decimal_precision; // Decimal precision - int32_t ts_clock_rate; // output timestamp clock frequency (0=default, 1000=ms, 1000000000=ns) - - int32_t src_col_index; // my input column index - int32_t src_col_schema; // my schema index in the file + level_bits[level_type::NUM_LEVEL_TYPES]{}; // bits to encode max definition/repetition levels + int32_t num_data_pages{}; // number of data pages + int32_t num_dict_pages{}; // number of 
dictionary pages + int32_t max_num_pages{}; // size of page_info array + PageInfo* page_info{}; // output page info for up to num_dict_pages + + // num_data_pages (dictionary pages first) + string_index_pair* str_dict_index{}; // index for string dictionary + bitmask_type** valid_map_base{}; // base pointers of valid bit map for this column + void** column_data_base{}; // base pointers of column data + void** column_string_base{}; // base pointers of column string data + int8_t codec{}; // compressed codec enum + int8_t converted_type{}; // converted type enum + LogicalType logical_type{}; // logical type + int8_t decimal_precision{}; // Decimal precision + int32_t ts_clock_rate{}; // output timestamp clock frequency (0=default, 1000=ms, 1000000000=ns) + + int32_t src_col_index{}; // my input column index + int32_t src_col_schema{}; // my schema index in the file }; /** diff --git a/cpp/src/io/statistics/column_statistics.cuh b/cpp/src/io/statistics/column_statistics.cuh index 28e77f62a43..f71fb95949f 100644 --- a/cpp/src/io/statistics/column_statistics.cuh +++ b/cpp/src/io/statistics/column_statistics.cuh @@ -34,18 +34,18 @@ namespace io { * @brief shared state for statistics calculation kernel */ struct stats_state_s { - stats_column_desc col; ///< Column information - statistics_group group; ///< Group description - statistics_chunk ck; ///< Output statistics chunk + stats_column_desc col{}; ///< Column information + statistics_group group{}; ///< Group description + statistics_chunk ck{}; ///< Output statistics chunk }; /** * @brief shared state for statistics merge kernel */ struct merge_state_s { - stats_column_desc col; ///< Column information - statistics_merge_group group; ///< Group description - statistics_chunk ck; ///< Resulting statistics chunk + stats_column_desc col{}; ///< Column information + statistics_merge_group group{}; ///< Group description + statistics_chunk ck{}; ///< Resulting statistics chunk }; template diff --git a/cpp/src/io/statistics/statistics.cuh b/cpp/src/io/statistics/statistics.cuh index 805ca43553e..b6e698fee11 100644 --- a/cpp/src/io/statistics/statistics.cuh +++ b/cpp/src/io/statistics/statistics.cuh @@ -98,27 +98,27 @@ union statistics_val { }; struct statistics_chunk { - uint32_t non_nulls; //!< number of non-null values in chunk - uint32_t null_count; //!< number of null values in chunk - statistics_val min_value; //!< minimum value in chunk - statistics_val max_value; //!< maximum value in chunk - statistics_val sum; //!< sum of chunk - uint8_t has_minmax; //!< Nonzero if min_value and max_values are valid - uint8_t has_sum; //!< Nonzero if sum is valid + uint32_t non_nulls{}; //!< number of non-null values in chunk + uint32_t null_count{}; //!< number of null values in chunk + statistics_val min_value{}; //!< minimum value in chunk + statistics_val max_value{}; //!< maximum value in chunk + statistics_val sum{}; //!< sum of chunk + uint8_t has_minmax{}; //!< Nonzero if min_value and max_values are valid + uint8_t has_sum{}; //!< Nonzero if sum is valid }; struct statistics_group { - stats_column_desc const* col; //!< Column information - uint32_t start_row; //!< Start row of this group - uint32_t num_rows; //!< Number of rows in group - uint32_t non_leaf_nulls; //!< Number of null non-leaf values in the group + stats_column_desc const* col{}; //!< Column information + uint32_t start_row{}; //!< Start row of this group + uint32_t num_rows{}; //!< Number of rows in group + uint32_t non_leaf_nulls{}; //!< Number of null non-leaf values in the 
group }; struct statistics_merge_group { - data_type col_dtype; //!< Column data type - statistics_dtype stats_dtype; //!< Statistics data type for this column - uint32_t start_chunk; //!< Start chunk of this group - uint32_t num_chunks; //!< Number of chunks in group + data_type col_dtype; //!< Column data type + statistics_dtype stats_dtype{dtype_none}; //!< Statistics data type for this column + uint32_t start_chunk{}; //!< Start chunk of this group + uint32_t num_chunks{}; //!< Number of chunks in group }; template >* = nullptr> From bff0fcd721320210c53d3533e63fb34eac883f4e Mon Sep 17 00:00:00 2001 From: Raza Jafri Date: Wed, 27 Sep 2023 11:25:25 -0700 Subject: [PATCH 117/150] [Java] Add JNI bindings for `integers_to_hex` (#14205) This PR adds a method to ColumnView class to allow for conversion from Integers to hex closes #14081 Authors: - Raza Jafri (https://github.com/razajafri) Approvers: - Kuhu Shukla (https://github.com/kuhushukla) - Robert (Bobby) Evans (https://github.com/revans2) URL: https://github.com/rapidsai/cudf/pull/14205 --- .../main/java/ai/rapids/cudf/ColumnView.java | 27 +++++++++++++++++++ java/src/main/java/ai/rapids/cudf/DType.java | 19 +++++++++++++ java/src/main/native/src/ColumnViewJni.cpp | 9 +++++++ .../java/ai/rapids/cudf/ColumnVectorTest.java | 10 +++++++ 4 files changed, 65 insertions(+) diff --git a/java/src/main/java/ai/rapids/cudf/ColumnView.java b/java/src/main/java/ai/rapids/cudf/ColumnView.java index 3f3a55f0970..0b66701629b 100644 --- a/java/src/main/java/ai/rapids/cudf/ColumnView.java +++ b/java/src/main/java/ai/rapids/cudf/ColumnView.java @@ -4089,6 +4089,8 @@ static DeviceMemoryBufferView getOffsetsBuffer(long viewHandle) { private static native long isFixedPoint(long viewHandle, int nativeTypeId, int scale); + private static native long toHex(long viewHandle); + /** * Native method to concatenate a list column of strings (each row is a list of strings), * concatenates the strings within each row and returns a single strings column result. @@ -5231,4 +5233,29 @@ static ColumnView[] getColumnViewsFromPointers(long[] nativeHandles) { } } } + + /** + * Convert this integer column to hexadecimal column and return a new strings column + * + * Any null entries will result in corresponding null entries in the output column. + * + * The output character set is '0'-'9' and 'A'-'F'. The output string width will + * be a multiple of 2 depending on the size of the integer type. A single leading + * zero is applied to the first non-zero output byte if it is less than 0x10. + * + * Example: + * input = [123, -1, 0, 27, 342718233] + * s = input.toHex() + * s is [ '04D2', 'FFFFFFFF', '00', '1B', '146D7719'] + * + * The example above shows an `INT32` type column where each integer is 4 bytes. + * Leading zeros are suppressed unless filling out a complete byte as in + * `123 -> '04D2'` instead of `000004D2` or `4D2`. 
+ * + * @return new string ColumnVector + */ + public ColumnVector toHex() { + assert getType().isIntegral() : "Only integers are supported"; + return new ColumnVector(toHex(this.getNativeView())); + } } diff --git a/java/src/main/java/ai/rapids/cudf/DType.java b/java/src/main/java/ai/rapids/cudf/DType.java index d0bb7761da4..07bc4fe3bbf 100644 --- a/java/src/main/java/ai/rapids/cudf/DType.java +++ b/java/src/main/java/ai/rapids/cudf/DType.java @@ -413,6 +413,14 @@ public boolean isDurationType() { } /** + * Returns true for strictly Integer types not a type backed by + * ints + */ + public boolean isIntegral() { + return INTEGRALS.contains(this.typeId); + } + + /** * Returns true for nested types */ public boolean isNestedType() { @@ -506,4 +514,15 @@ public boolean hasOffsets() { DTypeEnum.STRING, DTypeEnum.LIST ); + + private static final EnumSet INTEGRALS = EnumSet.of( + DTypeEnum.INT8, + DTypeEnum.INT16, + DTypeEnum.INT32, + DTypeEnum.INT64, + DTypeEnum.UINT8, + DTypeEnum.UINT16, + DTypeEnum.UINT32, + DTypeEnum.UINT64 + ); } diff --git a/java/src/main/native/src/ColumnViewJni.cpp b/java/src/main/native/src/ColumnViewJni.cpp index d5aad03645f..0ddaa2c15b5 100644 --- a/java/src/main/native/src/ColumnViewJni.cpp +++ b/java/src/main/native/src/ColumnViewJni.cpp @@ -2563,4 +2563,13 @@ Java_ai_rapids_cudf_ColumnView_purgeNonEmptyNulls(JNIEnv *env, jclass, jlong col CATCH_STD(env, 0); } +JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_toHex(JNIEnv *env, jclass, jlong input_ptr) { + JNI_NULL_CHECK(env, input_ptr, "input is null", 0); + try { + cudf::jni::auto_set_device(env); + const cudf::column_view *input = reinterpret_cast(input_ptr); + return release_as_jlong(cudf::strings::integers_to_hex(*input)); + } + CATCH_STD(env, 0); +} } // extern "C" diff --git a/java/src/test/java/ai/rapids/cudf/ColumnVectorTest.java b/java/src/test/java/ai/rapids/cudf/ColumnVectorTest.java index f6dffc88b92..9a0f8bda994 100644 --- a/java/src/test/java/ai/rapids/cudf/ColumnVectorTest.java +++ b/java/src/test/java/ai/rapids/cudf/ColumnVectorTest.java @@ -6876,4 +6876,14 @@ public void testUseAfterFree() { vector.close(); assertThrows(NullPointerException.class, vector::getDeviceMemorySize); } + + @Test + public void testConvertIntegerToHex() { + try ( + ColumnVector input = ColumnVector.fromInts(14, 2621, 50); + ColumnVector expected = ColumnVector.fromStrings("0E", "0A3D", "32"); + ColumnVector actual = input.toHex()) { + assertColumnsAreEqual(expected, actual); + } + } } From 66ac962dbeb69eade22b3bcaf186e3df2bae71b5 Mon Sep 17 00:00:00 2001 From: Nghia Truong <7416935+ttnghia@users.noreply.github.com> Date: Wed, 27 Sep 2023 12:20:20 -0700 Subject: [PATCH 118/150] JNI for `HISTOGRAM` and `MERGE_HISTOGRAM` aggregations (#14154) This implements JNI for `HISTOGRAM` and `MERGE_HISTOGRAM` aggregations in both groupby and reduction. Depends on: * https://github.com/rapidsai/cudf/pull/14045 Contributes to: * https://github.com/rapidsai/cudf/issues/13885. 
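
A usage sketch of the groupby binding, mirroring the new tests added below (the table contents here are made up):

```java
// key 0 -> values [5, 5], key 1 -> values [7]
try (Table input = new Table.TestBuilder()
         .column(0, 1, 0)   // keys
         .column(5, 7, 5)   // values
         .build();
     Table result = input.groupBy(0)
         .aggregate(GroupByAggregation.histogram().onColumn(1))) {
  // Column 1 of `result` is LIST<STRUCT<value, INT64 count>>, one list per key:
  // key 0 -> [<5, 2>], key 1 -> [<7, 1>]
}
```
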
Authors: - Nghia Truong (https://github.com/ttnghia) Approvers: - Jason Lowe (https://github.com/jlowe) URL: https://github.com/rapidsai/cudf/pull/14154 --- .../main/java/ai/rapids/cudf/Aggregation.java | 26 ++++- .../ai/rapids/cudf/GroupByAggregation.java | 24 +++- .../ai/rapids/cudf/ReductionAggregation.java | 20 +++- java/src/main/native/src/AggregationJni.cpp | 7 +- .../test/java/ai/rapids/cudf/TableTest.java | 109 ++++++++++++++++++ 5 files changed, 181 insertions(+), 5 deletions(-) diff --git a/java/src/main/java/ai/rapids/cudf/Aggregation.java b/java/src/main/java/ai/rapids/cudf/Aggregation.java index d10329ca0f2..379750bb0b7 100644 --- a/java/src/main/java/ai/rapids/cudf/Aggregation.java +++ b/java/src/main/java/ai/rapids/cudf/Aggregation.java @@ -1,6 +1,6 @@ /* * - * Copyright (c) 2020-2022, NVIDIA CORPORATION. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -68,7 +68,9 @@ enum Kind { DENSE_RANK(29), PERCENT_RANK(30), TDIGEST(31), // This can take a delta argument for accuracy level - MERGE_TDIGEST(32); // This can take a delta argument for accuracy level + MERGE_TDIGEST(32), // This can take a delta argument for accuracy level + HISTOGRAM(33), + MERGE_HISTOGRAM(34); final int nativeId; @@ -918,6 +920,26 @@ static TDigestAggregation mergeTDigest(int delta) { return new TDigestAggregation(Kind.MERGE_TDIGEST, delta); } + static final class HistogramAggregation extends NoParamAggregation { + private HistogramAggregation() { + super(Kind.HISTOGRAM); + } + } + + static final class MergeHistogramAggregation extends NoParamAggregation { + private MergeHistogramAggregation() { + super(Kind.MERGE_HISTOGRAM); + } + } + + static HistogramAggregation histogram() { + return new HistogramAggregation(); + } + + static MergeHistogramAggregation mergeHistogram() { + return new MergeHistogramAggregation(); + } + /** * Create one of the aggregations that only needs a kind, no other parameters. This does not * work for all types and for code safety reasons each kind is added separately. diff --git a/java/src/main/java/ai/rapids/cudf/GroupByAggregation.java b/java/src/main/java/ai/rapids/cudf/GroupByAggregation.java index 500d18f7eae..0fae33927b6 100644 --- a/java/src/main/java/ai/rapids/cudf/GroupByAggregation.java +++ b/java/src/main/java/ai/rapids/cudf/GroupByAggregation.java @@ -1,6 +1,6 @@ /* * - * Copyright (c) 2021, NVIDIA CORPORATION. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -315,4 +315,26 @@ public static GroupByAggregation createTDigest(int delta) { public static GroupByAggregation mergeTDigest(int delta) { return new GroupByAggregation(Aggregation.mergeTDigest(delta)); } + + /** + * Histogram aggregation, computing the frequencies for each unique row. + * + * A histogram is given as a lists column, in which the first child stores unique rows from + * the input values and the second child stores their corresponding frequencies. + * + * @return A lists of structs column in which each list contains a histogram corresponding to + * an input key. + */ + public static GroupByAggregation histogram() { + return new GroupByAggregation(Aggregation.histogram()); + } + + /** + * MergeHistogram aggregation, to merge multiple histograms. 
+ * + * @return A new histogram in which the frequencies of the unique rows are sum up. + */ + public static GroupByAggregation mergeHistogram() { + return new GroupByAggregation(Aggregation.mergeHistogram()); + } } diff --git a/java/src/main/java/ai/rapids/cudf/ReductionAggregation.java b/java/src/main/java/ai/rapids/cudf/ReductionAggregation.java index eab1c94fd2c..ba8ae379bae 100644 --- a/java/src/main/java/ai/rapids/cudf/ReductionAggregation.java +++ b/java/src/main/java/ai/rapids/cudf/ReductionAggregation.java @@ -1,6 +1,6 @@ /* * - * Copyright (c) 2021-2022, NVIDIA CORPORATION. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -286,4 +286,22 @@ public static ReductionAggregation mergeSets(NullEquality nullEquality, NaNEqual return new ReductionAggregation(Aggregation.mergeSets(nullEquality, nanEquality)); } + /** + * Create HistogramAggregation, computing the frequencies for each unique row. + * + * @return A structs column in which the first child stores unique rows from the input and the + * second child stores their corresponding frequencies. + */ + public static ReductionAggregation histogram() { + return new ReductionAggregation(Aggregation.histogram()); + } + + /** + * Create MergeHistogramAggregation, to merge multiple histograms. + * + * @return A new histogram in which the frequencies of the unique rows are sum up. + */ + public static ReductionAggregation mergeHistogram() { + return new ReductionAggregation(Aggregation.mergeHistogram()); + } } diff --git a/java/src/main/native/src/AggregationJni.cpp b/java/src/main/native/src/AggregationJni.cpp index 6ac73282615..bc62e95c36a 100644 --- a/java/src/main/native/src/AggregationJni.cpp +++ b/java/src/main/native/src/AggregationJni.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -90,6 +90,11 @@ JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Aggregation_createNoParamAgg(JNIEnv case 30: // ANSI SQL PERCENT_RANK return cudf::make_rank_aggregation(cudf::rank_method::MIN, {}, cudf::null_policy::INCLUDE, {}, cudf::rank_percentage::ONE_NORMALIZED); + case 33: // HISTOGRAM + return cudf::make_histogram_aggregation(); + case 34: // MERGE_HISTOGRAM + return cudf::make_merge_histogram_aggregation(); + default: throw std::logic_error("Unsupported No Parameter Aggregation Operation"); } }(); diff --git a/java/src/test/java/ai/rapids/cudf/TableTest.java b/java/src/test/java/ai/rapids/cudf/TableTest.java index 59f0d180c6e..faa73ac4322 100644 --- a/java/src/test/java/ai/rapids/cudf/TableTest.java +++ b/java/src/test/java/ai/rapids/cudf/TableTest.java @@ -4129,6 +4129,115 @@ void testMergeTDigestReduction() { } } + @Test + void testGroupbyHistogram() { + StructType histogramStruct = new StructType(false, + new BasicType(false, DType.INT32), // values + new BasicType(false, DType.INT64)); // frequencies + ListType histogramList = new ListType(false, histogramStruct); + + // key = 0: values = [2, 2, -3, -2, 2] + // key = 1: values = [2, 0, 5, 2, 1] + // key = 2: values = [-3, 1, 1, 2, 2] + try (Table input = new Table.TestBuilder() + .column(2, 0, 2, 1, 1, 1, 0, 0, 0, 1, 2, 2, 1, 0, 2) + .column(-3, 2, 1, 2, 0, 5, 2, -3, -2, 2, 1, 2, 1, 2, 2) + .build(); + Table result = input.groupBy(0) + .aggregate(GroupByAggregation.histogram().onColumn(1)); + Table sortedResult = result.orderBy(OrderByArg.asc(0)); + ColumnVector sortedOutHistograms = sortedResult.getColumn(1).listSortRows(false, false); + + ColumnVector expectedKeys = ColumnVector.fromInts(0, 1, 2); + ColumnVector expectedHistograms = ColumnVector.fromLists(histogramList, + Arrays.asList(new StructData(-3, 1L), new StructData(-2, 1L), new StructData(2, 3L)), + Arrays.asList(new StructData(0, 1L), new StructData(1, 1L), new StructData(2, 2L), + new StructData(5, 1L)), + Arrays.asList(new StructData(-3, 1L), new StructData(1, 2L), new StructData(2, 2L))) + ) { + assertColumnsAreEqual(expectedKeys, sortedResult.getColumn(0)); + assertColumnsAreEqual(expectedHistograms, sortedOutHistograms); + } + } + + @Test + void testGroupbyMergeHistogram() { + StructType histogramStruct = new StructType(false, + new BasicType(false, DType.INT32), // values + new BasicType(false, DType.INT64)); // frequencies + ListType histogramList = new ListType(false, histogramStruct); + + // key = 0: histograms = [[<-3, 1>, <-2, 1>, <2, 3>], [<0, 1>, <1, 1>], [<-3, 3>, <0, 1>, <1, 2>]] + // key = 1: histograms = [[<-2, 1>, <1, 3>, <2, 2>], [<0, 2>, <1, 1>, <2, 2>]] + try (Table input = new Table.TestBuilder() + .column(0, 1, 0, 1, 0) + .column(histogramStruct, + new StructData[]{new StructData(-3, 1L), new StructData(-2, 1L), new StructData(2, 3L)}, + new StructData[]{new StructData(-2, 1L), new StructData(1, 3L), new StructData(2, 2L)}, + new StructData[]{new StructData(0, 1L), new StructData(1, 1L)}, + new StructData[]{new StructData(0, 2L), new StructData(1, 1L), new StructData(2, 2L)}, + new StructData[]{new StructData(-3, 3L), new StructData(0, 1L), new StructData(1, 2L)}) + .build(); + Table result = input.groupBy(0) + .aggregate(GroupByAggregation.mergeHistogram().onColumn(1)); + Table sortedResult = result.orderBy(OrderByArg.asc(0)); + ColumnVector sortedOutHistograms = sortedResult.getColumn(1).listSortRows(false, false); + + ColumnVector expectedKeys = ColumnVector.fromInts(0, 1); + ColumnVector expectedHistograms = 
ColumnVector.fromLists(histogramList, + Arrays.asList(new StructData(-3, 4L), new StructData(-2, 1L), new StructData(0, 2L), + new StructData(1, 3L), new StructData(2, 3L)), + Arrays.asList(new StructData(-2, 1L), new StructData(0, 2L), new StructData(1, 4L), + new StructData(2, 4L))) + ) { + assertColumnsAreEqual(expectedKeys, sortedResult.getColumn(0)); + assertColumnsAreEqual(expectedHistograms, sortedOutHistograms); + } + } + + @Test + void testReductionHistogram() { + StructType histogramStruct = new StructType(false, + new BasicType(false, DType.INT32), // values + new BasicType(false, DType.INT64)); // frequencies + + try (ColumnVector input = ColumnVector.fromInts(-3, 2, 1, 2, 0, 5, 2, -3, -2, 2, 1); + Scalar result = input.reduce(ReductionAggregation.histogram(), DType.LIST); + ColumnVector resultCV = result.getListAsColumnView().copyToColumnVector(); + Table resultTable = new Table(resultCV); + Table sortedResult = resultTable.orderBy(OrderByArg.asc(0)); + + ColumnVector expectedHistograms = ColumnVector.fromStructs(histogramStruct, + new StructData(-3, 2L), new StructData(-2, 1L), new StructData(0, 1L), + new StructData(1, 2L), new StructData(2, 4L), new StructData(5, 1L)) + ) { + assertColumnsAreEqual(expectedHistograms, sortedResult.getColumn(0)); + } + } + + @Test + void testReductionMergeHistogram() { + StructType histogramStruct = new StructType(false, + new BasicType(false, DType.INT32), // values + new BasicType(false, DType.INT64)); // frequencies + + try (ColumnVector input = ColumnVector.fromStructs(histogramStruct, + new StructData(-3, 2L), new StructData(2, 1L), new StructData(1, 1L), + new StructData(2, 2L), new StructData(0, 4L), new StructData(5, 1L), + new StructData(2, 2L), new StructData(-3, 3L), new StructData(-2, 5L), + new StructData(2, 3L), new StructData(1, 4L)); + Scalar result = input.reduce(ReductionAggregation.mergeHistogram(), DType.LIST); + ColumnVector resultCV = result.getListAsColumnView().copyToColumnVector(); + Table resultTable = new Table(resultCV); + Table sortedResult = resultTable.orderBy(OrderByArg.asc(0)); + + ColumnVector expectedHistograms = ColumnVector.fromStructs(histogramStruct, + new StructData(-3, 5L), new StructData(-2, 5L), new StructData(0, 4L), + new StructData(1, 5L), new StructData(2, 8L), new StructData(5, 1L)) + ) { + assertColumnsAreEqual(expectedHistograms, sortedResult.getColumn(0)); + } + } @Test void testGroupByMinMaxDecimal() { try (Table t1 = new Table.TestBuilder() From b789d4ce3c090a3f25a8657d9a8582a1edb54f12 Mon Sep 17 00:00:00 2001 From: GALI PREM SAGAR Date: Wed, 27 Sep 2023 12:20:46 -0700 Subject: [PATCH 119/150] Preserve name of the column while initializing a `DataFrame` (#14110) Fixes: #14088 This PR preserves `names` of `column` object while constructing a `DataFrame` through various constructor flows. 
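
A minimal repro of the fixed behavior, distilled from the new tests:

```python
import pandas as pd

import cudf

columns = pd.Index(["a"], name="index name")
pdf = pd.DataFrame({"a": [10, 11, 12]}, columns=columns)
gdf = cudf.DataFrame({"a": [10, 11, 12]}, columns=columns)

# Previously cudf dropped the name on `columns`; it is now preserved
# to match pandas:
assert pdf.columns.name == gdf.columns.name == "index name"
```
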
Authors: - GALI PREM SAGAR (https://github.com/galipremsagar) Approvers: - Bradley Dice (https://github.com/bdice) - Ashwin Srinath (https://github.com/shwina) URL: https://github.com/rapidsai/cudf/pull/14110 --- python/cudf/cudf/core/column_accessor.py | 2 -- python/cudf/cudf/core/dataframe.py | 26 ++++++++++++++++++--- python/cudf/cudf/core/indexed_frame.py | 4 +++- python/cudf/cudf/tests/test_dataframe.py | 29 ++++++++++++++++++++---- 4 files changed, 51 insertions(+), 10 deletions(-) diff --git a/python/cudf/cudf/core/column_accessor.py b/python/cudf/cudf/core/column_accessor.py index bec9c367ba9..cb79a30422e 100644 --- a/python/cudf/cudf/core/column_accessor.py +++ b/python/cudf/cudf/core/column_accessor.py @@ -197,8 +197,6 @@ def nlevels(self) -> int: @property def name(self) -> Any: - if len(self._data) == 0: - return None return self.level_names[-1] @property diff --git a/python/cudf/cudf/core/dataframe.py b/python/cudf/cudf/core/dataframe.py index 8a3dbe77787..ead2f182e2d 100644 --- a/python/cudf/cudf/core/dataframe.py +++ b/python/cudf/cudf/core/dataframe.py @@ -665,7 +665,10 @@ def __init__( len(self), dtype="object", masked=True ) for k in columns - } + }, + level_names=tuple(columns.names) + if isinstance(columns, pd.Index) + else None, ) elif isinstance(data, ColumnAccessor): raise TypeError( @@ -712,6 +715,11 @@ def __init__( self._data = new_df._data self._index = new_df._index + self._data._level_names = ( + tuple(columns.names) + if isinstance(columns, pd.Index) + else self._data._level_names + ) elif len(data) > 0 and isinstance(data[0], Series): self._init_from_series_list( data=data, columns=columns, index=index @@ -834,6 +842,11 @@ def _init_from_series_list(self, data, columns, index): self._data[col_name] = column.column_empty( row_count=len(self), dtype=None, masked=True ) + self._data._level_names = ( + tuple(columns.names) + if isinstance(columns, pd.Index) + else self._data._level_names + ) self._data = self._data.select_by_label(columns) @_cudf_nvtx_annotate @@ -957,6 +970,11 @@ def _init_from_dict_like( data[col_name], nan_as_null=nan_as_null, ) + self._data._level_names = ( + tuple(columns.names) + if isinstance(columns, pd.Index) + else self._data._level_names + ) @classmethod def _from_data( @@ -5131,7 +5149,7 @@ def from_pandas(cls, dataframe, nan_as_null=None): index = cudf.from_pandas(dataframe.index, nan_as_null=nan_as_null) df = cls._from_data(data, index) - df._data._level_names = list(dataframe.columns.names) + df._data._level_names = tuple(dataframe.columns.names) # Set columns only if it is a MultiIndex if isinstance(dataframe.columns, pd.MultiIndex): @@ -5377,6 +5395,8 @@ def from_records(cls, data, index=None, columns=None, nan_as_null=False): df = df.set_index(index) else: df._index = as_index(index) + if isinstance(columns, pd.Index): + df._data._level_names = tuple(columns.names) return df @classmethod @@ -5434,7 +5454,7 @@ def _from_arrays(cls, data, index=None, columns=None, nan_as_null=False): data, nan_as_null=nan_as_null ) if isinstance(columns, pd.Index): - df._data._level_names = list(columns.names) + df._data._level_names = tuple(columns.names) if index is None: df._index = RangeIndex(start=0, stop=len(data)) diff --git a/python/cudf/cudf/core/indexed_frame.py b/python/cudf/cudf/core/indexed_frame.py index aacf1fa8dae..1008cbdb67f 100644 --- a/python/cudf/cudf/core/indexed_frame.py +++ b/python/cudf/cudf/core/indexed_frame.py @@ -2661,7 +2661,9 @@ def _reindex( 
data=cudf.core.column_accessor.ColumnAccessor( cols, multiindex=self._data.multiindex, - level_names=self._data.level_names, + level_names=tuple(column_names.names) + if isinstance(column_names, pd.Index) + else None, ), index=index, ) diff --git a/python/cudf/cudf/tests/test_dataframe.py b/python/cudf/cudf/tests/test_dataframe.py index 67b63028fab..c297748f7e5 100644 --- a/python/cudf/cudf/tests/test_dataframe.py +++ b/python/cudf/cudf/tests/test_dataframe.py @@ -6394,6 +6394,7 @@ def test_df_series_dataframe_astype_dtype_dict(copy): ([range(100), range(100)], ["range" + str(i) for i in range(100)]), (((1, 2, 3), (1, 2, 3)), ["tuple0", "tuple1", "tuple2"]), ([[1, 2, 3]], ["list col1", "list col2", "list col3"]), + ([[1, 2, 3]], pd.Index(["col1", "col2", "col3"], name="rapids")), ([range(100)], ["range" + str(i) for i in range(100)]), (((1, 2, 3),), ["k1", "k2", "k3"]), ], @@ -7969,6 +7970,7 @@ def test_series_empty(ps): @pytest.mark.parametrize( "data", [ + None, [], [1], {"a": [10, 11, 12]}, @@ -7979,7 +7981,10 @@ def test_series_empty(ps): }, ], ) -@pytest.mark.parametrize("columns", [["a"], ["another column name"], None]) +@pytest.mark.parametrize( + "columns", + [["a"], ["another column name"], None, pd.Index(["a"], name="index name")], +) def test_dataframe_init_with_columns(data, columns): pdf = pd.DataFrame(data, columns=columns) gdf = cudf.DataFrame(data, columns=columns) @@ -8047,7 +8052,16 @@ def test_dataframe_init_with_columns(data, columns): ], ) @pytest.mark.parametrize( - "columns", [None, ["0"], [0], ["abc"], [144, 13], [2, 1, 0]] + "columns", + [ + None, + ["0"], + [0], + ["abc"], + [144, 13], + [2, 1, 0], + pd.Index(["abc"], name="custom_name"), + ], ) def test_dataframe_init_from_series_list(data, ignore_dtype, columns): gd_data = [cudf.from_pandas(obj) for obj in data] @@ -10239,14 +10253,21 @@ def test_dataframe_binop_with_datetime_index(): @pytest.mark.parametrize( - "columns", ([], ["c", "a"], ["a", "d", "b", "e", "c"], ["a", "b", "c"]) + "columns", + ( + [], + ["c", "a"], + ["a", "d", "b", "e", "c"], + ["a", "b", "c"], + pd.Index(["b", "a", "c"], name="custom_name"), + ), ) @pytest.mark.parametrize("index", (None, [4, 5, 6])) def test_dataframe_dict_like_with_columns(columns, index): data = {"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]} expect = pd.DataFrame(data, columns=columns, index=index) actual = cudf.DataFrame(data, columns=columns, index=index) - if index is None and columns == []: + if index is None and len(columns) == 0: # We make an empty range index, pandas makes an empty index expect = expect.reset_index(drop=True) assert_eq(expect, actual) From 2c19bf328ffefb97d17e5ae600197a4ea9ca4445 Mon Sep 17 00:00:00 2001 From: Vukasin Milovanovic Date: Wed, 27 Sep 2023 20:37:04 -0700 Subject: [PATCH 120/150] Propagate errors from Parquet reader kernels back to host (#14167) Pass the error code to the host when a kernel detects invalid input. If multiple errors types are detected, they are combined using a bitwise OR so that caller gets the aggregate error code that includes all types of errors that occurred. Does not change the kernel side checks. 
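
Reduced to a sketch, the aggregation mechanism used in the kernels below is a single device-wide bitwise-OR accumulator (template arguments spelled out here for clarity; the surrounding decode logic is elided):

```cpp
#include <cuda/atomic>

#include <cstdint>

__global__ void decode(int32_t* error_code)
{
  int32_t local_error = 0;
  // ... decoding work; each class of invalid input sets a distinct bit ...
  if (threadIdx.x == 0 && local_error != 0) {
    // OR this block's error bits into one device-wide value; the host copies
    // back a single int32 whose set bits name every error type encountered.
    cuda::atomic_ref<int32_t, cuda::thread_scope_device> ref{*error_code};
    ref.fetch_or(local_error, cuda::std::memory_order_relaxed);
  }
}
```
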
Authors: - Vukasin Milovanovic (https://github.com/vuule) Approvers: - https://github.com/nvdbaranec - Divye Gala (https://github.com/divyegala) - Yunsong Wang (https://github.com/PointKernel) - Bradley Dice (https://github.com/bdice) URL: https://github.com/rapidsai/cudf/pull/14167 --- cpp/src/io/parquet/page_data.cu | 25 ++++++++--- cpp/src/io/parquet/page_decode.cuh | 57 +++++++++++++++++------- cpp/src/io/parquet/page_delta_decode.cu | 25 ++++++++--- cpp/src/io/parquet/page_string_decode.cu | 25 ++++++++--- cpp/src/io/parquet/parquet_gpu.hpp | 21 +++++++++ cpp/src/io/parquet/reader_impl.cpp | 19 ++++++-- 6 files changed, 130 insertions(+), 42 deletions(-) diff --git a/cpp/src/io/parquet/page_data.cu b/cpp/src/io/parquet/page_data.cu index c26802aa3c2..230834632dd 100644 --- a/cpp/src/io/parquet/page_data.cu +++ b/cpp/src/io/parquet/page_data.cu @@ -430,10 +430,15 @@ static __device__ void gpuOutputGeneric( * @param chunks List of column chunks * @param min_row Row index to start reading at * @param num_rows Maximum number of rows to read + * @param error_code Error code to set if an error is encountered */ template -__global__ void __launch_bounds__(decode_block_size) gpuDecodePageData( - PageInfo* pages, device_span chunks, size_t min_row, size_t num_rows) +__global__ void __launch_bounds__(decode_block_size) + gpuDecodePageData(PageInfo* pages, + device_span chunks, + size_t min_row, + size_t num_rows, + int32_t* error_code) { __shared__ __align__(16) page_state_s state_g; __shared__ __align__(16) @@ -472,7 +477,8 @@ __global__ void __launch_bounds__(decode_block_size) gpuDecodePageData( // skipped_leaf_values will always be 0 for flat hierarchies. uint32_t skipped_leaf_values = s->page.skipped_leaf_values; - while (!s->error && (s->input_value_count < s->num_input_values || s->src_pos < s->nz_count)) { + while (s->error == 0 && + (s->input_value_count < s->num_input_values || s->src_pos < s->nz_count)) { int target_pos; int src_pos = s->src_pos; @@ -596,6 +602,10 @@ __global__ void __launch_bounds__(decode_block_size) gpuDecodePageData( } __syncthreads(); } + if (t == 0 and s->error != 0) { + cuda::atomic_ref ref{*error_code}; + ref.fetch_or(s->error, cuda::std::memory_order_relaxed); + } } struct mask_tform { @@ -621,6 +631,7 @@ void __host__ DecodePageData(cudf::detail::hostdevice_vector& pages, size_t num_rows, size_t min_row, int level_type_size, + int32_t* error_code, rmm::cuda_stream_view stream) { CUDF_EXPECTS(pages.size() > 0, "There is no page to decode"); @@ -629,11 +640,11 @@ void __host__ DecodePageData(cudf::detail::hostdevice_vector& pages, dim3 dim_grid(pages.size(), 1); // 1 threadblock per page if (level_type_size == 1) { - gpuDecodePageData - <<>>(pages.device_ptr(), chunks, min_row, num_rows); + gpuDecodePageData<<>>( + pages.device_ptr(), chunks, min_row, num_rows, error_code); } else { - gpuDecodePageData - <<>>(pages.device_ptr(), chunks, min_row, num_rows); + gpuDecodePageData<<>>( + pages.device_ptr(), chunks, min_row, num_rows, error_code); } } diff --git a/cpp/src/io/parquet/page_decode.cuh b/cpp/src/io/parquet/page_decode.cuh index 5e66885d746..cdc29197eb3 100644 --- a/cpp/src/io/parquet/page_decode.cuh +++ b/cpp/src/io/parquet/page_decode.cuh @@ -21,6 +21,7 @@ #include +#include #include namespace cudf::io::parquet::gpu { @@ -69,6 +70,18 @@ struct page_state_s { PageNestingDecodeInfo nesting_decode_cache[max_cacheable_nesting_decode_info]{}; // points to either nesting_decode_cache above when possible, or to 
the global source otherwise PageNestingDecodeInfo* nesting_info{}; + + inline __device__ void set_error_code(decode_error err) volatile + { + cuda::atomic_ref ref{const_cast(error)}; + ref.fetch_or(static_cast(err), cuda::std::memory_order_relaxed); + } + + inline __device__ void reset_error_code() volatile + { + cuda::atomic_ref ref{const_cast(error)}; + ref.store(0, cuda::std::memory_order_release); + } }; // buffers only used in the decode kernel. separated from page_state_s to keep @@ -471,7 +484,7 @@ __device__ void gpuDecodeStream( int32_t value_count = s->lvl_count[lvl]; int32_t batch_coded_count = 0; - while (value_count < target_count && value_count < num_input_values) { + while (s->error == 0 && value_count < target_count && value_count < num_input_values) { int batch_len; if (level_run <= 1) { // Get a new run symbol from the byte stream @@ -487,7 +500,14 @@ __device__ void gpuDecodeStream( cur++; } } - if (cur > end || level_run <= 1) { s->error = 0x10; } + if (cur > end) { + s->set_error_code(decode_error::LEVEL_STREAM_OVERRUN); + break; + } + if (level_run <= 1) { + s->set_error_code(decode_error::INVALID_LEVEL_RUN); + break; + } sym_len = (int32_t)(cur - cur_def); __threadfence_block(); } @@ -496,7 +516,7 @@ __device__ void gpuDecodeStream( level_run = shuffle(level_run); cur_def += sym_len; } - if (s->error) { break; } + if (s->error != 0) { break; } batch_len = min(num_input_values - value_count, 32); if (level_run & 1) { @@ -852,7 +872,7 @@ __device__ void gpuDecodeLevels(page_state_s* s, constexpr int batch_size = 32; int cur_leaf_count = target_leaf_count; - while (!s->error && s->nz_count < target_leaf_count && + while (s->error == 0 && s->nz_count < target_leaf_count && s->input_value_count < s->num_input_values) { if (has_repetition) { gpuDecodeStream(rep, s, cur_leaf_count, t, level_type::REPETITION); @@ -916,7 +936,7 @@ inline __device__ uint32_t InitLevelSection(page_state_s* s, } s->lvl_start[lvl] = cur; - if (cur > end) { s->error = 2; } + if (cur > end) { s->set_error_code(decode_error::LEVEL_STREAM_OVERRUN); } }; // this is a little redundant. if level_bits == 0, then nothing should be encoded @@ -941,8 +961,8 @@ inline __device__ uint32_t InitLevelSection(page_state_s* s, // add back the 4 bytes for the length len += 4; } else { - len = 0; - s->error = 2; + len = 0; + s->set_error_code(decode_error::LEVEL_STREAM_OVERRUN); } } else if (encoding == Encoding::BIT_PACKED) { len = (s->page.num_input_values * level_bits + 7) >> 3; @@ -951,8 +971,8 @@ inline __device__ uint32_t InitLevelSection(page_state_s* s, s->lvl_start[lvl] = cur; s->abs_lvl_start[lvl] = cur; } else { - s->error = 3; - len = 0; + len = 0; + s->set_error_code(decode_error::UNSUPPORTED_ENCODING); } s->abs_lvl_end[lvl] = start + len; @@ -1094,7 +1114,7 @@ inline __device__ bool setupLocalPageInfo(page_state_s* const s, } if (!t) { - s->error = 0; + s->reset_error_code(); // IMPORTANT : nested schemas can have 0 rows in a page but still have // values. The case is: @@ -1152,7 +1172,7 @@ inline __device__ bool setupLocalPageInfo(page_state_s* const s, break; default: // FIXED_LEN_BYTE_ARRAY: s->dtype_len = dtype_len_out; - s->error |= (s->dtype_len <= 0); + if (s->dtype_len <= 0) { s->set_error_code(decode_error::INVALID_DATA_TYPE); } break; } // Special check for downconversions @@ -1268,7 +1288,9 @@ inline __device__ bool setupLocalPageInfo(page_state_s* const s, s->dict_run = 0; s->dict_val = 0; s->dict_bits = (cur < end) ? 
*cur++ : 0; - if (s->dict_bits > 32 || !s->dict_base) { s->error = (10 << 8) | s->dict_bits; } + if (s->dict_bits > 32 || !s->dict_base) { + s->set_error_code(decode_error::INVALID_DICT_WIDTH); + } break; case Encoding::PLAIN: s->dict_size = static_cast(end - cur); @@ -1279,22 +1301,23 @@ inline __device__ bool setupLocalPageInfo(page_state_s* const s, // first 4 bytes are length of RLE data int const len = (cur[0]) + (cur[1] << 8) + (cur[2] << 16) + (cur[3] << 24); cur += 4; - if (cur + len > end) { s->error = 2; } + if (cur + len > end) { s->set_error_code(decode_error::DATA_STREAM_OVERRUN); } s->dict_run = 0; } break; case Encoding::DELTA_BINARY_PACKED: // nothing to do, just don't error break; - default: - s->error = 1; // Unsupported encoding + default: { + s->set_error_code(decode_error::UNSUPPORTED_ENCODING); break; + } } - if (cur > end) { s->error = 1; } + if (cur > end) { s->set_error_code(decode_error::DATA_STREAM_OVERRUN); } s->lvl_end = cur; s->data_start = cur; s->data_end = end; } else { - s->error = 1; + s->set_error_code(decode_error::EMPTY_PAGE); } s->lvl_count[level_type::REPETITION] = 0; diff --git a/cpp/src/io/parquet/page_delta_decode.cu b/cpp/src/io/parquet/page_delta_decode.cu index 35f33a761be..2b78dead205 100644 --- a/cpp/src/io/parquet/page_delta_decode.cu +++ b/cpp/src/io/parquet/page_delta_decode.cu @@ -32,8 +32,12 @@ namespace { // with V2 page headers; see https://www.mail-archive.com/dev@parquet.apache.org/msg11826.html). // this kernel only needs 96 threads (3 warps)(for now). template -__global__ void __launch_bounds__(96) gpuDecodeDeltaBinary( - PageInfo* pages, device_span chunks, size_t min_row, size_t num_rows) +__global__ void __launch_bounds__(96) + gpuDecodeDeltaBinary(PageInfo* pages, + device_span chunks, + size_t min_row, + size_t num_rows, + int32_t* error_code) { using cudf::detail::warp_size; __shared__ __align__(16) delta_binary_decoder db_state; @@ -79,7 +83,8 @@ __global__ void __launch_bounds__(96) gpuDecodeDeltaBinary( // that has a value we need. 
if (skipped_leaf_values > 0) { db->skip_values(skipped_leaf_values); } - while (!s->error && (s->input_value_count < s->num_input_values || s->src_pos < s->nz_count)) { + while (s->error == 0 && + (s->input_value_count < s->num_input_values || s->src_pos < s->nz_count)) { uint32_t target_pos; uint32_t const src_pos = s->src_pos; @@ -145,6 +150,11 @@ __global__ void __launch_bounds__(96) gpuDecodeDeltaBinary( } __syncthreads(); } + + if (t == 0 and s->error != 0) { + cuda::atomic_ref ref{*error_code}; + ref.fetch_or(s->error, cuda::std::memory_order_relaxed); + } } } // anonymous namespace @@ -157,6 +167,7 @@ void __host__ DecodeDeltaBinary(cudf::detail::hostdevice_vector& pages size_t num_rows, size_t min_row, int level_type_size, + int32_t* error_code, rmm::cuda_stream_view stream) { CUDF_EXPECTS(pages.size() > 0, "There is no page to decode"); @@ -165,11 +176,11 @@ void __host__ DecodeDeltaBinary(cudf::detail::hostdevice_vector& pages dim3 dim_grid(pages.size(), 1); // 1 threadblock per page if (level_type_size == 1) { - gpuDecodeDeltaBinary - <<>>(pages.device_ptr(), chunks, min_row, num_rows); + gpuDecodeDeltaBinary<<>>( + pages.device_ptr(), chunks, min_row, num_rows, error_code); } else { - gpuDecodeDeltaBinary - <<>>(pages.device_ptr(), chunks, min_row, num_rows); + gpuDecodeDeltaBinary<<>>( + pages.device_ptr(), chunks, min_row, num_rows, error_code); } } diff --git a/cpp/src/io/parquet/page_string_decode.cu b/cpp/src/io/parquet/page_string_decode.cu index 1ac4c95f713..d79abe4a6d2 100644 --- a/cpp/src/io/parquet/page_string_decode.cu +++ b/cpp/src/io/parquet/page_string_decode.cu @@ -582,8 +582,12 @@ __global__ void __launch_bounds__(preprocess_block_size) gpuComputePageStringSiz * @tparam level_t Type used to store decoded repetition and definition levels */ template -__global__ void __launch_bounds__(decode_block_size) gpuDecodeStringPageData( - PageInfo* pages, device_span chunks, size_t min_row, size_t num_rows) +__global__ void __launch_bounds__(decode_block_size) + gpuDecodeStringPageData(PageInfo* pages, + device_span chunks, + size_t min_row, + size_t num_rows, + int32_t* error_code) { __shared__ __align__(16) page_state_s state_g; __shared__ __align__(4) size_type last_offset; @@ -617,7 +621,8 @@ __global__ void __launch_bounds__(decode_block_size) gpuDecodeStringPageData( // skipped_leaf_values will always be 0 for flat hierarchies. 
uint32_t skipped_leaf_values = s->page.skipped_leaf_values; - while (!s->error && (s->input_value_count < s->num_input_values || s->src_pos < s->nz_count)) { + while (s->error == 0 && + (s->input_value_count < s->num_input_values || s->src_pos < s->nz_count)) { int target_pos; int src_pos = s->src_pos; @@ -742,6 +747,11 @@ __global__ void __launch_bounds__(decode_block_size) gpuDecodeStringPageData( auto const offptr = reinterpret_cast(nesting_info_base[leaf_level_index].data_out); block_excl_sum(offptr, value_count, s->page.str_offset); + + if (t == 0 and s->error != 0) { + cuda::atomic_ref ref{*error_code}; + ref.fetch_or(s->error, cuda::std::memory_order_relaxed); + } } } // anonymous namespace @@ -775,6 +785,7 @@ void __host__ DecodeStringPageData(cudf::detail::hostdevice_vector& pa size_t num_rows, size_t min_row, int level_type_size, + int32_t* error_code, rmm::cuda_stream_view stream) { CUDF_EXPECTS(pages.size() > 0, "There is no page to decode"); @@ -783,11 +794,11 @@ void __host__ DecodeStringPageData(cudf::detail::hostdevice_vector& pa dim3 dim_grid(pages.size(), 1); // 1 threadblock per page if (level_type_size == 1) { - gpuDecodeStringPageData - <<>>(pages.device_ptr(), chunks, min_row, num_rows); + gpuDecodeStringPageData<<>>( + pages.device_ptr(), chunks, min_row, num_rows, error_code); } else { - gpuDecodeStringPageData - <<>>(pages.device_ptr(), chunks, min_row, num_rows); + gpuDecodeStringPageData<<>>( + pages.device_ptr(), chunks, min_row, num_rows, error_code); } } diff --git a/cpp/src/io/parquet/parquet_gpu.hpp b/cpp/src/io/parquet/parquet_gpu.hpp index a760c2448dc..3c37c0df021 100644 --- a/cpp/src/io/parquet/parquet_gpu.hpp +++ b/cpp/src/io/parquet/parquet_gpu.hpp @@ -54,6 +54,21 @@ constexpr int rolling_index(int index) return index % rolling_size; } +/** + * @brief Enum for the different types of errors that can occur during decoding. + * + * These values are used as bitmasks, so they must be powers of 2. + */ +enum class decode_error : int32_t { + DATA_STREAM_OVERRUN = 0x1, + LEVEL_STREAM_OVERRUN = 0x2, + UNSUPPORTED_ENCODING = 0x4, + INVALID_LEVEL_RUN = 0x8, + INVALID_DATA_TYPE = 0x10, + EMPTY_PAGE = 0x20, + INVALID_DICT_WIDTH = 0x40, +}; + /** * @brief Struct representing an input column in the file. 
*/ @@ -566,6 +581,7 @@ void ComputePageStringSizes(cudf::detail::hostdevice_vector& pages, * @param[in] num_rows Total number of rows to read * @param[in] min_row Minimum number of rows to read * @param[in] level_type_size Size in bytes of the type for level decoding + * @param[out] error_code Error code for kernel failures * @param[in] stream CUDA stream to use */ void DecodePageData(cudf::detail::hostdevice_vector& pages, @@ -573,6 +589,7 @@ void DecodePageData(cudf::detail::hostdevice_vector& pages, size_t num_rows, size_t min_row, int level_type_size, + int32_t* error_code, rmm::cuda_stream_view stream); /** @@ -586,6 +603,7 @@ void DecodePageData(cudf::detail::hostdevice_vector& pages, * @param[in] num_rows Total number of rows to read * @param[in] min_row Minimum number of rows to read * @param[in] level_type_size Size in bytes of the type for level decoding + * @param[out] error_code Error code for kernel failures * @param[in] stream CUDA stream to use */ void DecodeStringPageData(cudf::detail::hostdevice_vector& pages, @@ -593,6 +611,7 @@ void DecodeStringPageData(cudf::detail::hostdevice_vector& pages, size_t num_rows, size_t min_row, int level_type_size, + int32_t* error_code, rmm::cuda_stream_view stream); /** @@ -606,6 +625,7 @@ void DecodeStringPageData(cudf::detail::hostdevice_vector& pages, * @param[in] num_rows Total number of rows to read * @param[in] min_row Minimum number of rows to read * @param[in] level_type_size Size in bytes of the type for level decoding + * @param[out] error_code Error code for kernel failures * @param[in] stream CUDA stream to use, default 0 */ void DecodeDeltaBinary(cudf::detail::hostdevice_vector& pages, @@ -613,6 +633,7 @@ void DecodeDeltaBinary(cudf::detail::hostdevice_vector& pages, size_t num_rows, size_t min_row, int level_type_size, + int32_t* error_code, rmm::cuda_stream_view stream); /** diff --git a/cpp/src/io/parquet/reader_impl.cpp b/cpp/src/io/parquet/reader_impl.cpp index 8b0a0bd4eb0..6cbe64e227b 100644 --- a/cpp/src/io/parquet/reader_impl.cpp +++ b/cpp/src/io/parquet/reader_impl.cpp @@ -163,6 +163,8 @@ void reader::impl::decode_page_data(size_t skip_rows, size_t num_rows) chunk_nested_valids.host_to_device_async(_stream); chunk_nested_data.host_to_device_async(_stream); + rmm::device_scalar error_code(0, _stream); + // get the number of streams we need from the pool and tell them to wait on the H2D copies int const nkernels = std::bitset<32>(kernel_mask).count(); auto streams = cudf::detail::fork_streams(_stream, nkernels); @@ -174,17 +176,20 @@ void reader::impl::decode_page_data(size_t skip_rows, size_t num_rows) if (has_strings) { auto& stream = streams[s_idx++]; chunk_nested_str_data.host_to_device_async(stream); - gpu::DecodeStringPageData(pages, chunks, num_rows, skip_rows, level_type_size, stream); + gpu::DecodeStringPageData( + pages, chunks, num_rows, skip_rows, level_type_size, error_code.data(), stream); } // launch delta binary decoder if ((kernel_mask & gpu::KERNEL_MASK_DELTA_BINARY) != 0) { - gpu::DecodeDeltaBinary(pages, chunks, num_rows, skip_rows, level_type_size, streams[s_idx++]); + gpu::DecodeDeltaBinary( + pages, chunks, num_rows, skip_rows, level_type_size, error_code.data(), streams[s_idx++]); } // launch the catch-all page decoder if ((kernel_mask & gpu::KERNEL_MASK_GENERAL) != 0) { - gpu::DecodePageData(pages, chunks, num_rows, skip_rows, level_type_size, streams[s_idx++]); + gpu::DecodePageData( + pages, chunks, num_rows, skip_rows, level_type_size, error_code.data(), streams[s_idx++]); } // synchronize 
the streams @@ -193,7 +198,13 @@ void reader::impl::decode_page_data(size_t skip_rows, size_t num_rows) pages.device_to_host_async(_stream); page_nesting.device_to_host_async(_stream); page_nesting_decode.device_to_host_async(_stream); - _stream.synchronize(); + + auto const decode_error = error_code.value(_stream); + if (decode_error != 0) { + std::stringstream stream; + stream << std::hex << decode_error; + CUDF_FAIL("Parquet data decode failed with code(s) 0x" + stream.str()); + } // for list columns, add the final offset to every offset buffer. // TODO : make this happen in more efficiently. Maybe use thrust::for_each From 53f0f74f6c6d66441225278f19a69885fb8b43c6 Mon Sep 17 00:00:00 2001 From: nvdbaranec <56695930+nvdbaranec@users.noreply.github.com> Date: Wed, 27 Sep 2023 23:32:46 -0500 Subject: [PATCH 121/150] Support for progressive parquet chunked reading. (#14079) Previously, the parquet chunked reader operated by controlling the size of output chunks only. It would still ingest the entire input file and decompress it, which can take up a considerable amount of memory. With this new 'progressive' support, we also 'chunk' at the input level. Specifically, the user can pass a `pass_read_limit` value which controls how much memory is used for storing compressed/decompressed data. The reader will make multiple passes over the file, reading as many row groups as it can to attempt to fit within this limit. Within each pass, chunks are emitted as before. From the external user's perspective, the chunked read mechanism is the same. You call `has_next()` and `read_chunk()`. If the user has specified a value for `pass_read_limit`, the set of chunks produced might end up being different (although the concatenation of all of them will still be the same). The core idea of the code change is to introduce the notion of an internal `pass`. Previously we had a `file_intermediate_data` which held data across `read_chunk()` calls. There is now a `pass_intermediate_data` struct which holds information specific to a given pass. Much of the data that was previously invariant at the file level (row groups and chunks to process) is now stored in the pass intermediate data. As we begin each pass, we take the subset of global row groups and chunks that we are going to process for this pass, copy them to our intermediate data, and the remainder of the reader references this instead of the file-level data.
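As a rough usage sketch of the external behavior described above (the new constructor is shown just below), a caller might drive the reader like this. The file name and both limit values are placeholders, and the `has_next()`/`read_chunk()` loop is unchanged from the existing chunked read path:

```
// Illustrative only: progressive chunked reading with both limits set.
#include <cudf/io/parquet.hpp>

void read_progressively()
{
  auto const options = cudf::io::parquet_reader_options::builder(
                         cudf::io::source_info{"example.parquet"})
                         .build();

  // ~500 MB output chunks; ~1 GB of compressed/decompressed data per input pass
  cudf::io::chunked_parquet_reader reader(500'000'000, 1'000'000'000, options);

  while (reader.has_next()) {
    auto chunk = reader.read_chunk();  // table_with_metadata for this output chunk
    // ... consume chunk.tbl ...
  }
}
```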
In order to avoid breaking pre-existing interfaces, there's a new constructor for the `chunked_parquet_reader` class: ``` chunked_parquet_reader( std::size_t chunk_read_limit, std::size_t pass_read_limit, parquet_reader_options const& options, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); ``` Authors: - https://github.com/nvdbaranec Approvers: - Yunsong Wang (https://github.com/PointKernel) - Vukasin Milovanovic (https://github.com/vuule) URL: https://github.com/rapidsai/cudf/pull/14079 --- cpp/include/cudf/io/detail/parquet.hpp | 39 ++- cpp/include/cudf/io/parquet.hpp | 24 ++ cpp/src/io/functions.cpp | 17 + cpp/src/io/parquet/parquet_gpu.hpp | 69 +++- cpp/src/io/parquet/reader.cpp | 4 +- cpp/src/io/parquet/reader_impl.cpp | 128 ++++--- cpp/src/io/parquet/reader_impl.hpp | 52 ++- cpp/src/io/parquet/reader_impl_helpers.cpp | 4 +- cpp/src/io/parquet/reader_impl_helpers.hpp | 15 +- cpp/src/io/parquet/reader_impl_preprocess.cu | 344 +++++++++++++------ cpp/tests/io/parquet_chunked_reader_test.cpp | 68 +++- 11 files changed, 561 insertions(+), 203 deletions(-) diff --git a/cpp/include/cudf/io/detail/parquet.hpp b/cpp/include/cudf/io/detail/parquet.hpp index 3f2e1fa5e6c..074f690d2c7 100644 --- a/cpp/include/cudf/io/detail/parquet.hpp +++ b/cpp/include/cudf/io/detail/parquet.hpp @@ -91,7 +91,8 @@ class reader { class chunked_reader : private reader { public: /** - * @brief Constructor from a read size limit and an array of data sources with reader options. + * @brief Constructor from an output size memory limit and an input size memory limit and an array + * of data sources with reader options. * * The typical usage should be similar to this: * ``` * [...] * * ``` * - * If `chunk_read_limit == 0` (i.e., no reading limit), a call to `read_chunk()` will read the - * whole file and return a table containing all rows. + * If `chunk_read_limit == 0` (i.e., no output limit) and `pass_read_limit == 0` (no input + * temporary memory size limit), a call to `read_chunk()` will read the whole file and return a + * table containing all rows. + * + * The chunk_read_limit parameter controls the size of the output chunks produced. If the user + * specifies 100 MB of data, the reader will attempt to return chunks containing tables that have + * a total byte size (over all columns) of 100 MB or less. This is a soft limit and the code + * will not fail if it cannot satisfy the limit. It will make a best-effort attempt only. + * + * The pass_read_limit parameter controls how much temporary memory is used in the process of + * decoding the file. The primary contributor to this memory usage is the uncompressed size of + * the data read out of the file and the decompressed (but not yet decoded) size of the data. The + * granularity of a given pass is at the row group level. It will not attempt to read at the sub + * row-group level. + * + * Combined, the way to visualize passes and chunks is as follows: + * + * @code{.pseudo} + * for(each pass){ + * for(each output chunk within a pass){ + * return a table that fits within the output chunk limit + * } + * } + * @endcode + * + * With a pass_read_limit of `0` you are simply saying you have one pass that reads the entire + * file as normal.
* * @param chunk_read_limit Limit on total number of bytes to be returned per read, - * or `0` if there is no limit + * or `0` if there is no limit + * @param pass_read_limit Limit on total amount of memory used for temporary computations during + * loading, or `0` if there is no limit * @param sources Input `datasource` objects to read the dataset from * @param options Settings for controlling reading behavior - * @param stream CUDA stream used for device memory operations and kernel launches. + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource to use for device memory allocation */ explicit chunked_reader(std::size_t chunk_read_limit, + std::size_t pass_read_limit, std::vector>&& sources, parquet_reader_options const& options, rmm::cuda_stream_view stream, diff --git a/cpp/include/cudf/io/parquet.hpp b/cpp/include/cudf/io/parquet.hpp index 788ff15f3c1..deaf23d405a 100644 --- a/cpp/include/cudf/io/parquet.hpp +++ b/cpp/include/cudf/io/parquet.hpp @@ -445,6 +445,30 @@ class chunked_parquet_reader { parquet_reader_options const& options, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); + /** + * @brief Constructor for chunked reader. + * + * This constructor requires the same `parquet_reader_options` parameter as in + * `cudf::read_parquet()`, with additional parameters to specify the byte size limit of the + * output table for each read, and a byte limit on the amount of temporary memory to use + * when reading. pass_read_limit affects how many row groups we can read at a time by limiting + * the amount of memory dedicated to decompression space. pass_read_limit is a hint, not an + * absolute limit - if a single row group cannot fit within the limit given, it will still be + * loaded. + * + * @param chunk_read_limit Limit on total number of bytes to be returned per read, + * or `0` if there is no limit + * @param pass_read_limit Limit on the amount of memory used for reading and decompressing data, or + * `0` if there is no limit + * @param options The options used to read the Parquet file + * @param mr Device memory resource to use for device memory allocation + */ + chunked_parquet_reader( + std::size_t chunk_read_limit, + std::size_t pass_read_limit, + parquet_reader_options const& options, + rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); + /** * @brief Destructor, destroying the internal reader instance.
* diff --git a/cpp/src/io/functions.cpp b/cpp/src/io/functions.cpp index 45f8b0f8822..392a7850886 100644 --- a/cpp/src/io/functions.cpp +++ b/cpp/src/io/functions.cpp @@ -562,6 +562,23 @@ chunked_parquet_reader::chunked_parquet_reader(std::size_t chunk_read_limit, parquet_reader_options const& options, rmm::mr::device_memory_resource* mr) : reader{std::make_unique(chunk_read_limit, + 0, + make_datasources(options.get_source()), + options, + cudf::get_default_stream(), + mr)} +{ +} + +/** + * @copydoc cudf::io::chunked_parquet_reader::chunked_parquet_reader + */ +chunked_parquet_reader::chunked_parquet_reader(std::size_t chunk_read_limit, + std::size_t pass_read_limit, + parquet_reader_options const& options, + rmm::mr::device_memory_resource* mr) + : reader{std::make_unique(chunk_read_limit, + pass_read_limit, make_datasources(options.get_source()), options, cudf::get_default_stream(), diff --git a/cpp/src/io/parquet/parquet_gpu.hpp b/cpp/src/io/parquet/parquet_gpu.hpp index 3c37c0df021..51c862b376b 100644 --- a/cpp/src/io/parquet/parquet_gpu.hpp +++ b/cpp/src/io/parquet/parquet_gpu.hpp @@ -321,33 +321,74 @@ struct ColumnChunkDesc { }; /** - * @brief Struct to store raw/intermediate file data before parsing. + * @brief The row_group_info class + */ +struct row_group_info { + size_type index; // row group index within a file. aggregate_reader_metadata::get_row_group() is + // called with index and source_index + size_t start_row; + size_type source_index; // file index. + + row_group_info() = default; + + row_group_info(size_type index, size_t start_row, size_type source_index) + : index{index}, start_row{start_row}, source_index{source_index} + { + } +}; + +/** + * @brief Struct to store file-level data that remains constant for + * all passes/chunks for the file. */ struct file_intermediate_data { + // all row groups to read + std::vector row_groups{}; + + // all chunks from the selected row groups. We may end up reading these chunks progressively + // instead of all at once + std::vector chunks{}; + + // skip_rows/num_rows values for the entire file. these need to be adjusted per-pass because we + // may not be visiting every row group that contains these bounds + size_t global_skip_rows; + size_t global_num_rows; +}; + +/** + * @brief Structs to identify the reading row range for each chunk of rows in chunked reading. + */ +struct chunk_read_info { + size_t skip_rows; + size_t num_rows; +}; + +/** + * @brief Struct to store pass-level data that remains constant for a single pass. + */ +struct pass_intermediate_data { std::vector> raw_page_data; rmm::device_buffer decomp_page_data; + + // rowgroup, chunk and page information for the current pass. + std::vector row_groups{}; cudf::detail::hostdevice_vector chunks{}; cudf::detail::hostdevice_vector pages_info{}; cudf::detail::hostdevice_vector page_nesting_info{}; cudf::detail::hostdevice_vector page_nesting_decode_info{}; - rmm::device_buffer level_decode_data; - int level_type_size; -}; - -/** - * @brief Struct to store intermediate page data for parsing each chunk of rows in chunked reading. - */ -struct chunk_intermediate_data { rmm::device_uvector page_keys{0, rmm::cuda_stream_default}; rmm::device_uvector page_index{0, rmm::cuda_stream_default}; rmm::device_uvector str_dict_index{0, rmm::cuda_stream_default}; -}; -/** - * @brief Structs to identify the reading row range for each chunk of rows in chunked reading. 
- */ -struct chunk_read_info { + std::vector output_chunk_read_info; + std::size_t current_output_chunk{0}; + + rmm::device_buffer level_decode_data{}; + int level_type_size{0}; + + // skip_rows and num_rows values for this particular pass. these may be adjusted values from the + // global values stored in file_intermediate_data. size_t skip_rows; size_t num_rows; }; diff --git a/cpp/src/io/parquet/reader.cpp b/cpp/src/io/parquet/reader.cpp index 7365c102d8f..1e87447006d 100644 --- a/cpp/src/io/parquet/reader.cpp +++ b/cpp/src/io/parquet/reader.cpp @@ -43,12 +43,14 @@ table_with_metadata reader::read(parquet_reader_options const& options) } chunked_reader::chunked_reader(std::size_t chunk_read_limit, + std::size_t pass_read_limit, std::vector>&& sources, parquet_reader_options const& options, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { - _impl = std::make_unique(chunk_read_limit, std::move(sources), options, stream, mr); + _impl = std::make_unique( + chunk_read_limit, pass_read_limit, std::move(sources), options, stream, mr); } chunked_reader::~chunked_reader() = default; diff --git a/cpp/src/io/parquet/reader_impl.cpp b/cpp/src/io/parquet/reader_impl.cpp index 6cbe64e227b..ea40f29a070 100644 --- a/cpp/src/io/parquet/reader_impl.cpp +++ b/cpp/src/io/parquet/reader_impl.cpp @@ -29,10 +29,10 @@ namespace cudf::io::detail::parquet { void reader::impl::decode_page_data(size_t skip_rows, size_t num_rows) { - auto& chunks = _file_itm_data.chunks; - auto& pages = _file_itm_data.pages_info; - auto& page_nesting = _file_itm_data.page_nesting_info; - auto& page_nesting_decode = _file_itm_data.page_nesting_decode_info; + auto& chunks = _pass_itm_data->chunks; + auto& pages = _pass_itm_data->pages_info; + auto& page_nesting = _pass_itm_data->page_nesting_info; + auto& page_nesting_decode = _pass_itm_data->page_nesting_decode_info; // Should not reach here if there is no page data. 
CUDF_EXPECTS(pages.size() > 0, "There is no page to decode"); @@ -55,7 +55,7 @@ void reader::impl::decode_page_data(size_t skip_rows, size_t num_rows) std::vector col_sizes(_input_columns.size(), 0L); if (has_strings) { gpu::ComputePageStringSizes( - pages, chunks, skip_rows, num_rows, _file_itm_data.level_type_size, _stream); + pages, chunks, skip_rows, num_rows, _pass_itm_data->level_type_size, _stream); col_sizes = calculate_page_string_offsets(); @@ -169,7 +169,7 @@ void reader::impl::decode_page_data(size_t skip_rows, size_t num_rows) int const nkernels = std::bitset<32>(kernel_mask).count(); auto streams = cudf::detail::fork_streams(_stream, nkernels); - auto const level_type_size = _file_itm_data.level_type_size; + auto const level_type_size = _pass_itm_data->level_type_size; // launch string decoder int s_idx = 0; @@ -277,6 +277,7 @@ reader::impl::impl(std::vector>&& sources, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) : impl(0 /*chunk_read_limit*/, + 0 /*input_pass_read_limit*/, std::forward>>(sources), options, stream, @@ -285,11 +286,16 @@ reader::impl::impl(std::vector>&& sources, } reader::impl::impl(std::size_t chunk_read_limit, + std::size_t pass_read_limit, std::vector>&& sources, parquet_reader_options const& options, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) - : _stream{stream}, _mr{mr}, _sources{std::move(sources)}, _chunk_read_limit{chunk_read_limit} + : _stream{stream}, + _mr{mr}, + _sources{std::move(sources)}, + _output_chunk_read_limit{chunk_read_limit}, + _input_pass_read_limit{pass_read_limit} { // Open and parse the source dataset metadata _metadata = std::make_unique(_sources); @@ -313,11 +319,8 @@ reader::impl::impl(std::size_t chunk_read_limit, _timestamp_type.id()); // Save the states of the output buffers for reuse in `chunk_read()`. - // Don't need to do it if we read the file all at once. - if (_chunk_read_limit > 0) { - for (auto const& buff : _output_buffers) { - _output_buffers_template.emplace_back(inline_column_buffer::empty_like(buff)); - } + for (auto const& buff : _output_buffers) { + _output_buffers_template.emplace_back(inline_column_buffer::empty_like(buff)); } } @@ -327,32 +330,62 @@ void reader::impl::prepare_data(int64_t skip_rows, host_span const> row_group_indices, std::optional> filter) { - if (_file_preprocessed) { return; } + // if we have not preprocessed at the whole-file level, do that now + if (!_file_preprocessed) { + // if filter is not empty, then create output types as vector and pass for filtering. + std::vector output_types; + if (filter.has_value()) { + std::transform(_output_buffers.cbegin(), + _output_buffers.cend(), + std::back_inserter(output_types), + [](auto const& col) { return col.type; }); + } + std::tie( + _file_itm_data.global_skip_rows, _file_itm_data.global_num_rows, _file_itm_data.row_groups) = + _metadata->select_row_groups( + row_group_indices, skip_rows, num_rows, output_types, filter, _stream); + + if (_file_itm_data.global_num_rows > 0 && not _file_itm_data.row_groups.empty() && + not _input_columns.empty()) { + // fills in chunk information without physically loading or decompressing + // the associated data + load_global_chunk_info(); + + // compute schedule of input reads. Each rowgroup contains 1 chunk per column. For now + // we will read an entire row group at a time. 
However, it is possible to do + // sub-rowgroup reads if we made some estimates on individual chunk sizes (tricky) and + // changed the high level structure such that we weren't always reading an entire table's + // worth of columns at once. + compute_input_pass_row_group_info(); + } - // if filter is not empty, then create output types as vector and pass for filtering. - std::vector output_types; - if (filter.has_value()) { - std::transform(_output_buffers.cbegin(), - _output_buffers.cend(), - std::back_inserter(output_types), - [](auto const& col) { return col.type; }); + _file_preprocessed = true; } - auto const [skip_rows_corrected, num_rows_corrected, row_groups_info] = - _metadata->select_row_groups( - row_group_indices, skip_rows, num_rows, output_types, filter, _stream); - - if (num_rows_corrected > 0 && not row_groups_info.empty() && not _input_columns.empty()) { - load_and_decompress_data(row_groups_info, num_rows_corrected); - preprocess_pages( - skip_rows_corrected, num_rows_corrected, uses_custom_row_bounds, _chunk_read_limit); - - if (_chunk_read_limit == 0) { // read the whole file at once - CUDF_EXPECTS(_chunk_read_info.size() == 1, - "Reading the whole file should yield only one chunk."); + + // if we have to start a new pass, do that now + if (!_pass_preprocessed) { + auto const num_passes = _input_pass_row_group_offsets.size() - 1; + + // always create the pass struct, even if we end up with no passes. + // this will also cause the previous pass information to be deleted + _pass_itm_data = std::make_unique(); + + if (_file_itm_data.global_num_rows > 0 && not _file_itm_data.row_groups.empty() && + not _input_columns.empty() && _current_input_pass < num_passes) { + // setup the pass_intermediate_info for this pass. + setup_pass(); + + load_and_decompress_data(); + preprocess_pages(uses_custom_row_bounds, _output_chunk_read_limit); + + if (_output_chunk_read_limit == 0) { // read the whole file at once + CUDF_EXPECTS(_pass_itm_data->output_chunk_read_info.size() == 1, + "Reading the whole file should yield only one chunk."); + } } - } - _file_preprocessed = true; + _pass_preprocessed = true; + } } void reader::impl::populate_metadata(table_metadata& out_metadata) @@ -382,11 +415,12 @@ table_with_metadata reader::impl::read_chunk_internal( auto out_columns = std::vector>{}; out_columns.reserve(_output_buffers.size()); - if (!has_next() || _chunk_read_info.empty()) { + if (!has_next() || _pass_itm_data->output_chunk_read_info.empty()) { return finalize_output(out_metadata, out_columns, filter); } - auto const& read_info = _chunk_read_info[_current_read_chunk++]; + auto const& read_info = + _pass_itm_data->output_chunk_read_info[_pass_itm_data->current_output_chunk]; // Allocate memory buffers for the output columns. allocate_columns(read_info.skip_rows, read_info.num_rows, uses_custom_row_bounds); @@ -439,6 +473,17 @@ table_with_metadata reader::impl::finalize_output( _output_metadata = std::make_unique(out_metadata); } + // advance chunks/passes as necessary + _pass_itm_data->current_output_chunk++; + _chunk_count++; + if (_pass_itm_data->current_output_chunk >= _pass_itm_data->output_chunk_read_info.size()) { + _pass_itm_data->current_output_chunk = 0; + _pass_itm_data->output_chunk_read_info.clear(); + + _current_input_pass++; + _pass_preprocessed = false; + } + if (filter.has_value()) { auto read_table = std::make_unique
(std::move(out_columns)); auto predicate = cudf::detail::compute_column( @@ -458,7 +503,8 @@ table_with_metadata reader::impl::read( host_span const> row_group_indices, std::optional> filter) { - CUDF_EXPECTS(_chunk_read_limit == 0, "Reading the whole file must not have non-zero byte_limit."); + CUDF_EXPECTS(_output_chunk_read_limit == 0, + "Reading the whole file must not have non-zero byte_limit."); table_metadata metadata; populate_metadata(metadata); auto expr_conv = named_to_reference_converter(filter, metadata); @@ -472,7 +518,7 @@ table_with_metadata reader::impl::read_chunk() { // Reset the output buffers to their original states (right after reader construction). // Don't need to do it if we read the file all at once. - if (_chunk_read_limit > 0) { + if (_chunk_count > 0) { _output_buffers.resize(0); for (auto const& buff : _output_buffers_template) { _output_buffers.emplace_back(inline_column_buffer::empty_like(buff)); @@ -494,7 +540,11 @@ bool reader::impl::has_next() true /*uses_custom_row_bounds*/, {} /*row_group_indices, empty means read all row groups*/, std::nullopt /*filter*/); - return _current_read_chunk < _chunk_read_info.size(); + + auto const num_input_passes = + _input_pass_row_group_offsets.size() == 0 ? 0 : _input_pass_row_group_offsets.size() - 1; + return (_pass_itm_data->current_output_chunk < _pass_itm_data->output_chunk_read_info.size()) || + (_current_input_pass < num_input_passes); } namespace { diff --git a/cpp/src/io/parquet/reader_impl.hpp b/cpp/src/io/parquet/reader_impl.hpp index a980670e465..9445e4d1648 100644 --- a/cpp/src/io/parquet/reader_impl.hpp +++ b/cpp/src/io/parquet/reader_impl.hpp @@ -90,17 +90,20 @@ class reader::impl { * ``` * * Reading the whole given file at once through `read()` function is still supported if - * `chunk_read_limit == 0` (i.e., no reading limit). - * In such case, `read_chunk()` will also return rows of the entire file. + * `chunk_read_limit == 0` (i.e., no reading limit) and `pass_read_limit == 0` (no temporary + * memory limit). In such a case, `read_chunk()` will also return rows of the entire file. * * @param chunk_read_limit Limit on total number of bytes to be returned per read, * or `0` if there is no limit + * @param pass_read_limit Limit on memory usage for the purposes of decompression and processing + * of input, or `0` if there is no limit. * @param sources Dataset sources * @param options Settings for controlling reading behavior * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource to use for device memory allocation */ explicit impl(std::size_t chunk_read_limit, + std::size_t pass_read_limit, std::vector>&& sources, parquet_reader_options const& options, rmm::cuda_stream_view stream, @@ -133,22 +136,22 @@ class reader::impl { host_span const> row_group_indices, std::optional> filter); + void load_global_chunk_info(); + void compute_input_pass_row_group_info(); + void setup_pass(); + /** * @brief Create chunk information and start file reads * - * @param row_groups_info vector of information about row groups to read - * @param num_rows Maximum number of rows to read * @return pair of boolean indicating if compressed chunks were found and a vector of futures for * read completion */ - std::pair>> create_and_read_column_chunks( - cudf::host_span const row_groups_info, size_type num_rows); + std::pair>> read_and_decompress_column_chunks(); /** * @brief Load and decompress the input file(s) into memory.
*/ - void load_and_decompress_data(cudf::host_span const row_groups_info, - size_type num_rows); + void load_and_decompress_data(); /** * @brief Perform some preprocessing for page data and also compute the split locations @@ -161,17 +164,12 @@ class reader::impl { * * For flat schemas, these values are computed during header decoding (see gpuDecodePageHeaders). * - * @param skip_rows Crop all rows below skip_rows - * @param num_rows Maximum number of rows to read * @param uses_custom_row_bounds Whether or not num_rows and skip_rows represents user-specific * bounds * @param chunk_read_limit Limit on total number of bytes to be returned per read, * or `0` if there is no limit */ - void preprocess_pages(size_t skip_rows, - size_t num_rows, - bool uses_custom_row_bounds, - size_t chunk_read_limit); + void preprocess_pages(bool uses_custom_row_bounds, size_t chunk_read_limit); /** * @brief Allocate nesting information storage for all pages and set pointers to it. @@ -278,12 +276,28 @@ class reader::impl { std::optional> _reader_column_schema; data_type _timestamp_type{type_id::EMPTY}; - // Variables used for chunked reading: + // chunked reading happens in 2 parts: + // + // At the top level there is the "pass" in which we try and limit the + // total amount of temporary memory (compressed data, decompressed data) in use + // via _input_pass_read_limit. + // + // Within a pass, we produce one or more chunks of output, whose maximum total + // byte size is controlled by _output_chunk_read_limit. + cudf::io::parquet::gpu::file_intermediate_data _file_itm_data; - cudf::io::parquet::gpu::chunk_intermediate_data _chunk_itm_data; - std::vector _chunk_read_info; - std::size_t _chunk_read_limit{0}; - std::size_t _current_read_chunk{0}; + std::unique_ptr _pass_itm_data; + + // an array of offsets into _file_itm_data::global_chunks. Each pair of offsets represents + // the start/end of the chunks to be loaded for a given pass. 
+ std::vector _input_pass_row_group_offsets{}; + std::vector _input_pass_row_count{}; + std::size_t _current_input_pass{0}; + std::size_t _chunk_count{0}; + + std::size_t _output_chunk_read_limit{0}; + std::size_t _input_pass_read_limit{0}; + bool _pass_preprocessed{false}; bool _file_preprocessed{false}; }; diff --git a/cpp/src/io/parquet/reader_impl_helpers.cpp b/cpp/src/io/parquet/reader_impl_helpers.cpp index f6dbeb275fc..fcaa610fbb7 100644 --- a/cpp/src/io/parquet/reader_impl_helpers.cpp +++ b/cpp/src/io/parquet/reader_impl_helpers.cpp @@ -344,7 +344,7 @@ std::vector aggregate_reader_metadata::get_pandas_index_names() con return names; } -std::tuple> +std::tuple> aggregate_reader_metadata::select_row_groups( host_span const> row_group_indices, int64_t skip_rows_opt, @@ -362,7 +362,7 @@ aggregate_reader_metadata::select_row_groups( host_span const>(filtered_row_group_indices.value()); } } - std::vector selection; + std::vector selection; auto [rows_to_skip, rows_to_read] = [&]() { if (not row_group_indices.empty()) { return std::pair{}; } auto const from_opts = cudf::io::detail::skip_rows_num_rows_from_options( diff --git a/cpp/src/io/parquet/reader_impl_helpers.hpp b/cpp/src/io/parquet/reader_impl_helpers.hpp index 751ffc33123..61e4f94df0f 100644 --- a/cpp/src/io/parquet/reader_impl_helpers.hpp +++ b/cpp/src/io/parquet/reader_impl_helpers.hpp @@ -53,19 +53,6 @@ using namespace cudf::io::parquet; : data_type{t_id}; } -/** - * @brief The row_group_info class - */ -struct row_group_info { - size_type const index; - size_t const start_row; // TODO source index - size_type const source_index; - row_group_info(size_type index, size_t start_row, size_type source_index) - : index(index), start_row(start_row), source_index(source_index) - { - } -}; - /** * @brief Class for parsing dataset metadata */ @@ -194,7 +181,7 @@ class aggregate_reader_metadata { * @return A tuple of corrected row_start, row_count and list of row group indexes and its * starting row */ - [[nodiscard]] std::tuple> select_row_groups( + [[nodiscard]] std::tuple> select_row_groups( host_span const> row_group_indices, int64_t row_start, std::optional const& row_count, diff --git a/cpp/src/io/parquet/reader_impl_preprocess.cu b/cpp/src/io/parquet/reader_impl_preprocess.cu index a2db0de26bb..c731c467f2c 100644 --- a/cpp/src/io/parquet/reader_impl_preprocess.cu +++ b/cpp/src/io/parquet/reader_impl_preprocess.cu @@ -577,10 +577,10 @@ int decode_page_headers(cudf::detail::hostdevice_vector& c void reader::impl::allocate_nesting_info() { - auto const& chunks = _file_itm_data.chunks; - auto& pages = _file_itm_data.pages_info; - auto& page_nesting_info = _file_itm_data.page_nesting_info; - auto& page_nesting_decode_info = _file_itm_data.page_nesting_decode_info; + auto const& chunks = _pass_itm_data->chunks; + auto& pages = _pass_itm_data->pages_info; + auto& page_nesting_info = _pass_itm_data->page_nesting_info; + auto& page_nesting_decode_info = _pass_itm_data->page_nesting_decode_info; // compute total # of page_nesting infos needed and allocate space. doing this in one // buffer to keep it to a single gpu allocation @@ -702,38 +702,39 @@ void reader::impl::allocate_nesting_info() void reader::impl::allocate_level_decode_space() { - auto& pages = _file_itm_data.pages_info; + auto& pages = _pass_itm_data->pages_info; // TODO: this could be made smaller if we ignored dictionary pages and pages with no // repetition data. 
size_t const per_page_decode_buf_size = - LEVEL_DECODE_BUF_SIZE * 2 * _file_itm_data.level_type_size; + LEVEL_DECODE_BUF_SIZE * 2 * _pass_itm_data->level_type_size; auto const decode_buf_size = per_page_decode_buf_size * pages.size(); - _file_itm_data.level_decode_data = + _pass_itm_data->level_decode_data = rmm::device_buffer(decode_buf_size, _stream, rmm::mr::get_current_device_resource()); // distribute the buffers - uint8_t* buf = static_cast(_file_itm_data.level_decode_data.data()); + uint8_t* buf = static_cast(_pass_itm_data->level_decode_data.data()); for (size_t idx = 0; idx < pages.size(); idx++) { auto& p = pages[idx]; p.lvl_decode_buf[gpu::level_type::DEFINITION] = buf; - buf += (LEVEL_DECODE_BUF_SIZE * _file_itm_data.level_type_size); + buf += (LEVEL_DECODE_BUF_SIZE * _pass_itm_data->level_type_size); p.lvl_decode_buf[gpu::level_type::REPETITION] = buf; - buf += (LEVEL_DECODE_BUF_SIZE * _file_itm_data.level_type_size); + buf += (LEVEL_DECODE_BUF_SIZE * _pass_itm_data->level_type_size); } } -std::pair>> reader::impl::create_and_read_column_chunks( - cudf::host_span const row_groups_info, size_type num_rows) +std::pair>> reader::impl::read_and_decompress_column_chunks() { - auto& raw_page_data = _file_itm_data.raw_page_data; - auto& chunks = _file_itm_data.chunks; + auto const& row_groups_info = _pass_itm_data->row_groups; + auto const num_rows = _pass_itm_data->num_rows; + + auto& raw_page_data = _pass_itm_data->raw_page_data; + auto& chunks = _pass_itm_data->chunks; // Descriptors for all the chunks that make up the selected columns auto const num_input_columns = _input_columns.size(); auto const num_chunks = row_groups_info.size() * num_input_columns; - chunks = cudf::detail::hostdevice_vector(0, num_chunks, _stream); // Association between each column chunk and its source std::vector chunk_source_map(num_chunks); @@ -747,13 +748,68 @@ std::pair>> reader::impl::create_and_read_co // Initialize column chunk information size_t total_decompressed_size = 0; auto remaining_rows = num_rows; - std::vector> read_rowgroup_tasks; + std::vector> read_chunk_tasks; + size_type chunk_count = 0; for (auto const& rg : row_groups_info) { auto const& row_group = _metadata->get_row_group(rg.index, rg.source_index); - auto const row_group_start = rg.start_row; auto const row_group_source = rg.source_index; auto const row_group_rows = std::min(remaining_rows, row_group.num_rows); + // generate ColumnChunkDesc objects for everything to be decoded (all input columns) + for (size_t i = 0; i < num_input_columns; ++i) { + auto const& col = _input_columns[i]; + // look up metadata + auto& col_meta = _metadata->get_column_metadata(rg.index, rg.source_index, col.schema_idx); + + column_chunk_offsets[chunk_count] = + (col_meta.dictionary_page_offset != 0) + ? 
std::min(col_meta.data_page_offset, col_meta.dictionary_page_offset) + : col_meta.data_page_offset; + + // Map each column chunk to its column index and its source index + chunk_source_map[chunk_count] = row_group_source; + + if (col_meta.codec != Compression::UNCOMPRESSED) { + total_decompressed_size += col_meta.total_uncompressed_size; + } + + chunk_count++; + } + remaining_rows -= row_group_rows; + } + + // Read compressed chunk data to device memory + read_chunk_tasks.push_back(read_column_chunks_async(_sources, + raw_page_data, + chunks, + 0, + chunks.size(), + column_chunk_offsets, + chunk_source_map, + _stream)); + + CUDF_EXPECTS(remaining_rows == 0, "All rows data must be read."); + + return {total_decompressed_size > 0, std::move(read_chunk_tasks)}; +} + +void reader::impl::load_global_chunk_info() +{ + auto const num_rows = _file_itm_data.global_num_rows; + auto const& row_groups_info = _file_itm_data.row_groups; + auto& chunks = _file_itm_data.chunks; + + // Descriptors for all the chunks that make up the selected columns + auto const num_input_columns = _input_columns.size(); + auto const num_chunks = row_groups_info.size() * num_input_columns; + + // Initialize column chunk information + auto remaining_rows = num_rows; + for (auto const& rg : row_groups_info) { + auto const& row_group = _metadata->get_row_group(rg.index, rg.source_index); + auto const row_group_start = rg.start_row; + auto const row_group_rows = std::min(remaining_rows, row_group.num_rows); + // generate ColumnChunkDesc objects for everything to be decoded (all input columns) for (size_t i = 0; i < num_input_columns; ++i) { auto col = _input_columns[i]; @@ -768,11 +824,6 @@ std::pair>> reader::impl::create_and_read_co schema.converted_type, schema.type_length); - column_chunk_offsets[chunks.size()] = - (col_meta.dictionary_page_offset != 0) - ? std::min(col_meta.data_page_offset, col_meta.dictionary_page_offset) - : col_meta.data_page_offset; - chunks.push_back(gpu::ColumnChunkDesc(col_meta.total_compressed_size, nullptr, col_meta.num_values, @@ -792,92 +843,171 @@ std::pair>> reader::impl::create_and_read_co clock_rate, i, col.schema_idx)); - - // Map each column chunk to its column index and its source index - chunk_source_map[chunks.size() - 1] = row_group_source; - - if (col_meta.codec != Compression::UNCOMPRESSED) { - total_decompressed_size += col_meta.total_uncompressed_size; - } } + remaining_rows -= row_group_rows; } +} - // Read compressed chunk data to device memory - read_rowgroup_tasks.push_back(read_column_chunks_async(_sources, - raw_page_data, - chunks, - 0, - chunks.size(), - column_chunk_offsets, - chunk_source_map, - _stream)); +void reader::impl::compute_input_pass_row_group_info() +{ + // at this point, row_groups has already been filtered down to just the row groups we need to + // handle optional skip_rows/num_rows parameters. + auto const& row_groups_info = _file_itm_data.row_groups; + + // if the user hasn't specified an input size limit, read everything in a single pass. + if (_input_pass_read_limit == 0) { + _input_pass_row_group_offsets.push_back(0); + _input_pass_row_group_offsets.push_back(row_groups_info.size()); + return; + } - CUDF_EXPECTS(remaining_rows == 0, "All rows data must be read."); + // generate passes. make sure to account for the case where a single row group doesn't fit within + // + std::size_t const read_limit = + _input_pass_read_limit > 0 ? 
_input_pass_read_limit : std::numeric_limits::max(); + std::size_t cur_pass_byte_size = 0; + std::size_t cur_rg_start = 0; + std::size_t cur_row_count = 0; + _input_pass_row_group_offsets.push_back(0); + _input_pass_row_count.push_back(0); + + for (size_t cur_rg_index = 0; cur_rg_index < row_groups_info.size(); cur_rg_index++) { + auto const& rgi = row_groups_info[cur_rg_index]; + auto const& row_group = _metadata->get_row_group(rgi.index, rgi.source_index); + + // can we add this row group + if (cur_pass_byte_size + row_group.total_byte_size >= read_limit) { + // A single row group (the current one) is larger than the read limit: + // We always need to include at least one row group, so end the pass at the end of the current + // row group + if (cur_rg_start == cur_rg_index) { + _input_pass_row_group_offsets.push_back(cur_rg_index + 1); + _input_pass_row_count.push_back(cur_row_count + row_group.num_rows); + cur_rg_start = cur_rg_index + 1; + cur_pass_byte_size = 0; + } + // End the pass at the end of the previous row group + else { + _input_pass_row_group_offsets.push_back(cur_rg_index); + _input_pass_row_count.push_back(cur_row_count); + cur_rg_start = cur_rg_index; + cur_pass_byte_size = row_group.total_byte_size; + } + } else { + cur_pass_byte_size += row_group.total_byte_size; + } + cur_row_count += row_group.num_rows; + } + // add the last pass if necessary + if (_input_pass_row_group_offsets.back() != row_groups_info.size()) { + _input_pass_row_group_offsets.push_back(row_groups_info.size()); + _input_pass_row_count.push_back(cur_row_count); + } +} - return {total_decompressed_size > 0, std::move(read_rowgroup_tasks)}; +void reader::impl::setup_pass() +{ + // this will also cause the previous pass information to be deleted + _pass_itm_data = std::make_unique(); + + // setup row groups to be loaded for this pass + auto const row_group_start = _input_pass_row_group_offsets[_current_input_pass]; + auto const row_group_end = _input_pass_row_group_offsets[_current_input_pass + 1]; + auto const num_row_groups = row_group_end - row_group_start; + _pass_itm_data->row_groups.resize(num_row_groups); + std::copy(_file_itm_data.row_groups.begin() + row_group_start, + _file_itm_data.row_groups.begin() + row_group_end, + _pass_itm_data->row_groups.begin()); + + auto const num_passes = _input_pass_row_group_offsets.size() - 1; + CUDF_EXPECTS(_current_input_pass < num_passes, "Encountered an invalid read pass index"); + + auto const chunks_per_rowgroup = _input_columns.size(); + auto const num_chunks = chunks_per_rowgroup * num_row_groups; + + auto chunk_start = _file_itm_data.chunks.begin() + (row_group_start * chunks_per_rowgroup); + auto chunk_end = _file_itm_data.chunks.begin() + (row_group_end * chunks_per_rowgroup); + + _pass_itm_data->chunks = + cudf::detail::hostdevice_vector(num_chunks, _stream); + std::copy(chunk_start, chunk_end, _pass_itm_data->chunks.begin()); + + // adjust skip_rows and num_rows by what's available in the row groups we are processing + if (num_passes == 1) { + _pass_itm_data->skip_rows = _file_itm_data.global_skip_rows; + _pass_itm_data->num_rows = _file_itm_data.global_num_rows; + } else { + auto const global_start_row = _file_itm_data.global_skip_rows; + auto const global_end_row = global_start_row + _file_itm_data.global_num_rows; + auto const start_row = std::max(_input_pass_row_count[_current_input_pass], global_start_row); + auto const end_row = std::min(_input_pass_row_count[_current_input_pass + 1], global_end_row); + + // skip_rows is always global in the 
sense that it is relative to the first row of + // everything we will be reading, regardless of what pass we are on. + // num_rows is how many rows we are reading this pass. + _pass_itm_data->skip_rows = global_start_row + _input_pass_row_count[_current_input_pass]; + _pass_itm_data->num_rows = end_row - start_row; + } } -void reader::impl::load_and_decompress_data( - cudf::host_span const row_groups_info, size_type num_rows) +void reader::impl::load_and_decompress_data() { // This function should never be called if `num_rows == 0`. - CUDF_EXPECTS(num_rows > 0, "Number of reading rows must not be zero."); + CUDF_EXPECTS(_pass_itm_data->num_rows > 0, "Number of reading rows must not be zero."); - auto& raw_page_data = _file_itm_data.raw_page_data; - auto& decomp_page_data = _file_itm_data.decomp_page_data; - auto& chunks = _file_itm_data.chunks; - auto& pages = _file_itm_data.pages_info; + auto& raw_page_data = _pass_itm_data->raw_page_data; + auto& decomp_page_data = _pass_itm_data->decomp_page_data; + auto& chunks = _pass_itm_data->chunks; + auto& pages = _pass_itm_data->pages_info; - auto const [has_compressed_data, read_rowgroup_tasks] = - create_and_read_column_chunks(row_groups_info, num_rows); + auto const [has_compressed_data, read_chunks_tasks] = read_and_decompress_column_chunks(); - for (auto& task : read_rowgroup_tasks) { + for (auto& task : read_chunks_tasks) { task.wait(); } // Process dataset chunk pages into output columns auto const total_pages = count_page_headers(chunks, _stream); + if (total_pages <= 0) { return; } pages = cudf::detail::hostdevice_vector(total_pages, total_pages, _stream); - if (total_pages > 0) { - // decoding of column/page information - _file_itm_data.level_type_size = decode_page_headers(chunks, pages, _stream); - if (has_compressed_data) { - decomp_page_data = decompress_page_data(chunks, pages, _stream); - // Free compressed data - for (size_t c = 0; c < chunks.size(); c++) { - if (chunks[c].codec != parquet::Compression::UNCOMPRESSED) { raw_page_data[c].reset(); } - } + // decoding of column/page information + _pass_itm_data->level_type_size = decode_page_headers(chunks, pages, _stream); + if (has_compressed_data) { + decomp_page_data = decompress_page_data(chunks, pages, _stream); + // Free compressed data + for (size_t c = 0; c < chunks.size(); c++) { + if (chunks[c].codec != parquet::Compression::UNCOMPRESSED) { raw_page_data[c].reset(); } } + } - // build output column info - // walk the schema, building out_buffers that mirror what our final cudf columns will look - // like. important : there is not necessarily a 1:1 mapping between input columns and output - // columns. For example, parquet does not explicitly store a ColumnChunkDesc for struct - // columns. The "structiness" is simply implied by the schema. For example, this schema: - // required group field_id=1 name { - // required binary field_id=2 firstname (String); - // required binary field_id=3 middlename (String); - // required binary field_id=4 lastname (String); - // } - // will only contain 3 columns of data (firstname, middlename, lastname). But of course - // "name" is a struct column that we want to return, so we have to make sure that we - // create it ourselves. 
- // std::vector output_info = build_output_column_info(); - - // the following two allocate functions modify the page data - pages.device_to_host_sync(_stream); - { - // nesting information (sizes, etc) stored -per page- - // note : even for flat schemas, we allocate 1 level of "nesting" info - allocate_nesting_info(); + // build output column info + // walk the schema, building out_buffers that mirror what our final cudf columns will look + // like. important : there is not necessarily a 1:1 mapping between input columns and output + // columns. For example, parquet does not explicitly store a ColumnChunkDesc for struct + // columns. The "structiness" is simply implied by the schema. For example, this schema: + // required group field_id=1 name { + // required binary field_id=2 firstname (String); + // required binary field_id=3 middlename (String); + // required binary field_id=4 lastname (String); + // } + // will only contain 3 columns of data (firstname, middlename, lastname). But of course + // "name" is a struct column that we want to return, so we have to make sure that we + // create it ourselves. + // std::vector output_info = build_output_column_info(); + + // the following two allocate functions modify the page data + pages.device_to_host_sync(_stream); + { + // nesting information (sizes, etc) stored -per page- + // note : even for flat schemas, we allocate 1 level of "nesting" info + allocate_nesting_info(); - // level decode space - allocate_level_decode_space(); - } - pages.host_to_device_async(_stream); + // level decode space + allocate_level_decode_space(); } + pages.host_to_device_async(_stream); } namespace { @@ -1183,7 +1313,7 @@ std::vector find_splits(std::vector c */ std::vector compute_splits( cudf::detail::hostdevice_vector& pages, - gpu::chunk_intermediate_data const& id, + gpu::pass_intermediate_data const& id, size_t num_rows, size_t chunk_read_limit, rmm::cuda_stream_view stream) @@ -1539,13 +1669,12 @@ struct page_offset_output_iter { } // anonymous namespace -void reader::impl::preprocess_pages(size_t skip_rows, - size_t num_rows, - bool uses_custom_row_bounds, - size_t chunk_read_limit) +void reader::impl::preprocess_pages(bool uses_custom_row_bounds, size_t chunk_read_limit) { - auto& chunks = _file_itm_data.chunks; - auto& pages = _file_itm_data.pages_info; + auto const skip_rows = _pass_itm_data->skip_rows; + auto const num_rows = _pass_itm_data->num_rows; + auto& chunks = _pass_itm_data->chunks; + auto& pages = _pass_itm_data->pages_info; // compute page ordering. 
// @@ -1636,7 +1765,7 @@ void reader::impl::preprocess_pages(size_t skip_rows, // Build index for string dictionaries since they can't be indexed // directly due to variable-sized elements - _chunk_itm_data.str_dict_index = + _pass_itm_data->str_dict_index = cudf::detail::make_zeroed_device_uvector_async( total_str_dict_indexes, _stream, rmm::mr::get_current_device_resource()); @@ -1646,7 +1775,7 @@ void reader::impl::preprocess_pages(size_t skip_rows, CUDF_EXPECTS(input_col.schema_idx == chunks[c].src_col_schema, "Column/page schema index mismatch"); if (is_dict_chunk(chunks[c])) { - chunks[c].str_dict_index = _chunk_itm_data.str_dict_index.data() + str_ofs; + chunks[c].str_dict_index = _pass_itm_data->str_dict_index.data() + str_ofs; str_ofs += pages[page_count].num_input_values; } @@ -1677,7 +1806,7 @@ void reader::impl::preprocess_pages(size_t skip_rows, std::numeric_limits::max(), true, // compute num_rows chunk_read_limit > 0, // compute string sizes - _file_itm_data.level_type_size, + _pass_itm_data->level_type_size, _stream); // computes: @@ -1699,20 +1828,21 @@ void reader::impl::preprocess_pages(size_t skip_rows, } // preserve page ordering data for string decoder - _chunk_itm_data.page_keys = std::move(page_keys); - _chunk_itm_data.page_index = std::move(page_index); + _pass_itm_data->page_keys = std::move(page_keys); + _pass_itm_data->page_index = std::move(page_index); // compute splits if necessary. otherwise return a single split representing // the whole file. - _chunk_read_info = chunk_read_limit > 0 - ? compute_splits(pages, _chunk_itm_data, num_rows, chunk_read_limit, _stream) - : std::vector{{skip_rows, num_rows}}; + _pass_itm_data->output_chunk_read_info = + _output_chunk_read_limit > 0 + ? compute_splits(pages, *_pass_itm_data, num_rows, chunk_read_limit, _stream) + : std::vector{{skip_rows, num_rows}}; } void reader::impl::allocate_columns(size_t skip_rows, size_t num_rows, bool uses_custom_row_bounds) { - auto const& chunks = _file_itm_data.chunks; - auto& pages = _file_itm_data.pages_info; + auto const& chunks = _pass_itm_data->chunks; + auto& pages = _pass_itm_data->pages_info; // Should not reach here if there is no page data. 
CUDF_EXPECTS(pages.size() > 0, "There is no page to parse"); @@ -1729,7 +1859,7 @@ void reader::impl::allocate_columns(size_t skip_rows, size_t num_rows, bool uses num_rows, false, // num_rows is already computed false, // no need to compute string sizes - _file_itm_data.level_type_size, + _pass_itm_data->level_type_size, _stream); // print_pages(pages, _stream); @@ -1766,7 +1896,7 @@ void reader::impl::allocate_columns(size_t skip_rows, size_t num_rows, bool uses // compute output column sizes by examining the pages of the -input- columns if (has_lists) { - auto& page_index = _chunk_itm_data.page_index; + auto& page_index = _pass_itm_data->page_index; std::vector h_cols_info; h_cols_info.reserve(_input_columns.size()); @@ -1846,10 +1976,10 @@ void reader::impl::allocate_columns(size_t skip_rows, size_t num_rows, bool uses std::vector<size_t> reader::impl::calculate_page_string_offsets() { - auto& chunks = _file_itm_data.chunks; - auto& pages = _file_itm_data.pages_info; - auto const& page_keys = _chunk_itm_data.page_keys; - auto const& page_index = _chunk_itm_data.page_index; + auto& chunks = _pass_itm_data->chunks; + auto& pages = _pass_itm_data->pages_info; + auto const& page_keys = _pass_itm_data->page_keys; + auto const& page_index = _pass_itm_data->page_index; std::vector<size_t> col_sizes(_input_columns.size(), 0L); rmm::device_uvector<size_t> d_col_sizes(col_sizes.size(), _stream); diff --git a/cpp/tests/io/parquet_chunked_reader_test.cpp b/cpp/tests/io/parquet_chunked_reader_test.cpp index 9815304b965..05fb9a3ec48 100644 --- a/cpp/tests/io/parquet_chunked_reader_test.cpp +++ b/cpp/tests/io/parquet_chunked_reader_test.cpp @@ -100,11 +100,13 @@ auto write_file(std::vector<std::unique_ptr<cudf::column>>& input_columns, return std::pair{std::move(input_table), std::move(filepath)}; } -auto chunked_read(std::string const& filepath, std::size_t byte_limit) +auto chunked_read(std::string const& filepath, + std::size_t output_limit, + std::size_t input_limit = 0) { auto const read_opts = cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath}).build(); - auto reader = cudf::io::chunked_parquet_reader(byte_limit, read_opts); + auto reader = cudf::io::chunked_parquet_reader(output_limit, input_limit, read_opts); auto num_chunks = 0; auto out_tables = std::vector<std::unique_ptr<cudf::table>>{}; @@ -950,3 +952,65 @@ TEST_F(ParquetChunkedReaderTest, TestChunkedReadNullCount) EXPECT_EQ(reader.read_chunk().tbl->get_column(0).null_count(), page_limit_rows / 4); } while (reader.has_next()); } + +TEST_F(ParquetChunkedReaderTest, InputLimitSimple) +{ + auto const filepath = temp_env->get_temp_filepath("input_limit_10_rowgroups.parquet"); + + // This results in 25 row groups, at 4001150 bytes per row group + constexpr int num_rows = 25'000'000; + auto value_iter = cudf::detail::make_counting_transform_iterator(0, [](int i) { return i; }); + cudf::test::fixed_width_column_wrapper<int> expected(value_iter, value_iter + num_rows); + cudf::io::parquet_writer_options opts = + cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, + cudf::table_view{{expected}}) + // note: it is unnecessary to force compression to NONE here because the size we are using in + // the row group is the uncompressed data size. But forcing the dictionary policy to + // dictionary_policy::NEVER is necessary to prevent changes in the + // decompressed-but-not-yet-decoded data.
+ .dictionary_policy(cudf::io::dictionary_policy::NEVER); + + cudf::io::write_parquet(opts); + + { + // no chunking + auto const [result, num_chunks] = chunked_read(filepath, 0, 0); + EXPECT_EQ(num_chunks, 1); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->get_column(0)); + } + + { + // 25 chunks of 1 million rows each + auto const [result, num_chunks] = chunked_read(filepath, 0, 1); + EXPECT_EQ(num_chunks, 25); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->get_column(0)); + } + + { + // 25 chunks of 1 million rows each + auto const [result, num_chunks] = chunked_read(filepath, 0, 4000000); + EXPECT_EQ(num_chunks, 25); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->get_column(0)); + } + + { + // 25 chunks of 1 million rows each + auto const [result, num_chunks] = chunked_read(filepath, 0, 4100000); + EXPECT_EQ(num_chunks, 25); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->get_column(0)); + } + + { + // 12 chunks of 2 million rows each, plus 1 final chunk of 1 million rows. + auto const [result, num_chunks] = chunked_read(filepath, 0, 8002301); + EXPECT_EQ(num_chunks, 13); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->get_column(0)); + } + + { + // 1 big chunk + auto const [result, num_chunks] = chunked_read(filepath, 0, size_t{1} * 1024 * 1024 * 1024); + EXPECT_EQ(num_chunks, 1); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->get_column(0)); + } +} From 23d24d43fac8615166c38231f13fe8751a8aec42 Mon Sep 17 00:00:00 2001 From: Martin Marenz Date: Thu, 28 Sep 2023 19:08:55 +0200 Subject: [PATCH 122/150] Add `bytes_per_second` to distinct_count of stream_compaction nvbench. (#14172) This patch relates to #13735. Benchmark: [benchmark_distinct_count.txt](https://github.com/rapidsai/cudf/files/12700496/benchmark_distinct_count.txt) Authors: - Martin Marenz (https://github.com/Blonck) - Mark Harris (https://github.com/harrism) Approvers: - David Wendt (https://github.com/davidwendt) - Karthikeyan (https://github.com/karthikeyann) - Mark Harris (https://github.com/harrism) URL: https://github.com/rapidsai/cudf/pull/14172 --- cpp/benchmarks/stream_compaction/distinct_count.cpp | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/cpp/benchmarks/stream_compaction/distinct_count.cpp b/cpp/benchmarks/stream_compaction/distinct_count.cpp index 2b2c901b90f..3e324013d4e 100644 --- a/cpp/benchmarks/stream_compaction/distinct_count.cpp +++ b/cpp/benchmarks/stream_compaction/distinct_count.cpp @@ -40,6 +40,14 @@ static void bench_distinct_count(nvbench::state& state, nvbench::type_list auto const& data_column = data_table->get_column(0); auto const input_table = cudf::table_view{{data_column, data_column, data_column}}; + // Collect memory statistics for input and output.
+ state.add_global_memory_reads(input_table.num_rows() * input_table.num_columns()); + state.add_global_memory_writes(1); + if (null_probability > 0) { + state.add_global_memory_reads( + input_table.num_columns() * cudf::bitmask_allocation_size_bytes(input_table.num_rows())); + } + auto mem_stats_logger = cudf::memory_stats_logger(); // init stats logger state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value())); state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) { From b2f00809f40e2e81b01214177b412456d40404cc Mon Sep 17 00:00:00 2001 From: GALI PREM SAGAR Date: Thu, 28 Sep 2023 12:16:29 -0500 Subject: [PATCH 123/150] Pin dask and distributed for 23.10 release (#14225) This PR pins `dask` and `distributed` to `2023.9.2` for `23.10` release. Authors: - GALI PREM SAGAR (https://github.com/galipremsagar) Approvers: - Ray Douglass (https://github.com/raydouglass) - Peter Andreas Entschev (https://github.com/pentschev) --- ci/test_wheel_dask_cudf.sh | 2 +- conda/environments/all_cuda-118_arch-x86_64.yaml | 6 +++--- conda/environments/all_cuda-120_arch-x86_64.yaml | 6 +++--- conda/recipes/custreamz/meta.yaml | 6 +++--- conda/recipes/dask-cudf/meta.yaml | 12 ++++++------ conda/recipes/dask-cudf/run_test.sh | 4 ++-- dependencies.yaml | 6 +++--- python/dask_cudf/pyproject.toml | 4 ++-- 8 files changed, 23 insertions(+), 23 deletions(-) diff --git a/ci/test_wheel_dask_cudf.sh b/ci/test_wheel_dask_cudf.sh index d6e7f4bf65e..0abee09ca8a 100755 --- a/ci/test_wheel_dask_cudf.sh +++ b/ci/test_wheel_dask_cudf.sh @@ -11,7 +11,7 @@ RAPIDS_PY_WHEEL_NAME="cudf_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from python -m pip install --no-deps ./local-cudf-dep/cudf*.whl # Always install latest dask for testing -python -m pip install git+https://github.com/dask/dask.git@main git+https://github.com/dask/distributed.git@main git+https://github.com/rapidsai/dask-cuda.git@branch-23.10 +python -m pip install git+https://github.com/dask/dask.git@2023.9.2 git+https://github.com/dask/distributed.git@2023.9.2 git+https://github.com/rapidsai/dask-cuda.git@branch-23.10 # echo to expand wildcard before adding `[extra]` requires for pip python -m pip install $(echo ./dist/dask_cudf*.whl)[test] diff --git a/conda/environments/all_cuda-118_arch-x86_64.yaml b/conda/environments/all_cuda-118_arch-x86_64.yaml index 9fb991f9075..46b0b3799f2 100644 --- a/conda/environments/all_cuda-118_arch-x86_64.yaml +++ b/conda/environments/all_cuda-118_arch-x86_64.yaml @@ -25,10 +25,10 @@ dependencies: - cupy>=12.0.0 - cxx-compiler - cython>=3.0.0 -- dask-core>=2023.7.1 +- dask-core==2023.9.2 - dask-cuda==23.10.* -- dask>=2023.7.1 -- distributed>=2023.7.1 +- dask==2023.9.2 +- distributed==2023.9.2 - dlpack>=0.5,<0.6.0a0 - doxygen=1.9.1 - fastavro>=0.22.9 diff --git a/conda/environments/all_cuda-120_arch-x86_64.yaml b/conda/environments/all_cuda-120_arch-x86_64.yaml index 9ba0dd8dc38..0e137c91120 100644 --- a/conda/environments/all_cuda-120_arch-x86_64.yaml +++ b/conda/environments/all_cuda-120_arch-x86_64.yaml @@ -26,10 +26,10 @@ dependencies: - cupy>=12.0.0 - cxx-compiler - cython>=3.0.0 -- dask-core>=2023.7.1 +- dask-core==2023.9.2 - dask-cuda==23.10.* -- dask>=2023.7.1 -- distributed>=2023.7.1 +- dask==2023.9.2 +- distributed==2023.9.2 - dlpack>=0.5,<0.6.0a0 - doxygen=1.9.1 - fastavro>=0.22.9 diff --git a/conda/recipes/custreamz/meta.yaml b/conda/recipes/custreamz/meta.yaml index 7aaa40bffd0..233d51baf31 100644 --- 
a/conda/recipes/custreamz/meta.yaml +++ b/conda/recipes/custreamz/meta.yaml @@ -45,9 +45,9 @@ requirements: - streamz - cudf ={{ version }} - cudf_kafka ={{ version }} - - dask >=2023.7.1 - - dask-core >=2023.7.1 - - distributed >=2023.7.1 + - dask ==2023.9.2 + - dask-core ==2023.9.2 + - distributed ==2023.9.2 - python-confluent-kafka >=1.9.0,<1.10.0a0 - {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }} diff --git a/conda/recipes/dask-cudf/meta.yaml b/conda/recipes/dask-cudf/meta.yaml index 12809ba648f..4c8af071074 100644 --- a/conda/recipes/dask-cudf/meta.yaml +++ b/conda/recipes/dask-cudf/meta.yaml @@ -38,16 +38,16 @@ requirements: host: - python - cudf ={{ version }} - - dask >=2023.7.1 - - dask-core >=2023.7.1 - - distributed >=2023.7.1 + - dask ==2023.9.2 + - dask-core ==2023.9.2 + - distributed ==2023.9.2 - cuda-version ={{ cuda_version }} run: - python - cudf ={{ version }} - - dask >=2023.7.1 - - dask-core >=2023.7.1 - - distributed >=2023.7.1 + - dask ==2023.9.2 + - dask-core ==2023.9.2 + - distributed ==2023.9.2 - {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }} test: diff --git a/conda/recipes/dask-cudf/run_test.sh b/conda/recipes/dask-cudf/run_test.sh index 7dc54747a0c..c79c014a89a 100644 --- a/conda/recipes/dask-cudf/run_test.sh +++ b/conda/recipes/dask-cudf/run_test.sh @@ -18,10 +18,10 @@ if [ "${ARCH}" = "aarch64" ]; then fi # Dask & Distributed option to install main(nightly) or `conda-forge` packages. -export INSTALL_DASK_MAIN=1 +export INSTALL_DASK_MAIN=0 # Dask version to install when `INSTALL_DASK_MAIN=0` -export DASK_STABLE_VERSION="2023.7.1" +export DASK_STABLE_VERSION="2023.9.2" # Install the conda-forge or nightly version of dask and distributed if [[ "${INSTALL_DASK_MAIN}" == 1 ]]; then diff --git a/dependencies.yaml b/dependencies.yaml index 5586f54348c..b21472df4fd 100644 --- a/dependencies.yaml +++ b/dependencies.yaml @@ -491,12 +491,12 @@ dependencies: common: - output_types: [conda, requirements, pyproject] packages: - - dask>=2023.7.1 - - distributed>=2023.7.1 + - dask==2023.9.2 + - distributed==2023.9.2 - output_types: conda packages: - cupy>=12.0.0 - - dask-core>=2023.7.1 # dask-core in conda is the actual package & dask is the meta package + - dask-core==2023.9.2 # dask-core in conda is the actual package & dask is the meta package - output_types: pyproject packages: - &cudf cudf==23.10.* diff --git a/python/dask_cudf/pyproject.toml b/python/dask_cudf/pyproject.toml index 922da366422..41b57b71749 100644 --- a/python/dask_cudf/pyproject.toml +++ b/python/dask_cudf/pyproject.toml @@ -20,8 +20,8 @@ requires-python = ">=3.9" dependencies = [ "cudf==23.10.*", "cupy-cuda11x>=12.0.0", - "dask>=2023.7.1", - "distributed>=2023.7.1", + "dask==2023.9.2", + "distributed==2023.9.2", "fsspec>=0.6.0", "numpy>=1.21,<1.25", "pandas>=1.3,<1.6.0dev0", From 59b09fd097e39bd15646eac1156889692974dc5f Mon Sep 17 00:00:00 2001 From: Bradley Dice Date: Fri, 29 Sep 2023 11:10:25 -0500 Subject: [PATCH 124/150] cuDF: Build CUDA 12.0 ARM conda packages. (#14112) This PR builds conda packages using CUDA 12 on ARM. This work is targeting 23.12 and depends on https://github.com/rapidsai/rmm/pull/1330. Closes #14128. 
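The mechanics are the same for every workflow touched below: each reusable-workflow `uses:` reference is repointed from the pinned `branch-23.12` of rapidsai/shared-action-workflows to the `cuda-120-arm` feature branch that carries the CUDA 12 ARM support. A minimal sketch of the pattern (the job shown mirrors the diffs below rather than quoting any single one):

  jobs:
    conda-cpp-build:
      secrets: inherit
      # The ref after '@' selects which revision of the shared workflow runs;
      # this patch temporarily swaps 'branch-23.12' for 'cuda-120-arm'.
      uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-build.yaml@cuda-120-arm
      with:
        build_type: pull-request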
Authors: - Bradley Dice (https://github.com/bdice) Approvers: - Ray Douglass (https://github.com/raydouglass) URL: https://github.com/rapidsai/cudf/pull/14112 --- .github/workflows/build.yaml | 16 ++++++++-------- .github/workflows/pr.yaml | 28 ++++++++++++++-------------- .github/workflows/test.yaml | 16 ++++++++-------- dependencies.yaml | 20 ++------------------ 4 files changed, 32 insertions(+), 48 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index ab028eb89cc..dc2c81d1c77 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -28,7 +28,7 @@ concurrency: jobs: cpp-build: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-build.yaml@branch-23.12 + uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-build.yaml@cuda-120-arm with: build_type: ${{ inputs.build_type || 'branch' }} branch: ${{ inputs.branch }} @@ -37,7 +37,7 @@ jobs: python-build: needs: [cpp-build] secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-build.yaml@branch-23.12 + uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-build.yaml@cuda-120-arm with: build_type: ${{ inputs.build_type || 'branch' }} branch: ${{ inputs.branch }} @@ -46,7 +46,7 @@ jobs: upload-conda: needs: [cpp-build, python-build] secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-upload-packages.yaml@branch-23.12 + uses: rapidsai/shared-action-workflows/.github/workflows/conda-upload-packages.yaml@cuda-120-arm with: build_type: ${{ inputs.build_type || 'branch' }} branch: ${{ inputs.branch }} @@ -57,7 +57,7 @@ jobs: if: github.ref_type == 'branch' needs: python-build secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.12 + uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@cuda-120-arm with: arch: "amd64" branch: ${{ inputs.branch }} @@ -69,7 +69,7 @@ jobs: sha: ${{ inputs.sha }} wheel-build-cudf: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-build.yaml@branch-23.12 + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-build.yaml@cuda-120-arm with: build_type: ${{ inputs.build_type || 'branch' }} branch: ${{ inputs.branch }} @@ -79,7 +79,7 @@ jobs: wheel-publish-cudf: needs: wheel-build-cudf secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-publish.yaml@branch-23.12 + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-publish.yaml@cuda-120-arm with: build_type: ${{ inputs.build_type || 'branch' }} branch: ${{ inputs.branch }} @@ -89,7 +89,7 @@ jobs: wheel-build-dask-cudf: needs: wheel-publish-cudf secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-build.yaml@branch-23.12 + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-build.yaml@cuda-120-arm with: matrix_filter: map(select(.ARCH == "amd64" and .PY_VER == "3.10" and (.CUDA_VER == "11.8.0" or .CUDA_VER == "12.0.1"))) build_type: ${{ inputs.build_type || 'branch' }} @@ -100,7 +100,7 @@ jobs: wheel-publish-dask-cudf: needs: wheel-build-dask-cudf secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-publish.yaml@branch-23.12 + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-publish.yaml@cuda-120-arm with: build_type: ${{ inputs.build_type || 'branch' }} branch: ${{ inputs.branch }} diff --git a/.github/workflows/pr.yaml 
b/.github/workflows/pr.yaml index 214f9c90b41..047b80f2e5c 100644 --- a/.github/workflows/pr.yaml +++ b/.github/workflows/pr.yaml @@ -26,34 +26,34 @@ jobs: - wheel-build-dask-cudf - wheel-tests-dask-cudf secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/pr-builder.yaml@branch-23.12 + uses: rapidsai/shared-action-workflows/.github/workflows/pr-builder.yaml@cuda-120-arm checks: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/checks.yaml@branch-23.12 + uses: rapidsai/shared-action-workflows/.github/workflows/checks.yaml@cuda-120-arm with: enable_check_generated_files: false conda-cpp-build: needs: checks secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-build.yaml@branch-23.12 + uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-build.yaml@cuda-120-arm with: build_type: pull-request conda-cpp-tests: needs: conda-cpp-build secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-tests.yaml@branch-23.12 + uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-tests.yaml@cuda-120-arm with: build_type: pull-request conda-python-build: needs: conda-cpp-build secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-build.yaml@branch-23.12 + uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-build.yaml@cuda-120-arm with: build_type: pull-request conda-python-cudf-tests: needs: conda-python-build secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-tests.yaml@branch-23.12 + uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-tests.yaml@cuda-120-arm with: build_type: pull-request test_script: "ci/test_python_cudf.sh" @@ -61,14 +61,14 @@ jobs: # Tests for dask_cudf, custreamz, cudf_kafka are separated for CI parallelism needs: conda-python-build secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-tests.yaml@branch-23.12 + uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-tests.yaml@cuda-120-arm with: build_type: pull-request test_script: "ci/test_python_other.sh" conda-java-tests: needs: conda-cpp-build secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.12 + uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@cuda-120-arm with: build_type: pull-request node_type: "gpu-v100-latest-1" @@ -78,7 +78,7 @@ jobs: conda-notebook-tests: needs: conda-python-build secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.12 + uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@cuda-120-arm with: build_type: pull-request node_type: "gpu-v100-latest-1" @@ -88,7 +88,7 @@ jobs: docs-build: needs: conda-python-build secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.12 + uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@cuda-120-arm with: build_type: pull-request node_type: "gpu-v100-latest-1" @@ -98,21 +98,21 @@ jobs: wheel-build-cudf: needs: checks secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-build.yaml@branch-23.12 + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-build.yaml@cuda-120-arm with: build_type: pull-request script: "ci/build_wheel_cudf.sh" wheel-tests-cudf: needs: wheel-build-cudf secrets: inherit - uses: 
rapidsai/shared-action-workflows/.github/workflows/wheels-test.yaml@branch-23.12 + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-test.yaml@cuda-120-arm with: build_type: pull-request script: ci/test_wheel_cudf.sh wheel-build-dask-cudf: needs: wheel-tests-cudf secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-build.yaml@branch-23.12 + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-build.yaml@cuda-120-arm with: matrix_filter: map(select(.ARCH == "amd64" and .PY_VER == "3.10" and (.CUDA_VER == "11.8.0" or .CUDA_VER == "12.0.1"))) build_type: pull-request @@ -120,7 +120,7 @@ jobs: wheel-tests-dask-cudf: needs: wheel-build-dask-cudf secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-test.yaml@branch-23.12 + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-test.yaml@cuda-120-arm with: matrix_filter: map(select(.ARCH == "amd64" and .PY_VER == "3.10" and (.CUDA_VER == "11.8.0" or .CUDA_VER == "12.0.1"))) build_type: pull-request diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 9ca32bcfe03..e58227c30dc 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -16,7 +16,7 @@ on: jobs: conda-cpp-tests: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-tests.yaml@branch-23.12 + uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-tests.yaml@cuda-120-arm with: build_type: nightly branch: ${{ inputs.branch }} @@ -24,7 +24,7 @@ jobs: sha: ${{ inputs.sha }} conda-cpp-memcheck-tests: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.12 + uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@cuda-120-arm with: build_type: nightly branch: ${{ inputs.branch }} @@ -36,7 +36,7 @@ jobs: run_script: "ci/test_cpp_memcheck.sh" conda-python-cudf-tests: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-tests.yaml@branch-23.12 + uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-tests.yaml@cuda-120-arm with: build_type: nightly branch: ${{ inputs.branch }} @@ -46,7 +46,7 @@ jobs: conda-python-other-tests: # Tests for dask_cudf, custreamz, cudf_kafka are separated for CI parallelism secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-tests.yaml@branch-23.12 + uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-tests.yaml@cuda-120-arm with: build_type: nightly branch: ${{ inputs.branch }} @@ -55,7 +55,7 @@ jobs: test_script: "ci/test_python_other.sh" conda-java-tests: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.12 + uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@cuda-120-arm with: build_type: nightly branch: ${{ inputs.branch }} @@ -67,7 +67,7 @@ jobs: run_script: "ci/test_java.sh" conda-notebook-tests: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.12 + uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@cuda-120-arm with: build_type: nightly branch: ${{ inputs.branch }} @@ -79,7 +79,7 @@ jobs: run_script: "ci/test_notebooks.sh" wheel-tests-cudf: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-test.yaml@branch-23.12 + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-test.yaml@cuda-120-arm with: build_type: nightly branch: 
${{ inputs.branch }} @@ -88,7 +88,7 @@ jobs: script: ci/test_wheel_cudf.sh wheel-tests-dask-cudf: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-test.yaml@branch-23.12 + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-test.yaml@cuda-120-arm with: matrix_filter: map(select(.ARCH == "amd64" and .PY_VER == "3.10" and (.CUDA_VER == "11.8.0" or .CUDA_VER == "12.0.1"))) build_type: nightly diff --git a/dependencies.yaml b/dependencies.yaml index c8ee66bd99f..c19e8765be3 100644 --- a/dependencies.yaml +++ b/dependencies.yaml @@ -227,25 +227,9 @@ dependencies: # in sync with the version pinned in get_arrow.cmake. - libarrow==12.0.1.* - librdkafka>=1.9.0,<1.10.0a0 + # Align nvcomp version with rapids-cmake + - nvcomp==2.6.1 - spdlog>=1.11.0,<1.12 - specific: - - output_types: conda - matrices: - - matrix: - arch: x86_64 - packages: - # Align nvcomp version with rapids-cmake - # TODO: not yet available for aarch64 CUDA 12 - - &nvcomp nvcomp==2.6.1 - - matrix: - arch: aarch64 - cuda: "11.8" - packages: - - *nvcomp - # TODO: Fallback matrix for aarch64 CUDA 12. After migrating to nvcomp 3, - # all CUDA/arch combinations should be supported by existing packages. - - matrix: - packages: build_wheels: common: - output_types: pyproject From 66a655ce80e8b0accb80ea4e23799d23a82a35a2 Mon Sep 17 00:00:00 2001 From: Bradley Dice Date: Tue, 3 Oct 2023 08:00:44 -0500 Subject: [PATCH 125/150] Fix inaccuracy in decimal128 rounding. (#14233) Fixes a bug where floating-point values were used in decimal128 rounding, giving wrong results. Closes https://github.com/rapidsai/cudf/issues/14210. Authors: - Bradley Dice (https://github.com/bdice) Approvers: - Divye Gala (https://github.com/divyegala) - Mark Harris (https://github.com/harrism) --- cpp/src/round/round.cu | 5 ++- cpp/tests/round/round_tests.cpp | 79 +++++++++++++++++++++++++++++++++ 2 files changed, 83 insertions(+), 1 deletion(-) diff --git a/cpp/src/round/round.cu b/cpp/src/round/round.cu index 4b3f80fc6e2..41cce57d55b 100644 --- a/cpp/src/round/round.cu +++ b/cpp/src/round/round.cu @@ -271,7 +271,10 @@ std::unique_ptr round_with(column_view const& input, out_view.template end(), static_cast(0)); } else { - Type const n = std::pow(10, scale_movement); + Type n = 10; + for (int i = 1; i < scale_movement; ++i) { + n *= 10; + } thrust::transform(rmm::exec_policy(stream), input.begin(), input.end(), diff --git a/cpp/tests/round/round_tests.cpp b/cpp/tests/round/round_tests.cpp index d802c0c2706..f97bb7a5323 100644 --- a/cpp/tests/round/round_tests.cpp +++ b/cpp/tests/round/round_tests.cpp @@ -703,4 +703,83 @@ TEST_F(RoundTests, BoolTestHalfUp) EXPECT_THROW(cudf::round(input, -2, cudf::rounding_method::HALF_UP), cudf::logic_error); } +// Use __uint128_t for demonstration. 
+constexpr __uint128_t operator""_uint128_t(const char* s) +{ + __uint128_t ret = 0; + for (int i = 0; s[i] != '\0'; ++i) { + ret *= 10; + if ('0' <= s[i] && s[i] <= '9') { ret += s[i] - '0'; } + } + return ret; +} + +TEST_F(RoundTests, HalfEvenErrorsA) +{ + using namespace numeric; + using RepType = cudf::device_storage_type_t; + using fp_wrapper = cudf::test::fixed_point_column_wrapper; + + { + // 0.5 at scale -37 should round HALF_EVEN to 0, because 0 is an even number + auto const input = + fp_wrapper{{5000000000000000000000000000000000000_uint128_t}, scale_type{-37}}; + auto const expected = fp_wrapper{{0}, scale_type{0}}; + auto const result = cudf::round(input, 0, cudf::rounding_method::HALF_EVEN); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->view()); + } +} + +TEST_F(RoundTests, HalfEvenErrorsB) +{ + using namespace numeric; + using RepType = cudf::device_storage_type_t; + using fp_wrapper = cudf::test::fixed_point_column_wrapper; + + { + // 0.125 at scale -37 should round HALF_EVEN to 0.12, because 2 is an even number + auto const input = + fp_wrapper{{1250000000000000000000000000000000000_uint128_t}, scale_type{-37}}; + auto const expected = fp_wrapper{{12}, scale_type{-2}}; + auto const result = cudf::round(input, 2, cudf::rounding_method::HALF_EVEN); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->view()); + } +} + +TEST_F(RoundTests, HalfEvenErrorsC) +{ + using namespace numeric; + using RepType = cudf::device_storage_type_t; + using fp_wrapper = cudf::test::fixed_point_column_wrapper; + + { + // 0.0625 at scale -37 should round HALF_EVEN to 0.062, because 2 is an even number + auto const input = + fp_wrapper{{0625000000000000000000000000000000000_uint128_t}, scale_type{-37}}; + auto const expected = fp_wrapper{{62}, scale_type{-3}}; + auto const result = cudf::round(input, 3, cudf::rounding_method::HALF_EVEN); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->view()); + } +} + +TEST_F(RoundTests, HalfUpErrorsA) +{ + using namespace numeric; + using RepType = cudf::device_storage_type_t; + using fp_wrapper = cudf::test::fixed_point_column_wrapper; + + { + // 0.25 at scale -37 should round HALF_UP to 0.3 + auto const input = + fp_wrapper{{2500000000000000000000000000000000000_uint128_t}, scale_type{-37}}; + auto const expected = fp_wrapper{{3}, scale_type{-1}}; + auto const result = cudf::round(input, 1, cudf::rounding_method::HALF_UP); + + CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->view()); + } +} + CUDF_TEST_PROGRAM_MAIN() From 3964950ba2fecf7f962917276058a6381d396246 Mon Sep 17 00:00:00 2001 From: Bradley Dice Date: Tue, 3 Oct 2023 15:11:15 -0500 Subject: [PATCH 126/150] Fix inaccurate ceil/floor and inaccurate rescaling casts of fixed-point values. (#14242) This is a follow-up PR to #14233. This PR fixes a bug where floating-point values were used as intermediates in ceil/floor unary operations and cast operations that require rescaling for fixed-point types, giving inaccurate results. 
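The root cause deserves a note: a double carries a 53-bit significand, so 10^23 and most larger powers of ten are not exactly representable, and `std::pow(10, n)` returns a neighboring value. A small standalone sketch (illustrative only, not part of the patch; assumes IEEE-754 doubles, a correctly rounded pow, and the GCC/Clang __int128_t extension) showing the error that the repeated-multiplication fix avoids:

  #include <cmath>
  #include <cstdio>

  int main()
  {
    // Exact 10^23, built with integer multiplications as the fix does
    __int128_t exact = 1;
    for (int i = 0; i < 23; ++i) {
      exact *= 10;
    }

    // 10^23 through a double intermediate rounds to the nearest representable value
    auto const approx = static_cast<__int128_t>(std::pow(10.0, 23.0));

    // Prints -8388608: the floating-point "10^23" is short by over 8 million,
    // so any rescaling that uses it as a factor is silently wrong
    std::printf("%lld\n", static_cast<long long>(approx - exact));
    return 0;
  }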
See also: - https://github.com/rapidsai/cudf/pull/14233#discussion_r1340786769 - https://github.com/rapidsai/cudf/issues/14243 Authors: - Bradley Dice (https://github.com/bdice) Approvers: - Mike Wilson (https://github.com/hyperbolic2346) - Vukasin Milovanovic (https://github.com/vuule) --- cpp/src/unary/cast_ops.cu | 8 +++++- cpp/src/unary/math_ops.cu | 8 ++++-- cpp/tests/unary/cast_tests.cpp | 40 ++++++++++++++++++++++++++++++ cpp/tests/unary/unary_ops_test.cpp | 33 ++++++++++++++++++++++++ 4 files changed, 86 insertions(+), 3 deletions(-) diff --git a/cpp/src/unary/cast_ops.cu b/cpp/src/unary/cast_ops.cu index f40ace8d10b..1c81f266200 100644 --- a/cpp/src/unary/cast_ops.cu +++ b/cpp/src/unary/cast_ops.cu @@ -199,7 +199,13 @@ std::unique_ptr rescale(column_view input, } return output_column; } - auto const scalar = make_fixed_point_scalar(std::pow(10, -diff), scale_type{diff}, stream); + + RepType scalar_value = 10; + for (int i = 1; i < -diff; ++i) { + scalar_value *= 10; + } + + auto const scalar = make_fixed_point_scalar(scalar_value, scale_type{diff}, stream); return detail::binary_operation(input, *scalar, binary_operator::DIV, type, stream, mr); } }; diff --git a/cpp/src/unary/math_ops.cu b/cpp/src/unary/math_ops.cu index 961f3a9e720..d0cae81a9c8 100644 --- a/cpp/src/unary/math_ops.cu +++ b/cpp/src/unary/math_ops.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022, NVIDIA CORPORATION. + * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -295,7 +295,11 @@ std::unique_ptr unary_op_with(column_view const& input, input.type(), input.size(), copy_bitmask(input, stream, mr), input.null_count(), stream, mr); auto out_view = result->mutable_view(); - Type const n = std::pow(10, -input.type().scale()); + + Type n = 10; + for (int i = 1; i < -input.type().scale(); ++i) { + n *= 10; + } thrust::transform(rmm::exec_policy(stream), input.begin(), diff --git a/cpp/tests/unary/cast_tests.cpp b/cpp/tests/unary/cast_tests.cpp index 9506e1918c0..d565359a4ea 100644 --- a/cpp/tests/unary/cast_tests.cpp +++ b/cpp/tests/unary/cast_tests.cpp @@ -30,6 +30,8 @@ #include #include +#include + #include #include @@ -967,6 +969,44 @@ TYPED_TEST(FixedPointTests, Decimal128ToDecimalXXWithLargerScale) CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->view()); } +TYPED_TEST(FixedPointTests, ValidateCastRescalePrecision) +{ + using namespace numeric; + using decimalXX = TypeParam; + using RepType = cudf::device_storage_type_t; + using fp_wrapper = cudf::test::fixed_point_column_wrapper; + + // This test is designed to protect against floating point conversion + // introducing errors in fixed-point arithmetic. The rescaling that occurs + // during casting to different scales should only use fixed-precision math. + // Realistically, we are only able to show precision failures due to floating + // conversion in a few very specific circumstances where dividing by specific + // powers of 10 works against us. Some examples: 10^23, 10^25, 10^26, 10^27, + // 10^30, 10^32, 10^36. See https://godbolt.org/z/cP1MddP8P for a derivation. + // For completeness and to ensure that we are not missing any other cases, we + // test casting to/from all scales in the range of each decimal type. 
Values + // that are powers of ten show this error more readily than non-powers of 10 + // because the rescaling factor is a power of 10, meaning that errors in + // division are more visible. + constexpr auto min_scale = -cuda::std::numeric_limits<RepType>::digits10; + for (int input_scale = 0; input_scale >= min_scale; --input_scale) { + for (int result_scale = 0; result_scale >= min_scale; --result_scale) { + RepType input_value = 1; + for (int k = 0; k > input_scale; --k) { + input_value *= 10; + } + RepType result_value = 1; + for (int k = 0; k > result_scale; --k) { + result_value *= 10; + } + auto const input = fp_wrapper{{input_value}, scale_type{input_scale}}; + auto const expected = fp_wrapper{{result_value}, scale_type{result_scale}}; + auto const result = cudf::cast(input, make_fixed_point_data_type<decimalXX>(result_scale)); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->view()); + } + } +} + TYPED_TEST(FixedPointTests, Decimal32ToDecimalXXWithLargerScaleAndNullMask) { using namespace numeric; diff --git a/cpp/tests/unary/unary_ops_test.cpp b/cpp/tests/unary/unary_ops_test.cpp index 49764f22373..76d1f769856 100644 --- a/cpp/tests/unary/unary_ops_test.cpp +++ b/cpp/tests/unary/unary_ops_test.cpp @@ -24,6 +24,8 @@ #include +#include <cuda/std/limits> + template <typename T> cudf::test::fixed_width_column_wrapper<T> create_fixed_columns(cudf::size_type start, cudf::size_type size, @@ -372,4 +374,35 @@ TYPED_TEST(FixedPointUnaryTests, FixedPointUnaryFloorLarge) CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->view()); } +TYPED_TEST(FixedPointUnaryTests, ValidateCeilFloorPrecision) +{ + using namespace numeric; + using decimalXX = TypeParam; + using RepType = cudf::device_storage_type_t<decimalXX>; + using fp_wrapper = cudf::test::fixed_point_column_wrapper<RepType>; + + // This test is designed to protect against floating point conversion + // introducing errors in fixed-point arithmetic. The rounding that occurs + // during ceil/floor should only use fixed-precision math. Realistically, + // we are only able to show precision failures due to floating conversion in + // a few very specific circumstances where dividing by specific powers of 10 + // works against us. Some examples: 10^23, 10^25, 10^26, 10^27, 10^30, + // 10^32, 10^36. See https://godbolt.org/z/cP1MddP8P for a derivation. For + // completeness and to ensure that we are not missing any other cases, we + // test all scales representable in the range of each decimal type. + constexpr auto min_scale = -cuda::std::numeric_limits<RepType>::digits10; + for (int input_scale = 0; input_scale >= min_scale; --input_scale) { + RepType input_value = 1; + for (int k = 0; k > input_scale; --k) { + input_value *= 10; + } + auto const input = fp_wrapper{{input_value}, scale_type{input_scale}}; + auto const expected = fp_wrapper{{input_value}, scale_type{input_scale}}; + auto const ceil_result = cudf::unary_operation(input, cudf::unary_operator::CEIL); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, ceil_result->view()); + auto const floor_result = cudf::unary_operation(input, cudf::unary_operator::FLOOR); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, floor_result->view()); + } +} + CUDF_TEST_PROGRAM_MAIN() From 29556a2514f4d274164a27a80539410da7e132d6 Mon Sep 17 00:00:00 2001 From: Vukasin Milovanovic Date: Tue, 3 Oct 2023 14:44:28 -0700 Subject: [PATCH 127/150] Remove the use of volatile in ORC (#14175) `volatile` should not be required in our code, unless there are compiler or synchronization issues. This PR removes the use in ORC reader and writer.
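The rationale: in these kernels every cross-thread read of the affected variables is already separated from the write by an explicit barrier (`__syncthreads()` or `__syncwarp()`), and the barrier is what publishes the write, so `volatile` only inhibited register caching. A minimal sketch of the pattern (illustrative, not taken from the ORC kernels):

  __global__ void broadcast(int* out)
  {
    __shared__ int s_value;  // no volatile needed

    if (threadIdx.x == 0) { s_value = 42; }  // a single thread writes shared memory
    __syncthreads();                         // the barrier orders the write before all reads

    out[threadIdx.x] = s_value;  // every thread observes the published value
  }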
Authors: - Vukasin Milovanovic (https://github.com/vuule) Approvers: - Yunsong Wang (https://github.com/PointKernel) - Bradley Dice (https://github.com/bdice) URL: https://github.com/rapidsai/cudf/pull/14175 --- cpp/src/io/orc/stats_enc.cu | 4 +- cpp/src/io/orc/stripe_data.cu | 82 +++++++++++++++-------------------- cpp/src/io/orc/stripe_enc.cu | 14 +++--- cpp/src/io/orc/stripe_init.cu | 2 +- 4 files changed, 46 insertions(+), 56 deletions(-) diff --git a/cpp/src/io/orc/stats_enc.cu b/cpp/src/io/orc/stats_enc.cu index 95f1db5bfd1..479a2dfada3 100644 --- a/cpp/src/io/orc/stats_enc.cu +++ b/cpp/src/io/orc/stats_enc.cu @@ -76,8 +76,8 @@ __global__ void __launch_bounds__(block_size, 1) { using block_scan = cub::BlockScan; __shared__ typename block_scan::TempStorage temp_storage; - volatile uint32_t stats_size = 0; - auto t = threadIdx.x; + uint32_t stats_size = 0; + auto t = threadIdx.x; __syncthreads(); for (thread_index_type start = 0; start < statistics_count; start += block_size) { uint32_t stats_len = 0, stats_pos; diff --git a/cpp/src/io/orc/stripe_data.cu b/cpp/src/io/orc/stripe_data.cu index 3edcd3d83b2..0b249bbdafe 100644 --- a/cpp/src/io/orc/stripe_data.cu +++ b/cpp/src/io/orc/stripe_data.cu @@ -142,9 +142,7 @@ struct orcdec_state_s { * @param[in] base Pointer to raw byte stream data * @param[in] len Stream length in bytes */ -static __device__ void bytestream_init(volatile orc_bytestream_s* bs, - uint8_t const* base, - uint32_t len) +static __device__ void bytestream_init(orc_bytestream_s* bs, uint8_t const* base, uint32_t len) { uint32_t pos = (len > 0) ? static_cast(7 & reinterpret_cast(base)) : 0; bs->base = base - pos; @@ -160,8 +158,7 @@ static __device__ void bytestream_init(volatile orc_bytestream_s* bs, * @param[in] bs Byte stream input * @param[in] bytes_consumed Number of bytes that were consumed */ -static __device__ void bytestream_flush_bytes(volatile orc_bytestream_s* bs, - uint32_t bytes_consumed) +static __device__ void bytestream_flush_bytes(orc_bytestream_s* bs, uint32_t bytes_consumed) { uint32_t pos = bs->pos; uint32_t len = bs->len; @@ -197,7 +194,7 @@ static __device__ void bytestream_fill(orc_bytestream_s* bs, int t) * @param[in] pos Position in byte stream * @return byte */ -inline __device__ uint8_t bytestream_readbyte(volatile orc_bytestream_s* bs, int pos) +inline __device__ uint8_t bytestream_readbyte(orc_bytestream_s* bs, int pos) { return bs->buf.u8[pos & (bytestream_buffer_size - 1)]; } @@ -209,7 +206,7 @@ inline __device__ uint8_t bytestream_readbyte(volatile orc_bytestream_s* bs, int * @param[in] pos Position in byte stream * @result bits */ -inline __device__ uint32_t bytestream_readu32(volatile orc_bytestream_s* bs, int pos) +inline __device__ uint32_t bytestream_readu32(orc_bytestream_s* bs, int pos) { uint32_t a = bs->buf.u32[(pos & (bytestream_buffer_size - 1)) >> 2]; uint32_t b = bs->buf.u32[((pos + 4) & (bytestream_buffer_size - 1)) >> 2]; @@ -224,7 +221,7 @@ inline __device__ uint32_t bytestream_readu32(volatile orc_bytestream_s* bs, int * @param[in] numbits number of bits * @return bits */ -inline __device__ uint64_t bytestream_readu64(volatile orc_bytestream_s* bs, int pos) +inline __device__ uint64_t bytestream_readu64(orc_bytestream_s* bs, int pos) { uint32_t a = bs->buf.u32[(pos & (bytestream_buffer_size - 1)) >> 2]; uint32_t b = bs->buf.u32[((pos + 4) & (bytestream_buffer_size - 1)) >> 2]; @@ -245,9 +242,7 @@ inline __device__ uint64_t bytestream_readu64(volatile orc_bytestream_s* bs, int * @param[in] 
numbits number of bits * @return decoded value */ -inline __device__ uint32_t bytestream_readbits(volatile orc_bytestream_s* bs, - int bitpos, - uint32_t numbits) +inline __device__ uint32_t bytestream_readbits(orc_bytestream_s* bs, int bitpos, uint32_t numbits) { int idx = bitpos >> 5; uint32_t a = __byte_perm(bs->buf.u32[(idx + 0) & bytestream_buffer_mask], 0, 0x0123); @@ -263,9 +258,7 @@ inline __device__ uint32_t bytestream_readbits(volatile orc_bytestream_s* bs, * @param[in] numbits number of bits * @return decoded value */ -inline __device__ uint64_t bytestream_readbits64(volatile orc_bytestream_s* bs, - int bitpos, - uint32_t numbits) +inline __device__ uint64_t bytestream_readbits64(orc_bytestream_s* bs, int bitpos, uint32_t numbits) { int idx = bitpos >> 5; uint32_t a = __byte_perm(bs->buf.u32[(idx + 0) & bytestream_buffer_mask], 0, 0x0123); @@ -288,7 +281,7 @@ inline __device__ uint64_t bytestream_readbits64(volatile orc_bytestream_s* bs, * @param[in] numbits number of bits * @param[out] result decoded value */ -inline __device__ void bytestream_readbe(volatile orc_bytestream_s* bs, +inline __device__ void bytestream_readbe(orc_bytestream_s* bs, int bitpos, uint32_t numbits, uint32_t& result) @@ -304,7 +297,7 @@ inline __device__ void bytestream_readbe(volatile orc_bytestream_s* bs, * @param[in] numbits number of bits * @param[out] result decoded value */ -inline __device__ void bytestream_readbe(volatile orc_bytestream_s* bs, +inline __device__ void bytestream_readbe(orc_bytestream_s* bs, int bitpos, uint32_t numbits, int32_t& result) @@ -321,7 +314,7 @@ inline __device__ void bytestream_readbe(volatile orc_bytestream_s* bs, * @param[in] numbits number of bits * @param[out] result decoded value */ -inline __device__ void bytestream_readbe(volatile orc_bytestream_s* bs, +inline __device__ void bytestream_readbe(orc_bytestream_s* bs, int bitpos, uint32_t numbits, uint64_t& result) @@ -337,7 +330,7 @@ inline __device__ void bytestream_readbe(volatile orc_bytestream_s* bs, * @param[in] numbits number of bits * @param[out] result decoded value */ -inline __device__ void bytestream_readbe(volatile orc_bytestream_s* bs, +inline __device__ void bytestream_readbe(orc_bytestream_s* bs, int bitpos, uint32_t numbits, int64_t& result) @@ -354,7 +347,7 @@ inline __device__ void bytestream_readbe(volatile orc_bytestream_s* bs, * @return length of varint in bytes */ template -inline __device__ uint32_t varint_length(volatile orc_bytestream_s* bs, int pos) +inline __device__ uint32_t varint_length(orc_bytestream_s* bs, int pos) { if (bytestream_readbyte(bs, pos) > 0x7f) { uint32_t next32 = bytestream_readu32(bs, pos + 1); @@ -392,7 +385,7 @@ inline __device__ uint32_t varint_length(volatile orc_bytestream_s* bs, int pos) * @return new position in byte stream buffer */ template -inline __device__ int decode_base128_varint(volatile orc_bytestream_s* bs, int pos, T& result) +inline __device__ int decode_base128_varint(orc_bytestream_s* bs, int pos, T& result) { uint32_t v = bytestream_readbyte(bs, pos++); if (v > 0x7f) { @@ -446,7 +439,7 @@ inline __device__ int decode_base128_varint(volatile orc_bytestream_s* bs, int p /** * @brief Decodes a signed int128 encoded as base-128 varint (used for decimals) */ -inline __device__ __int128_t decode_varint128(volatile orc_bytestream_s* bs, int pos) +inline __device__ __int128_t decode_varint128(orc_bytestream_s* bs, int pos) { auto byte = bytestream_readbyte(bs, pos++); __int128_t const sign_mask = -(int32_t)(byte & 1); @@ -463,7 +456,7 @@ inline 
__device__ __int128_t decode_varint128(volatile orc_bytestream_s* bs, int /** * @brief Decodes an unsigned 32-bit varint */ -inline __device__ int decode_varint(volatile orc_bytestream_s* bs, int pos, uint32_t& result) +inline __device__ int decode_varint(orc_bytestream_s* bs, int pos, uint32_t& result) { uint32_t u; pos = decode_base128_varint(bs, pos, u); @@ -474,7 +467,7 @@ inline __device__ int decode_varint(volatile orc_bytestream_s* bs, int pos, uint /** * @brief Decodes an unsigned 64-bit varint */ -inline __device__ int decode_varint(volatile orc_bytestream_s* bs, int pos, uint64_t& result) +inline __device__ int decode_varint(orc_bytestream_s* bs, int pos, uint64_t& result) { uint64_t u; pos = decode_base128_varint(bs, pos, u); @@ -485,7 +478,7 @@ inline __device__ int decode_varint(volatile orc_bytestream_s* bs, int pos, uint /** * @brief Signed version of 32-bit decode_varint */ -inline __device__ int decode_varint(volatile orc_bytestream_s* bs, int pos, int32_t& result) +inline __device__ int decode_varint(orc_bytestream_s* bs, int pos, int32_t& result) { uint32_t u; pos = decode_base128_varint(bs, pos, u); @@ -496,7 +489,7 @@ inline __device__ int decode_varint(volatile orc_bytestream_s* bs, int pos, int3 /** * @brief Signed version of 64-bit decode_varint */ -inline __device__ int decode_varint(volatile orc_bytestream_s* bs, int pos, int64_t& result) +inline __device__ int decode_varint(orc_bytestream_s* bs, int pos, int64_t& result) { uint64_t u; pos = decode_base128_varint(bs, pos, u); @@ -514,7 +507,7 @@ inline __device__ int decode_varint(volatile orc_bytestream_s* bs, int pos, int6 * @return number of values decoded */ template -inline __device__ void lengths_to_positions(volatile T* vals, uint32_t numvals, unsigned int t) +inline __device__ void lengths_to_positions(T* vals, uint32_t numvals, unsigned int t) { for (uint32_t n = 1; n < numvals; n <<= 1) { __syncthreads(); @@ -534,8 +527,8 @@ inline __device__ void lengths_to_positions(volatile T* vals, uint32_t numvals, * @return number of values decoded */ template -static __device__ uint32_t Integer_RLEv1( - orc_bytestream_s* bs, volatile orc_rlev1_state_s* rle, volatile T* vals, uint32_t maxvals, int t) +static __device__ uint32_t +Integer_RLEv1(orc_bytestream_s* bs, orc_rlev1_state_s* rle, T* vals, uint32_t maxvals, int t) { uint32_t numvals, numruns; if (t == 0) { @@ -642,8 +635,8 @@ static const __device__ __constant__ uint8_t ClosestFixedBitsMap[65] = { */ template static __device__ uint32_t Integer_RLEv2(orc_bytestream_s* bs, - volatile orc_rlev2_state_s* rle, - volatile T* vals, + orc_rlev2_state_s* rle, + T* vals, uint32_t maxvals, int t, bool has_buffered_values = false) @@ -883,7 +876,7 @@ static __device__ uint32_t Integer_RLEv2(orc_bytestream_s* bs, * * @return 32-bit value */ -inline __device__ uint32_t rle8_read_bool32(volatile uint32_t* vals, uint32_t bitpos) +inline __device__ uint32_t rle8_read_bool32(uint32_t* vals, uint32_t bitpos) { uint32_t a = vals[(bitpos >> 5) + 0]; uint32_t b = vals[(bitpos >> 5) + 1]; @@ -903,11 +896,8 @@ inline __device__ uint32_t rle8_read_bool32(volatile uint32_t* vals, uint32_t bi * * @return number of values decoded */ -static __device__ uint32_t Byte_RLE(orc_bytestream_s* bs, - volatile orc_byterle_state_s* rle, - volatile uint8_t* vals, - uint32_t maxvals, - int t) +static __device__ uint32_t +Byte_RLE(orc_bytestream_s* bs, orc_byterle_state_s* rle, uint8_t* vals, uint32_t maxvals, int t) { uint32_t numvals, numruns; int r, tr; @@ -1006,8 +996,8 @@ static const 
__device__ __constant__ int64_t kPow5i[28] = {1, * @return number of values decoded */ static __device__ int Decode_Decimals(orc_bytestream_s* bs, - volatile orc_byterle_state_s* scratch, - volatile orcdec_state_s::values& vals, + orc_byterle_state_s* scratch, + orcdec_state_s::values& vals, int val_scale, int numvals, type_id dtype_id, @@ -1241,8 +1231,8 @@ __global__ void __launch_bounds__(block_size) } __syncthreads(); while (s->top.dict.dict_len > 0) { - uint32_t numvals = min(s->top.dict.dict_len, blockDim.x), len; - volatile uint32_t* vals = s->vals.u32; + uint32_t numvals = min(s->top.dict.dict_len, blockDim.x), len; + uint32_t* vals = s->vals.u32; bytestream_fill(&s->bs, t); __syncthreads(); if (is_rlev1(s->chunk.encoding_kind)) { @@ -1310,12 +1300,12 @@ static __device__ void DecodeRowPositions(orcdec_state_s* s, min((row_decoder_buffer_size - s->u.rowdec.nz_count) * 2, blockDim.x)); if (s->chunk.valid_map_base != nullptr) { // We have a present stream - uint32_t rmax = s->top.data.end_row - min((uint32_t)first_row, s->top.data.end_row); - auto r = (uint32_t)(s->top.data.cur_row + s->top.data.nrows + t - first_row); - uint32_t valid = (t < nrows && r < rmax) - ? (((uint8_t const*)s->chunk.valid_map_base)[r >> 3] >> (r & 7)) & 1 - : 0; - volatile auto* row_ofs_plus1 = (volatile uint16_t*)&s->u.rowdec.row[s->u.rowdec.nz_count]; + uint32_t rmax = s->top.data.end_row - min((uint32_t)first_row, s->top.data.end_row); + auto r = (uint32_t)(s->top.data.cur_row + s->top.data.nrows + t - first_row); + uint32_t valid = (t < nrows && r < rmax) + ? (((uint8_t const*)s->chunk.valid_map_base)[r >> 3] >> (r & 7)) & 1 + : 0; + auto* row_ofs_plus1 = (uint16_t*)&s->u.rowdec.row[s->u.rowdec.nz_count]; uint32_t nz_pos, row_plus1, nz_count = s->u.rowdec.nz_count, last_row; if (t < nrows) { row_ofs_plus1[t] = valid; } lengths_to_positions(row_ofs_plus1, nrows, t); diff --git a/cpp/src/io/orc/stripe_enc.cu b/cpp/src/io/orc/stripe_enc.cu index 73c41e2bbcd..4841fb1141a 100644 --- a/cpp/src/io/orc/stripe_enc.cu +++ b/cpp/src/io/orc/stripe_enc.cu @@ -53,7 +53,7 @@ constexpr bool zero_pll_war = true; struct byterle_enc_state_s { uint32_t literal_run; uint32_t repeat_run; - volatile uint32_t rpt_map[(512 / 32) + 1]; + uint32_t rpt_map[(512 / 32) + 1]; }; struct intrle_enc_state_s { @@ -63,7 +63,7 @@ struct intrle_enc_state_s { uint32_t literal_w; uint32_t hdr_bytes; uint32_t pl_bytes; - volatile uint32_t delta_map[(512 / 32) + 1]; + uint32_t delta_map[(512 / 32) + 1]; }; struct strdata_enc_state_s { @@ -366,7 +366,7 @@ static __device__ uint32_t IntegerRLE( using block_reduce = cub::BlockReduce; uint8_t* dst = s->stream.data_ptrs[cid] + s->strm_pos[cid]; uint32_t out_cnt = 0; - __shared__ volatile uint64_t block_vmin; + __shared__ uint64_t block_vmin; while (numvals > 0) { T v0 = (t < numvals) ? 
inbuf[(inpos + t) & inmask] : 0; @@ -615,7 +615,7 @@ static __device__ void StoreStringData(uint8_t* dst, * @param[in] t thread id */ template -inline __device__ void lengths_to_positions(volatile T* vals, uint32_t numvals, unsigned int t) +inline __device__ void lengths_to_positions(T* vals, uint32_t numvals, unsigned int t) { for (uint32_t n = 1; n < numvals; n <<= 1) { __syncthreads(); @@ -1143,7 +1143,7 @@ __global__ void __launch_bounds__(256) uint32_t comp_block_align) { __shared__ __align__(16) StripeStream ss; - __shared__ uint8_t* volatile uncomp_base_g; + __shared__ uint8_t* uncomp_base_g; auto const padded_block_header_size = util::round_up_unsafe(block_header_size, comp_block_align); auto const padded_comp_block_size = util::round_up_unsafe(max_comp_blk_size, comp_block_align); @@ -1196,8 +1196,8 @@ __global__ void __launch_bounds__(1024) uint32_t max_comp_blk_size) { __shared__ __align__(16) StripeStream ss; - __shared__ uint8_t const* volatile comp_src_g; - __shared__ uint32_t volatile comp_len_g; + __shared__ uint8_t const* comp_src_g; + __shared__ uint32_t comp_len_g; auto const stripe_id = blockIdx.x; auto const stream_id = blockIdx.y; diff --git a/cpp/src/io/orc/stripe_init.cu b/cpp/src/io/orc/stripe_init.cu index 8eeca504121..b31a4a081d1 100644 --- a/cpp/src/io/orc/stripe_init.cu +++ b/cpp/src/io/orc/stripe_init.cu @@ -499,7 +499,7 @@ __global__ void __launch_bounds__(128, 8) gpuParseRowGroupIndex(RowGroup* row_gr : row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].start_row; for (int j = t4; j < rowgroup_size4; j += 4) { ((uint32_t*)&row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x])[j] = - ((volatile uint32_t*)&s->rowgroups[i])[j]; + ((uint32_t*)&s->rowgroups[i])[j]; } row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].num_rows = num_rows; // Updating in case of struct From d87e181daa67d8fb1a029fc2c09e2f561d1e7234 Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Wed, 4 Oct 2023 13:25:56 -0700 Subject: [PATCH 128/150] Expose streams in binaryop APIs (#14187) Contributes to #925 Authors: - Vyas Ramasubramani (https://github.com/vyasr) Approvers: - Nghia Truong (https://github.com/ttnghia) - Karthikeyan (https://github.com/karthikeyann) URL: https://github.com/rapidsai/cudf/pull/14187 --- cpp/include/cudf/binaryop.hpp | 8 ++ cpp/src/binaryop/binaryop.cpp | 12 ++- cpp/src/binaryop/compiled/binary_ops.cu | 6 +- cpp/tests/CMakeLists.txt | 1 + cpp/tests/streams/binaryop_test.cpp | 126 ++++++++++++++++++++++++ 5 files changed, 147 insertions(+), 6 deletions(-) create mode 100644 cpp/tests/streams/binaryop_test.cpp diff --git a/cpp/include/cudf/binaryop.hpp b/cpp/include/cudf/binaryop.hpp index 77d6a4d1e89..9df4b4eb00f 100644 --- a/cpp/include/cudf/binaryop.hpp +++ b/cpp/include/cudf/binaryop.hpp @@ -102,6 +102,7 @@ enum class binary_operator : int32_t { * @param rhs The right operand column * @param op The binary operator * @param output_type The desired data type of the output column + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return Output column of `output_type` type containing the result of * the binary operation @@ -115,6 +116,7 @@ std::unique_ptr binary_operation( column_view const& rhs, binary_operator op, data_type output_type, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); 
/** @@ -131,6 +133,7 @@ std::unique_ptr<column> binary_operation( * @param rhs The right operand scalar * @param op The binary operator * @param output_type The desired data type of the output column + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return Output column of `output_type` type containing the result of * the binary operation @@ -144,6 +147,7 @@ std::unique_ptr<column> binary_operation( scalar const& rhs, binary_operator op, data_type output_type, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -158,6 +162,7 @@ std::unique_ptr<column> binary_operation( * @param rhs The right operand column * @param op The binary operator * @param output_type The desired data type of the output column + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return Output column of `output_type` type containing the result of * the binary operation @@ -172,6 +177,7 @@ std::unique_ptr<column> binary_operation( column_view const& rhs, binary_operator op, data_type output_type, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -189,6 +195,7 @@ std::unique_ptr<column> binary_operation( * @param output_type The desired data type of the output column. It is assumed * that output_type is compatible with the output data type * of the function in the PTX code + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return Output column of `output_type` type containing the result of * the binary operation @@ -201,6 +208,7 @@ std::unique_ptr<column> binary_operation( column_view const& rhs, std::string const& ptx, data_type output_type, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** diff --git a/cpp/src/binaryop/binaryop.cpp b/cpp/src/binaryop/binaryop.cpp index ef07de8c461..6b413ab2be4 100644 --- a/cpp/src/binaryop/binaryop.cpp +++ b/cpp/src/binaryop/binaryop.cpp @@ -405,38 +405,42 @@ std::unique_ptr<column> binary_operation(scalar const& lhs, column_view const& rhs, binary_operator op, data_type output_type, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::binary_operation(lhs, rhs, op, output_type, cudf::get_default_stream(), mr); + return detail::binary_operation(lhs, rhs, op, output_type, stream, mr); } std::unique_ptr<column> binary_operation(column_view const& lhs, scalar const& rhs, binary_operator op, data_type output_type, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::binary_operation(lhs, rhs, op, output_type, cudf::get_default_stream(), mr); + return detail::binary_operation(lhs, rhs, op, output_type, stream, mr); } std::unique_ptr<column> binary_operation(column_view const& lhs, column_view const& rhs, binary_operator op, data_type output_type, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::binary_operation(lhs, rhs, op, output_type, cudf::get_default_stream(), mr); + return detail::binary_operation(lhs, rhs, op, output_type, stream, mr); }
std::unique_ptr<column> binary_operation(column_view const& lhs, column_view const& rhs, std::string const& ptx, data_type output_type, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::binary_operation(lhs, rhs, ptx, output_type, cudf::get_default_stream(), mr); + return detail::binary_operation(lhs, rhs, ptx, output_type, stream, mr); } } // namespace cudf diff --git a/cpp/src/binaryop/compiled/binary_ops.cu b/cpp/src/binaryop/compiled/binary_ops.cu index 1f7f342632a..85ab5c6d6cb 100644 --- a/cpp/src/binaryop/compiled/binary_ops.cu +++ b/cpp/src/binaryop/compiled/binary_ops.cu @@ -47,14 +47,16 @@ namespace { struct scalar_as_column_view { using return_type = typename std::pair<column_view, std::unique_ptr<column>>; template <typename T, CUDF_ENABLE_IF(is_fixed_width<T>())> - return_type operator()(scalar const& s, rmm::cuda_stream_view, rmm::mr::device_memory_resource*) + return_type operator()(scalar const& s, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource*) { auto& h_scalar_type_view = static_cast<cudf::scalar_type_t<T>&>(const_cast<scalar&>(s)); auto col_v = column_view(s.type(), 1, h_scalar_type_view.data(), reinterpret_cast<bitmask_type const*>(s.validity_data()), - !s.is_valid()); + !s.is_valid(stream)); return std::pair{col_v, std::unique_ptr<column>(nullptr)}; } template <typename T, CUDF_ENABLE_IF(!is_fixed_width<T>())> diff --git a/cpp/tests/CMakeLists.txt b/cpp/tests/CMakeLists.txt index 04939f3cd6d..ac13c121530 100644 --- a/cpp/tests/CMakeLists.txt +++ b/cpp/tests/CMakeLists.txt @@ -622,6 +622,7 @@ ConfigureTest( STREAM_IDENTIFICATION_TEST identify_stream_usage/test_default_stream_identification.cu ) +ConfigureTest(STREAM_BINARYOP_TEST streams/binaryop_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_CONCATENATE_TEST streams/concatenate_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_COPYING_TEST streams/copying_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_FILLING_TEST streams/filling_test.cpp STREAM_MODE testing) diff --git a/cpp/tests/streams/binaryop_test.cpp b/cpp/tests/streams/binaryop_test.cpp new file mode 100644 index 00000000000..2520aed0458 --- /dev/null +++ b/cpp/tests/streams/binaryop_test.cpp @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#include + +#include +#include +#include + +#include +#include +#include + +class BinaryopTest : public cudf::test::BaseFixture {}; + +TEST_F(BinaryopTest, ColumnColumn) +{ + cudf::test::fixed_width_column_wrapper lhs{10, 20, 30, 40, 50}; + cudf::test::fixed_width_column_wrapper rhs{15, 25, 35, 45, 55}; + + cudf::binary_operation(lhs, + rhs, + cudf::binary_operator::ADD, + cudf::data_type(cudf::type_to_id()), + cudf::test::get_default_stream()); +} + +TEST_F(BinaryopTest, ColumnScalar) +{ + cudf::test::fixed_width_column_wrapper lhs{10, 20, 30, 40, 50}; + cudf::numeric_scalar rhs{23, true, cudf::test::get_default_stream()}; + + cudf::binary_operation(lhs, + rhs, + cudf::binary_operator::ADD, + cudf::data_type(cudf::type_to_id()), + cudf::test::get_default_stream()); +} + +TEST_F(BinaryopTest, ScalarColumn) +{ + cudf::numeric_scalar lhs{42, true, cudf::test::get_default_stream()}; + cudf::test::fixed_width_column_wrapper rhs{15, 25, 35, 45, 55}; + + cudf::binary_operation(lhs, + rhs, + cudf::binary_operator::ADD, + cudf::data_type(cudf::type_to_id()), + cudf::test::get_default_stream()); +} + +class BinaryopPTXTest : public BinaryopTest { + protected: + void SetUp() override + { + if (!can_do_runtime_jit()) { GTEST_SKIP() << "Skipping tests that require 11.5 runtime"; } + } +}; + +TEST_F(BinaryopPTXTest, ColumnColumnPTX) +{ + cudf::test::fixed_width_column_wrapper lhs{10, 20, 30, 40, 50}; + cudf::test::fixed_width_column_wrapper rhs{15, 25, 35, 45, 55}; + + // c = a*a*a + b*b + char const* ptx = + R"***( +// +// Generated by NVIDIA NVVM Compiler +// +// Compiler Build ID: CL-24817639 +// Cuda compilation tools, release 10.0, V10.0.130 +// Based on LLVM 3.4svn +// + +.version 6.3 +.target sm_70 +.address_size 64 + + // .globl _ZN8__main__7add$241Eix +.common .global .align 8 .u64 _ZN08NumbaEnv8__main__7add$241Eix; +.common .global .align 8 .u64 _ZN08NumbaEnv5numba7targets7numbers14int_power_impl12$3clocals$3e13int_power$242Exx; + +.visible .func (.param .b32 func_retval0) _ZN8__main__7add$241Eix( + .param .b64 _ZN8__main__7add$241Eix_param_0, + .param .b32 _ZN8__main__7add$241Eix_param_1, + .param .b64 _ZN8__main__7add$241Eix_param_2 +) +{ + .reg .b32 %r<3>; + .reg .b64 %rd<8>; + + + ld.param.u64 %rd1, [_ZN8__main__7add$241Eix_param_0]; + ld.param.u32 %r1, [_ZN8__main__7add$241Eix_param_1]; + ld.param.u64 %rd2, [_ZN8__main__7add$241Eix_param_2]; + cvt.s64.s32 %rd3, %r1; + mul.wide.s32 %rd4, %r1, %r1; + mul.lo.s64 %rd5, %rd4, %rd3; + mul.lo.s64 %rd6, %rd2, %rd2; + add.s64 %rd7, %rd6, %rd5; + st.u64 [%rd1], %rd7; + mov.u32 %r2, 0; + st.param.b32 [func_retval0+0], %r2; + ret; +} + +)***"; + + cudf::binary_operation( + lhs, rhs, ptx, cudf::data_type(cudf::type_to_id()), cudf::test::get_default_stream()); + cudf::binary_operation(lhs, rhs, ptx, cudf::data_type(cudf::type_to_id())); +} From b120f7e73e882b4eaa6b5a2cb91aeed20bf1198d Mon Sep 17 00:00:00 2001 From: Yunsong Wang Date: Wed, 4 Oct 2023 14:23:24 -0700 Subject: [PATCH 129/150] Improve `contains_column` by invoking `contains_table` (#14238) Part of #https://github.com/rapidsai/cudf/issues/12261 This PR simplifies the `contains_column` implementation by invoking `contains_table` and gets rid of the use of the cudf `unordered_multiset`. It also removes the `unordered_multiset` header file from libcudf. 
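The core of the change: wrap each column in a single-column `table_view` and defer to the table path, so one hash-based implementation backs both APIs. A minimal sketch of that idea, assuming the table-level `cudf::detail::contains` from `cudf/detail/search.hpp` returns one boolean per needles row; the wrapper name and exact argument list here are illustrative, not the verbatim diff:

```cpp
#include <cudf/column/column.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/search.hpp>
#include <cudf/table/table_view.hpp>

// Sketch only: column-level contains expressed as a table-level contains over
// single-column table_views (signatures are assumptions, not the real change).
std::unique_ptr<cudf::column> contains_via_table(cudf::column_view const& haystack,
                                                 cudf::column_view const& needles,
                                                 rmm::cuda_stream_view stream,
                                                 rmm::mr::device_memory_resource* mr)
{
  auto contained = cudf::detail::contains(cudf::table_view{{haystack}},
                                          cudf::table_view{{needles}},
                                          cudf::null_equality::EQUAL,
                                          cudf::nan_equality::ALL_EQUAL,
                                          stream,
                                          mr);
  // Move the per-row booleans into a BOOL8 column, keeping the needles null mask.
  return std::make_unique<cudf::column>(std::move(contained),
                                        cudf::detail::copy_bitmask(needles, stream, mr),
                                        needles.null_count());
}
```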
Authors: - Yunsong Wang (https://github.com/PointKernel) Approvers: - Nghia Truong (https://github.com/ttnghia) - Bradley Dice (https://github.com/bdice) URL: https://github.com/rapidsai/cudf/pull/14238 --- cpp/src/hash/unordered_multiset.cuh | 159 ---------------------------- cpp/src/search/contains_column.cu | 67 +----------- 2 files changed, 1 insertion(+), 225 deletions(-) delete mode 100644 cpp/src/hash/unordered_multiset.cuh diff --git a/cpp/src/hash/unordered_multiset.cuh b/cpp/src/hash/unordered_multiset.cuh deleted file mode 100644 index 183042fc0f4..00000000000 --- a/cpp/src/hash/unordered_multiset.cuh +++ /dev/null @@ -1,159 +0,0 @@ -/* - * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include -#include -#include -#include -#include - -#include -#include -#include - -#include -#include -#include -#include - -#include - -namespace cudf { -namespace detail { -/* - * Device view of the unordered multiset - */ -template , - typename Equality = equal_to> -class unordered_multiset_device_view { - public: - unordered_multiset_device_view(size_type hash_size, - size_type const* hash_begin, - Element const* hash_data) - : hash_size{hash_size}, hash_begin{hash_begin}, hash_data{hash_data}, hasher(), equals() - { - } - - bool __device__ contains(Element e) const - { - size_type loc = hasher(e) % (2 * hash_size); - - for (size_type i = hash_begin[loc]; i < hash_begin[loc + 1]; ++i) { - if (equals(hash_data[i], e)) return true; - } - - return false; - } - - private: - Hasher hasher; - Equality equals; - size_type hash_size; - size_type const* hash_begin; - Element const* hash_data; -}; - -/* - * Fixed size set on a device. 
- */ -template , - typename Equality = equal_to> -class unordered_multiset { - public: - /** - * @brief Factory to construct a new unordered_multiset - */ - static unordered_multiset create(column_view const& col, rmm::cuda_stream_view stream) - { - auto d_column = column_device_view::create(col, stream); - auto d_col = *d_column; - - auto hash_bins_start = cudf::detail::make_zeroed_device_uvector_async( - 2 * d_col.size() + 1, stream, rmm::mr::get_current_device_resource()); - auto hash_bins_end = cudf::detail::make_zeroed_device_uvector_async( - 2 * d_col.size() + 1, stream, rmm::mr::get_current_device_resource()); - auto hash_data = rmm::device_uvector(d_col.size(), stream); - - Hasher hasher; - size_type* d_hash_bins_start = hash_bins_start.data(); - size_type* d_hash_bins_end = hash_bins_end.data(); - Element* d_hash_data = hash_data.data(); - - thrust::for_each( - rmm::exec_policy(stream), - thrust::make_counting_iterator(0), - thrust::make_counting_iterator(col.size()), - [d_hash_bins_start, d_col, hasher] __device__(size_t idx) { - if (!d_col.is_null(idx)) { - Element e = d_col.element(idx); - size_type tmp = hasher(e) % (2 * d_col.size()); - cuda::atomic_ref ref{*(d_hash_bins_start + tmp)}; - ref.fetch_add(1, cuda::std::memory_order_relaxed); - } - }); - - thrust::exclusive_scan(rmm::exec_policy(stream), - hash_bins_start.begin(), - hash_bins_start.end(), - hash_bins_end.begin()); - - thrust::copy(rmm::exec_policy(stream), - hash_bins_end.begin(), - hash_bins_end.end(), - hash_bins_start.begin()); - - thrust::for_each( - rmm::exec_policy(stream), - thrust::make_counting_iterator(0), - thrust::make_counting_iterator(col.size()), - [d_hash_bins_end, d_hash_data, d_col, hasher] __device__(size_t idx) { - if (!d_col.is_null(idx)) { - Element e = d_col.element(idx); - size_type tmp = hasher(e) % (2 * d_col.size()); - cuda::atomic_ref ref{*(d_hash_bins_end + tmp)}; - size_type offset = ref.fetch_add(1, cuda::std::memory_order_relaxed); - d_hash_data[offset] = e; - } - }); - - return unordered_multiset(d_col.size(), std::move(hash_bins_start), std::move(hash_data)); - } - - unordered_multiset_device_view to_device() const - { - return unordered_multiset_device_view( - size, hash_bins.data(), hash_data.data()); - } - - private: - unordered_multiset(size_type size, - rmm::device_uvector&& hash_bins, - rmm::device_uvector&& hash_data) - : size{size}, hash_bins{std::move(hash_bins)}, hash_data{std::move(hash_data)} - { - } - - size_type size; - rmm::device_uvector hash_bins; - rmm::device_uvector hash_data; -}; - -} // namespace detail -} // namespace cudf diff --git a/cpp/src/search/contains_column.cu b/cpp/src/search/contains_column.cu index 4363bd212fe..85971647434 100644 --- a/cpp/src/search/contains_column.cu +++ b/cpp/src/search/contains_column.cu @@ -14,23 +14,14 @@ * limitations under the License. */ -#include - -#include #include #include #include #include #include #include -#include #include -#include - -#include -#include -#include namespace cudf { namespace detail { @@ -38,61 +29,7 @@ namespace detail { namespace { struct contains_column_dispatch { - template - struct contains_fn { - bool __device__ operator()(size_type const idx) const - { - if (needles_have_nulls && needles.is_null_nocheck(idx)) { - // Exit early. The value doesn't matter, and will be masked as a null element. 
- return true; - } - - return haystack.contains(needles.template element(idx)); - } - - Haystack const haystack; - column_device_view const needles; - bool const needles_have_nulls; - }; - - template ())> - std::unique_ptr operator()(column_view const& haystack, - column_view const& needles, - rmm::cuda_stream_view stream, - rmm::mr::device_memory_resource* mr) const - { - auto result = make_numeric_column(data_type{type_to_id()}, - needles.size(), - copy_bitmask(needles, stream, mr), - needles.null_count(), - stream, - mr); - if (needles.is_empty()) { return result; } - - auto const out_begin = result->mutable_view().template begin(); - if (haystack.is_empty()) { - thrust::uninitialized_fill( - rmm::exec_policy(stream), out_begin, out_begin + needles.size(), false); - return result; - } - - auto const haystack_set = cudf::detail::unordered_multiset::create(haystack, stream); - auto const haystack_set_dv = haystack_set.to_device(); - auto const needles_cdv_ptr = column_device_view::create(needles, stream); - - thrust::transform(rmm::exec_policy(stream), - thrust::make_counting_iterator(0), - thrust::make_counting_iterator(needles.size()), - out_begin, - contains_fn{ - haystack_set_dv, *needles_cdv_ptr, needles.has_nulls()}); - - result->set_null_count(needles.null_count()); - - return result; - } - - template ())> + template std::unique_ptr operator()(column_view const& haystack, column_view const& needles, rmm::cuda_stream_view stream, @@ -144,8 +81,6 @@ std::unique_ptr contains(column_view const& haystack, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { - CUDF_EXPECTS(haystack.type() == needles.type(), "DTYPE mismatch"); - return cudf::type_dispatcher( haystack.type(), contains_column_dispatch{}, haystack, needles, stream, mr); } From 5d311ea76ddc8bdbb357b6afdf64dfce6ece39a7 Mon Sep 17 00:00:00 2001 From: David Wendt <45795991+davidwendt@users.noreply.github.com> Date: Thu, 5 Oct 2023 13:21:31 -0400 Subject: [PATCH 130/150] Fix strings replace for adjacent, identical multi-byte UTF-8 character targets (#14235) Fixes bug that can occur when replacing all occurrences in a string using a multi-byte UTF-8 target when the target matches sequentially in the same string -- some characters were missed. Specialized gtest is also added. 
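The root cause: `cudf::string_view::find` reports positions in characters, but the replacement loop advanced the search position by the target's `size_bytes()`, so a two-byte target such as `é` jumped over an immediately adjacent match. A standalone sketch of the overshoot (plain C++, no cudf dependency; the helper names are invented for illustration):

```cpp
#include <iostream>
#include <string>
#include <vector>

// Byte offset at which each UTF-8 character of s starts.
std::vector<std::size_t> char_offsets(std::string const& s)
{
  std::vector<std::size_t> offsets;
  for (std::size_t i = 0; i < s.size();) {
    offsets.push_back(i);
    auto const c = static_cast<unsigned char>(s[i]);
    i += (c < 0x80) ? 1 : (c < 0xE0) ? 2 : (c < 0xF0) ? 3 : 4;
  }
  return offsets;
}

// A find() that returns *character* positions, mimicking cudf::string_view::find.
long find_char_pos(std::string const& s, std::string const& target, std::size_t from_char)
{
  auto const offsets = char_offsets(s);
  for (std::size_t pos = from_char; pos < offsets.size(); ++pos) {
    if (s.compare(offsets[pos], target.size(), target) == 0) { return static_cast<long>(pos); }
  }
  return -1;
}

int main()
{
  std::string const str    = "ééé";  // three adjacent matches, two bytes each
  std::string const target = "é";    // length() == 1 character, size_bytes() == 2
  for (std::size_t advance : {target.size(), std::size_t{1}}) {  // buggy advance, then fixed
    int matches = 0;
    for (long pos = find_char_pos(str, target, 0); pos >= 0;
         pos = find_char_pos(str, target, static_cast<std::size_t>(pos) + advance)) {
      ++matches;
    }
    std::cout << "advance by " << advance << " -> " << matches << " matches\n";
  }
  // Advancing by the byte count (2) visits characters 0 and 2 only and finds 2 of
  // the 3 matches; advancing by the character count (1) finds all 3.
}
```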
Found while working on #13891 Authors: - David Wendt (https://github.com/davidwendt) Approvers: - Bradley Dice (https://github.com/bdice) - Nghia Truong (https://github.com/ttnghia) URL: https://github.com/rapidsai/cudf/pull/14235 --- cpp/src/strings/replace/replace.cu | 2 +- cpp/tests/strings/replace_tests.cpp | 22 ++++++++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/cpp/src/strings/replace/replace.cu b/cpp/src/strings/replace/replace.cu index a622d1a742d..acc1502f4d6 100644 --- a/cpp/src/strings/replace/replace.cu +++ b/cpp/src/strings/replace/replace.cu @@ -97,7 +97,7 @@ struct replace_row_parallel_fn { } else { bytes += d_repl.size_bytes() - d_target.size_bytes(); } - position = d_str.find(d_target, position + d_target.size_bytes()); + position = d_str.find(d_target, position + d_target.length()); --max_n; } if (out_ptr) // copy whats left (or right depending on your point of view) diff --git a/cpp/tests/strings/replace_tests.cpp b/cpp/tests/strings/replace_tests.cpp index f143983aded..f04bb832f09 100644 --- a/cpp/tests/strings/replace_tests.cpp +++ b/cpp/tests/strings/replace_tests.cpp @@ -246,6 +246,28 @@ TEST_F(StringsReplaceTest, ReplaceEndOfString) CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected); } +TEST_F(StringsReplaceTest, ReplaceAdjacentMultiByteTarget) +{ + auto input = cudf::test::strings_column_wrapper({"ééééééé", "eéeéeée", "eeeeeee"}); + auto strings_view = cudf::strings_column_view(input); + // replace all occurrences of 'é' with 'e' + cudf::test::strings_column_wrapper expected({"eeeeeee", "eeeeeee", "eeeeeee"}); + + auto stream = cudf::get_default_stream(); + auto mr = rmm::mr::get_current_device_resource(); + + auto target = cudf::string_scalar("é", true, stream); + auto repl = cudf::string_scalar("e", true, stream); + auto results = cudf::strings::replace(strings_view, target, repl); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected); + results = cudf::strings::detail::replace<cudf::strings::detail::replace_algorithm::CHAR_PARALLEL>( + strings_view, target, repl, -1, stream, mr); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected); + results = cudf::strings::detail::replace<cudf::strings::detail::replace_algorithm::ROW_PARALLEL>( + strings_view, target, repl, -1, stream, mr); + CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected); +} + TEST_F(StringsReplaceTest, ReplaceSlice) { std::vector<char const*> h_strings{"Héllo", "thesé", nullptr, "ARE THE", "tést strings", ""}; From 04e2cd6ff4d525390d4a416651cefa16e11c2a50 Mon Sep 17 00:00:00 2001 From: Robert Maynard Date: Fri, 6 Oct 2023 09:33:16 -0400 Subject: [PATCH 131/150] cudf::detail::pinned_allocator doesn't throw from `deallocate` (#14251) Fixes #14165 The deallocate function is called by the `pinned_host_vector`. Throwing from destructors is bad since the exceptions can't be caught, and generally get converted into runtime SIGABRTs.
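The resulting pattern, condensed from the diff below (`cudaFreeHost` is the real CUDA runtime call; the free-standing wrapper name here is illustrative): swallow the status and surface failures only through `assert` in debug builds, keeping `deallocate` safe to reach from a destructor.

```cpp
#include <cassert>
#include <cuda_runtime_api.h>

// deallocate() is reached from pinned_host_vector's destructor, so it must not
// throw; report a failed cudaFreeHost via assert in debug builds instead.
inline void pinned_deallocate(void* p) noexcept
{
  auto const dealloc_worked = cudaFreeHost(p);
  (void)dealloc_worked;  // silences the unused-variable warning in release builds
  assert(dealloc_worked == cudaSuccess);
}
```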
Authors: - Robert Maynard (https://github.com/robertmaynard) Approvers: - David Wendt (https://github.com/davidwendt) - Divye Gala (https://github.com/divyegala) - Mike Wilson (https://github.com/hyperbolic2346) URL: https://github.com/rapidsai/cudf/pull/14251 --- cpp/include/cudf/detail/utilities/pinned_host_vector.hpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/cpp/include/cudf/detail/utilities/pinned_host_vector.hpp b/cpp/include/cudf/detail/utilities/pinned_host_vector.hpp index 9e2b85ea129..eee974c8399 100644 --- a/cpp/include/cudf/detail/utilities/pinned_host_vector.hpp +++ b/cpp/include/cudf/detail/utilities/pinned_host_vector.hpp @@ -169,7 +169,12 @@ class pinned_allocator { * It is the responsibility of the caller to destroy * the objects stored at \p p. */ - __host__ inline void deallocate(pointer p, size_type /*cnt*/) { CUDF_CUDA_TRY(cudaFreeHost(p)); } + __host__ inline void deallocate(pointer p, size_type /*cnt*/) + { + auto dealloc_worked = cudaFreeHost(p); + (void)dealloc_worked; + assert(dealloc_worked == cudaSuccess); + } /** * @brief This method returns the maximum size of the \c cnt parameter From fc3694730334971c6c7bd916bf36b71302cfcd42 Mon Sep 17 00:00:00 2001 From: Mike Wilson Date: Fri, 6 Oct 2023 14:03:32 -0400 Subject: [PATCH 132/150] Fixing parquet list of struct interpretation (#13715) This change alters how we interpret non-annotated data in a parquet file. Most modern parquet writers would produce something like: ``` message spark_schema { required int32 id; optional group phoneNumbers (LIST) { repeated group phone { required int64 number; optional binary kind (STRING); } } } ``` But the list annotation isn't required. If it didn't exist, we would incorrectly interpret this schema as a struct of struct and not a list of struct. This change alters the code to look at the child and see if it is repeated. If it is, this indicates a list. closes #13664 Authors: - Mike Wilson (https://github.com/hyperbolic2346) - Vukasin Milovanovic (https://github.com/vuule) - Mark Harris (https://github.com/harrism) Approvers: - Mark Harris (https://github.com/harrism) - Nghia Truong (https://github.com/ttnghia) - Vukasin Milovanovic (https://github.com/vuule) URL: https://github.com/rapidsai/cudf/pull/13715 --- cpp/src/io/parquet/page_decode.cuh | 2 +- cpp/src/io/parquet/parquet.hpp | 2 +- cpp/src/io/parquet/reader_impl_helpers.cpp | 86 ++++++++++++++++++++-- cpp/src/io/parquet/reader_impl_helpers.hpp | 1 + cpp/tests/io/parquet_test.cpp | 78 ++++++++++++++++++++ 5 files changed, 162 insertions(+), 7 deletions(-) diff --git a/cpp/src/io/parquet/page_decode.cuh b/cpp/src/io/parquet/page_decode.cuh index cdc29197eb3..d70cabdd35f 100644 --- a/cpp/src/io/parquet/page_decode.cuh +++ b/cpp/src/io/parquet/page_decode.cuh @@ -753,7 +753,7 @@ __device__ void gpuUpdateValidityOffsetsAndRowIndices(int32_t target_input_value // for nested schemas, it's more complicated. This warp will visit 32 incoming values, // however not all of them will necessarily represent a value at this nesting level. so // the validity bit for thread t might actually represent output value t-6. the correct - // position for thread t's bit is cur_value_count. for cuda 11 we could use + // position for thread t's bit is thread_value_count. for cuda 11 we could use // __reduce_or_sync(), but until then we have to do a warp reduce. 
WarpReduceOr32(is_valid << thread_value_count); diff --git a/cpp/src/io/parquet/parquet.hpp b/cpp/src/io/parquet/parquet.hpp index c2affc774c2..1df49262e87 100644 --- a/cpp/src/io/parquet/parquet.hpp +++ b/cpp/src/io/parquet/parquet.hpp @@ -206,7 +206,7 @@ struct SchemaElement { { return type == UNDEFINED_TYPE && // this assumption might be a little weak. - ((repetition_type != REPEATED) || (repetition_type == REPEATED && num_children == 2)); + ((repetition_type != REPEATED) || (repetition_type == REPEATED && num_children > 1)); } }; diff --git a/cpp/src/io/parquet/reader_impl_helpers.cpp b/cpp/src/io/parquet/reader_impl_helpers.cpp index fcaa610fbb7..9778cfc47d2 100644 --- a/cpp/src/io/parquet/reader_impl_helpers.cpp +++ b/cpp/src/io/parquet/reader_impl_helpers.cpp @@ -175,6 +175,81 @@ type_id to_type_id(SchemaElement const& schema, return type_id::EMPTY; } +void metadata::sanitize_schema() +{ + // Parquet isn't very strict about incoming metadata. Lots of things can and should be inferred. + // There are also a lot of rules that simply aren't followed and are expected to be worked around. + // This step sanitizes the metadata to something that isn't ambiguous. + // + // Take, for example, the following schema: + // + // required group field_id=-1 user { + // required int32 field_id=-1 id; + // optional group field_id=-1 phoneNumbers { + // repeated group field_id=-1 phone { + // required int64 field_id=-1 number; + // optional binary field_id=-1 kind (String); + // } + // } + // } + // + // This real-world example has no annotations telling us what is a list or a struct. On the + // surface this looks like a column of id's and a column of list<struct<int64, string>>, but this + // actually should be interpreted as a struct<list<struct<int64, string>>>. The phoneNumbers field + // has to be a struct because it is a group with no repeated tag and we have no annotation. The + // repeated group is actually BOTH a struct due to the multiple children and a list due to + // repeated. + // + // This code attempts to make this less messy for the code that follows.
+ + std::function process = [&](size_t schema_idx) -> void { + if (schema_idx < 0) { return; } + auto& schema_elem = schema[schema_idx]; + if (schema_idx != 0 && schema_elem.type == UNDEFINED_TYPE) { + auto const parent_type = schema[schema_elem.parent_idx].converted_type; + if (schema_elem.repetition_type == REPEATED && schema_elem.num_children > 1 && + parent_type != LIST && parent_type != MAP) { + // This is a list of structs, so we need to mark this as a list, but also + // add a struct child and move this element's children to the struct + schema_elem.converted_type = LIST; + schema_elem.repetition_type = OPTIONAL; + auto const struct_node_idx = schema.size(); + + SchemaElement struct_elem; + struct_elem.name = "struct_node"; + struct_elem.repetition_type = REQUIRED; + struct_elem.num_children = schema_elem.num_children; + struct_elem.type = UNDEFINED_TYPE; + struct_elem.converted_type = UNKNOWN; + + // swap children + struct_elem.children_idx = std::move(schema_elem.children_idx); + schema_elem.children_idx = {struct_node_idx}; + schema_elem.num_children = 1; + + struct_elem.max_definition_level = schema_elem.max_definition_level; + struct_elem.max_repetition_level = schema_elem.max_repetition_level; + schema_elem.max_definition_level--; + schema_elem.max_repetition_level = schema[schema_elem.parent_idx].max_repetition_level; + + // change parent index on new node and on children + struct_elem.parent_idx = schema_idx; + for (auto& child_idx : struct_elem.children_idx) { + schema[child_idx].parent_idx = struct_node_idx; + } + // add our struct + schema.push_back(struct_elem); + } + } + + for (auto& child_idx : schema_elem.children_idx) { + process(child_idx); + } + }; + + process(0); +} + metadata::metadata(datasource* source) { constexpr auto header_len = sizeof(file_header_s); @@ -195,6 +270,7 @@ metadata::metadata(datasource* source) CompactProtocolReader cp(buffer->data(), ender->footer_len); CUDF_EXPECTS(cp.read(this), "Cannot parse metadata"); CUDF_EXPECTS(cp.InitSchema(this), "Cannot initialize schema"); + sanitize_schema(); } std::vector aggregate_reader_metadata::metadatas_from_sources( @@ -445,8 +521,10 @@ aggregate_reader_metadata::select_columns(std::optional child_col_name_info, schema_elem.children_idx[0], out_col_array, has_list_parent); } + auto const one_level_list = schema_elem.is_one_level_list(get_schema(schema_elem.parent_idx)); + // if we're at the root, this is a new output column - auto const col_type = schema_elem.is_one_level_list(get_schema(schema_elem.parent_idx)) + auto const col_type = one_level_list ? type_id::LIST : to_type_id(schema_elem, strings_to_categorical, timestamp_type_id); auto const dtype = to_data_type(col_type, schema_elem); @@ -485,7 +563,7 @@ aggregate_reader_metadata::select_columns(std::optional input_column_info{schema_idx, schema_elem.name, schema_elem.max_repetition_level > 0}); // set up child output column for one-level encoding list - if (schema_elem.is_one_level_list(get_schema(schema_elem.parent_idx))) { + if (one_level_list) { // determine the element data type auto const element_type = to_type_id(schema_elem, strings_to_categorical, timestamp_type_id); @@ -506,9 +584,7 @@ aggregate_reader_metadata::select_columns(std::optional std::copy(nesting.cbegin(), nesting.cend(), std::back_inserter(input_col.nesting)); // pop off the extra nesting element. 
- if (schema_elem.is_one_level_list(get_schema(schema_elem.parent_idx))) { - nesting.pop_back(); - } + if (one_level_list) { nesting.pop_back(); } path_is_valid = true; // If we're able to reach leaf then path is valid } diff --git a/cpp/src/io/parquet/reader_impl_helpers.hpp b/cpp/src/io/parquet/reader_impl_helpers.hpp index 61e4f94df0f..9ee17f26a10 100644 --- a/cpp/src/io/parquet/reader_impl_helpers.hpp +++ b/cpp/src/io/parquet/reader_impl_helpers.hpp @@ -58,6 +58,7 @@ using namespace cudf::io::parquet; */ struct metadata : public FileMetaData { explicit metadata(datasource* source); + void sanitize_schema(); }; class aggregate_reader_metadata { diff --git a/cpp/tests/io/parquet_test.cpp b/cpp/tests/io/parquet_test.cpp index 81e0e12eeb9..73c946a5feb 100644 --- a/cpp/tests/io/parquet_test.cpp +++ b/cpp/tests/io/parquet_test.cpp @@ -6732,4 +6732,82 @@ TEST_P(ParquetV2Test, CheckEncodings) } } +TEST_F(ParquetReaderTest, RepeatedNoAnnotations) +{ + constexpr unsigned char repeated_bytes[] = { + 0x50, 0x41, 0x52, 0x31, 0x15, 0x04, 0x15, 0x30, 0x15, 0x30, 0x4c, 0x15, 0x0c, 0x15, 0x00, 0x12, + 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x04, 0x00, + 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x15, 0x00, 0x15, 0x0a, 0x15, 0x0a, + 0x2c, 0x15, 0x0c, 0x15, 0x10, 0x15, 0x06, 0x15, 0x06, 0x00, 0x00, 0x03, 0x03, 0x88, 0xc6, 0x02, + 0x26, 0x80, 0x01, 0x1c, 0x15, 0x02, 0x19, 0x25, 0x00, 0x10, 0x19, 0x18, 0x02, 0x69, 0x64, 0x15, + 0x00, 0x16, 0x0c, 0x16, 0x78, 0x16, 0x78, 0x26, 0x54, 0x26, 0x08, 0x00, 0x00, 0x15, 0x04, 0x15, + 0x40, 0x15, 0x40, 0x4c, 0x15, 0x08, 0x15, 0x00, 0x12, 0x00, 0x00, 0xe3, 0x0c, 0x23, 0x4b, 0x01, + 0x00, 0x00, 0x00, 0xc7, 0x35, 0x3a, 0x42, 0x00, 0x00, 0x00, 0x00, 0x8e, 0x6b, 0x74, 0x84, 0x00, + 0x00, 0x00, 0x00, 0x55, 0xa1, 0xae, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x15, 0x00, 0x15, 0x22, 0x15, + 0x22, 0x2c, 0x15, 0x10, 0x15, 0x10, 0x15, 0x06, 0x15, 0x06, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, + 0x03, 0xc0, 0x03, 0x00, 0x00, 0x00, 0x03, 0x90, 0xaa, 0x02, 0x03, 0x94, 0x03, 0x26, 0xda, 0x02, + 0x1c, 0x15, 0x04, 0x19, 0x25, 0x00, 0x10, 0x19, 0x38, 0x0c, 0x70, 0x68, 0x6f, 0x6e, 0x65, 0x4e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x05, 0x70, 0x68, 0x6f, 0x6e, 0x65, 0x06, 0x6e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x15, 0x00, 0x16, 0x10, 0x16, 0xa0, 0x01, 0x16, 0xa0, 0x01, 0x26, 0x96, 0x02, + 0x26, 0xba, 0x01, 0x00, 0x00, 0x15, 0x04, 0x15, 0x24, 0x15, 0x24, 0x4c, 0x15, 0x04, 0x15, 0x00, + 0x12, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x68, 0x6f, 0x6d, 0x65, 0x06, 0x00, 0x00, 0x00, 0x6d, + 0x6f, 0x62, 0x69, 0x6c, 0x65, 0x15, 0x00, 0x15, 0x20, 0x15, 0x20, 0x2c, 0x15, 0x10, 0x15, 0x10, + 0x15, 0x06, 0x15, 0x06, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03, 0xc0, 0x03, 0x00, 0x00, 0x00, + 0x03, 0x90, 0xef, 0x01, 0x03, 0x04, 0x26, 0xcc, 0x04, 0x1c, 0x15, 0x0c, 0x19, 0x25, 0x00, 0x10, + 0x19, 0x38, 0x0c, 0x70, 0x68, 0x6f, 0x6e, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x05, + 0x70, 0x68, 0x6f, 0x6e, 0x65, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x15, 0x00, 0x16, 0x10, 0x16, 0x82, + 0x01, 0x16, 0x82, 0x01, 0x26, 0x8a, 0x04, 0x26, 0xca, 0x03, 0x00, 0x00, 0x15, 0x02, 0x19, 0x6c, + 0x48, 0x04, 0x75, 0x73, 0x65, 0x72, 0x15, 0x04, 0x00, 0x15, 0x02, 0x25, 0x00, 0x18, 0x02, 0x69, + 0x64, 0x00, 0x35, 0x02, 0x18, 0x0c, 0x70, 0x68, 0x6f, 0x6e, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x73, 0x15, 0x02, 0x00, 0x35, 0x04, 0x18, 0x05, 0x70, 0x68, 0x6f, 0x6e, 0x65, 0x15, 0x04, + 0x00, 0x15, 0x04, 0x25, 0x00, 0x18, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x00, 0x15, 0x0c, 
+ 0x25, 0x02, 0x18, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x25, 0x00, 0x00, 0x16, 0x00, 0x19, 0x1c, 0x19, + 0x3c, 0x26, 0x80, 0x01, 0x1c, 0x15, 0x02, 0x19, 0x25, 0x00, 0x10, 0x19, 0x18, 0x02, 0x69, 0x64, + 0x15, 0x00, 0x16, 0x0c, 0x16, 0x78, 0x16, 0x78, 0x26, 0x54, 0x26, 0x08, 0x00, 0x00, 0x26, 0xda, + 0x02, 0x1c, 0x15, 0x04, 0x19, 0x25, 0x00, 0x10, 0x19, 0x38, 0x0c, 0x70, 0x68, 0x6f, 0x6e, 0x65, + 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x05, 0x70, 0x68, 0x6f, 0x6e, 0x65, 0x06, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x15, 0x00, 0x16, 0x10, 0x16, 0xa0, 0x01, 0x16, 0xa0, 0x01, 0x26, 0x96, + 0x02, 0x26, 0xba, 0x01, 0x00, 0x00, 0x26, 0xcc, 0x04, 0x1c, 0x15, 0x0c, 0x19, 0x25, 0x00, 0x10, + 0x19, 0x38, 0x0c, 0x70, 0x68, 0x6f, 0x6e, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x05, + 0x70, 0x68, 0x6f, 0x6e, 0x65, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x15, 0x00, 0x16, 0x10, 0x16, 0x82, + 0x01, 0x16, 0x82, 0x01, 0x26, 0x8a, 0x04, 0x26, 0xca, 0x03, 0x00, 0x00, 0x16, 0x9a, 0x03, 0x16, + 0x0c, 0x00, 0x28, 0x49, 0x70, 0x61, 0x72, 0x71, 0x75, 0x65, 0x74, 0x2d, 0x72, 0x73, 0x20, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x20, 0x30, 0x2e, 0x33, 0x2e, 0x30, 0x20, 0x28, 0x62, 0x75, + 0x69, 0x6c, 0x64, 0x20, 0x62, 0x34, 0x35, 0x63, 0x65, 0x37, 0x63, 0x62, 0x61, 0x32, 0x31, 0x39, + 0x39, 0x66, 0x32, 0x32, 0x64, 0x39, 0x33, 0x32, 0x36, 0x39, 0x63, 0x31, 0x35, 0x30, 0x64, 0x38, + 0x61, 0x38, 0x33, 0x39, 0x31, 0x36, 0x63, 0x36, 0x39, 0x62, 0x35, 0x65, 0x29, 0x00, 0x32, 0x01, + 0x00, 0x00, 0x50, 0x41, 0x52, 0x31}; + + auto read_opts = cudf::io::parquet_reader_options::builder( + cudf::io::source_info{reinterpret_cast(repeated_bytes), sizeof(repeated_bytes)}); + auto result = cudf::io::read_parquet(read_opts); + + EXPECT_EQ(result.tbl->view().column(0).size(), 6); + EXPECT_EQ(result.tbl->view().num_columns(), 2); + + column_wrapper col0{1, 2, 3, 4, 5, 6}; + column_wrapper child0{{5555555555l, 1111111111l, 1111111111l, 2222222222l, 3333333333l}}; + cudf::test::strings_column_wrapper child1{{"-", "home", "home", "-", "mobile"}, {0, 1, 1, 0, 1}}; + auto struct_col = cudf::test::structs_column_wrapper{{child0, child1}}; + + auto list_offsets_column = + cudf::test::fixed_width_column_wrapper{0, 0, 0, 0, 1, 2, 5}.release(); + auto num_list_rows = list_offsets_column->size() - 1; + + auto mask = cudf::create_null_mask(6, cudf::mask_state::ALL_VALID); + cudf::set_null_mask(static_cast(mask.data()), 0, 2, false); + + auto list_col = cudf::make_lists_column( + num_list_rows, std::move(list_offsets_column), struct_col.release(), 2, std::move(mask)); + + std::vector> struct_children; + struct_children.push_back(std::move(list_col)); + + auto outer_struct = + cudf::test::structs_column_wrapper{{std::move(struct_children)}, {0, 0, 1, 1, 1, 1}}; + table_view expected{{col0, outer_struct}}; + + CUDF_TEST_EXPECT_TABLES_EQUAL(result.tbl->view(), expected); +} + CUDF_TEST_PROGRAM_MAIN() From 96664ec7436033f59aa5b9740e6f54aec707e3cf Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Fri, 6 Oct 2023 15:09:11 -0700 Subject: [PATCH 133/150] Add pylibcudf.Scalar that interoperates with Arrow scalars (#14133) This PR adds a new Scalar object to pylibcudf that will function as the pylibcudf equivalent of cudf::scalar. Unlike columns, which are typically operated on in the form of views rather than owning types by libcudf, owning scalars are accepted by (const) ref by libcudf APIs and no corresponding view type exists. 
Therefore, pylibcudf.Scalar differs from pylibcudf.Column by actually owning an instance of the underlying libcudf type (cudf::scalar). Construction of pylibcudf Scalars is expected to be done from an Arrow scalar. This PR relies on #14124 and should not be merged until after that one. Authors: - Vyas Ramasubramani (https://github.com/vyasr) Approvers: - Lawrence Mitchell (https://github.com/wence-) URL: https://github.com/rapidsai/cudf/pull/14133 --- python/cudf/cudf/_lib/CMakeLists.txt | 8 +- python/cudf/cudf/_lib/datetime.pyx | 6 +- python/cudf/cudf/_lib/interop.pyx | 95 +------------ python/cudf/cudf/_lib/nvtext/CMakeLists.txt | 8 ++ .../cudf/cudf/_lib/pylibcudf/CMakeLists.txt | 25 +++- python/cudf/cudf/_lib/pylibcudf/__init__.pxd | 5 +- python/cudf/cudf/_lib/pylibcudf/__init__.py | 5 +- python/cudf/cudf/_lib/pylibcudf/interop.pxd | 9 ++ python/cudf/cudf/_lib/pylibcudf/interop.pyx | 23 +++ python/cudf/cudf/_lib/pylibcudf/scalar.pxd | 32 +++++ python/cudf/cudf/_lib/pylibcudf/scalar.pyx | 133 ++++++++++++++++++ python/cudf/cudf/_lib/pylibcudf/table.pxd | 3 + python/cudf/cudf/_lib/pylibcudf/table.pyx | 33 ++++- python/cudf/cudf/_lib/scalar.pxd | 13 +- python/cudf/cudf/_lib/scalar.pyx | 88 ++++++++---- python/cudf/cudf/_lib/strings/CMakeLists.txt | 10 +- .../cudf/_lib/strings/convert/CMakeLists.txt | 10 +- .../cudf/_lib/strings/split/CMakeLists.txt | 10 +- 18 files changed, 378 insertions(+), 138 deletions(-) create mode 100644 python/cudf/cudf/_lib/pylibcudf/interop.pxd create mode 100644 python/cudf/cudf/_lib/pylibcudf/interop.pyx create mode 100644 python/cudf/cudf/_lib/pylibcudf/scalar.pxd create mode 100644 python/cudf/cudf/_lib/pylibcudf/scalar.pyx diff --git a/python/cudf/cudf/_lib/CMakeLists.txt b/python/cudf/cudf/_lib/CMakeLists.txt index 947659c290a..1b543b94589 100644 --- a/python/cudf/cudf/_lib/CMakeLists.txt +++ b/python/cudf/cudf/_lib/CMakeLists.txt @@ -107,8 +107,12 @@ if(${PYARROW_RESULT}) message(FATAL_ERROR "Error while trying to obtain pyarrow include directory:\n${PYARROW_ERROR}") endif() -set(targets_using_arrow_headers interop avro csv orc json parquet) -foreach(target IN LISTS targets_using_arrow_headers) +# TODO: Due to cudf's scalar.pyx needing to cimport pylibcudf's scalar.pyx (because there are parts +# of cudf Cython that need to directly access the c_obj underlying the pylibcudf Scalar) the +# requirement for arrow headers infects all of cudf. That in turn requires including numpy headers. +# These requirements will go away once all scalar-related Cython code is removed from cudf. +foreach(target IN LISTS RAPIDS_CYTHON_CREATED_TARGETS) + target_include_directories(${target} PRIVATE "${NumPy_INCLUDE_DIRS}") target_include_directories(${target} PRIVATE "${PYARROW_INCLUDE_DIR}") endforeach() diff --git a/python/cudf/cudf/_lib/datetime.pyx b/python/cudf/cudf/_lib/datetime.pyx index 81949dbaa20..3d96f59c4d6 100644 --- a/python/cudf/cudf/_lib/datetime.pyx +++ b/python/cudf/cudf/_lib/datetime.pyx @@ -1,4 +1,4 @@ -# Copyright (c) 2020-2022, NVIDIA CORPORATION. +# Copyright (c) 2020-2023, NVIDIA CORPORATION. 
from cudf.core.buffer import acquire_spill_lock @@ -10,6 +10,7 @@ from cudf._lib.column cimport Column from cudf._lib.cpp.column.column cimport column from cudf._lib.cpp.column.column_view cimport column_view from cudf._lib.cpp.filling cimport calendrical_month_sequence +from cudf._lib.cpp.scalar.scalar cimport scalar from cudf._lib.cpp.types cimport size_type from cudf._lib.scalar cimport DeviceScalar @@ -166,10 +167,11 @@ def date_range(DeviceScalar start, size_type n, offset): + offset.kwds.get("months", 0) ) + cdef const scalar* c_start = start.c_value.get() with nogil: c_result = move(calendrical_month_sequence( n, - start.c_value.get()[0], + c_start[0], months )) return Column.from_unique_ptr(move(c_result)) diff --git a/python/cudf/cudf/_lib/interop.pyx b/python/cudf/cudf/_lib/interop.pyx index 639754fc54f..8fd2a409d90 100644 --- a/python/cudf/cudf/_lib/interop.pyx +++ b/python/cudf/cudf/_lib/interop.pyx @@ -4,14 +4,7 @@ from cpython cimport pycapsule from libcpp.memory cimport shared_ptr, unique_ptr from libcpp.utility cimport move from libcpp.vector cimport vector -from pyarrow.lib cimport ( - CScalar, - CTable, - pyarrow_unwrap_scalar, - pyarrow_unwrap_table, - pyarrow_wrap_scalar, - pyarrow_wrap_table, -) +from pyarrow.lib cimport CTable, pyarrow_unwrap_table, pyarrow_wrap_table from cudf._lib.cpp.interop cimport ( DLManagedTensor, @@ -21,22 +14,12 @@ from cudf._lib.cpp.interop cimport ( to_arrow as cpp_to_arrow, to_dlpack as cpp_to_dlpack, ) -from cudf._lib.cpp.scalar.scalar cimport fixed_point_scalar, scalar from cudf._lib.cpp.table.table cimport table from cudf._lib.cpp.table.table_view cimport table_view -from cudf._lib.cpp.types cimport type_id -from cudf._lib.cpp.wrappers.decimals cimport ( - decimal32, - decimal64, - decimal128, - scale_type, -) -from cudf._lib.scalar cimport DeviceScalar from cudf._lib.utils cimport columns_from_unique_ptr, table_view_from_columns from cudf.api.types import is_list_dtype, is_struct_dtype from cudf.core.buffer import acquire_spill_lock -from cudf.core.dtypes import Decimal32Dtype, Decimal64Dtype def from_dlpack(dlpack_capsule): @@ -199,79 +182,3 @@ def from_arrow(object input_table): c_result = move(cpp_from_arrow(cpp_arrow_table.get()[0])) return columns_from_unique_ptr(move(c_result)) - - -@acquire_spill_lock() -def to_arrow_scalar(DeviceScalar source_scalar): - """Convert a scalar to a PyArrow scalar. - - Parameters - ---------- - source_scalar : the scalar to convert - - Returns - ------- - pyarrow.lib.Scalar - """ - cdef vector[column_metadata] cpp_metadata = gather_metadata( - [("", source_scalar.dtype)] - ) - cdef const scalar* source_scalar_ptr = source_scalar.get_raw_ptr() - - cdef shared_ptr[CScalar] cpp_arrow_scalar - with nogil: - cpp_arrow_scalar = cpp_to_arrow( - source_scalar_ptr[0], cpp_metadata[0] - ) - - return pyarrow_wrap_scalar(cpp_arrow_scalar) - - -@acquire_spill_lock() -def from_arrow_scalar(object input_scalar, output_dtype=None): - """Convert from PyArrow scalar to a cudf scalar. 
- - Parameters - ---------- - input_scalar : PyArrow scalar - output_dtype : output type to cast to, ignored except for decimals - - Returns - ------- - cudf._lib.DeviceScalar - """ - cdef shared_ptr[CScalar] cpp_arrow_scalar = ( - pyarrow_unwrap_scalar(input_scalar) - ) - cdef unique_ptr[scalar] c_result - - with nogil: - c_result = move(cpp_from_arrow(cpp_arrow_scalar.get()[0])) - - cdef type_id ctype = c_result.get().type().id() - if ctype == type_id.DECIMAL128: - if output_dtype is None: - # Decimals must be cast to the cudf dtype of the right width - raise ValueError( - "Decimal scalars must be constructed with a dtype" - ) - - if isinstance(output_dtype, Decimal32Dtype): - c_result.reset( - new fixed_point_scalar[decimal32]( - ( c_result.get()).value(), - scale_type(-input_scalar.type.scale), - c_result.get().is_valid() - ) - ) - elif isinstance(output_dtype, Decimal64Dtype): - c_result.reset( - new fixed_point_scalar[decimal64]( - ( c_result.get()).value(), - scale_type(-input_scalar.type.scale), - c_result.get().is_valid() - ) - ) - # Decimal128Dtype is a no-op, no conversion needed. - - return DeviceScalar.from_unique_ptr(move(c_result), output_dtype) diff --git a/python/cudf/cudf/_lib/nvtext/CMakeLists.txt b/python/cudf/cudf/_lib/nvtext/CMakeLists.txt index 515b9c1d6e4..d4e2392ee04 100644 --- a/python/cudf/cudf/_lib/nvtext/CMakeLists.txt +++ b/python/cudf/cudf/_lib/nvtext/CMakeLists.txt @@ -22,3 +22,11 @@ rapids_cython_create_modules( SOURCE_FILES "${cython_sources}" LINKED_LIBRARIES "${linked_libraries}" MODULE_PREFIX nvtext_ ASSOCIATED_TARGETS cudf ) +# TODO: Due to cudf's scalar.pyx needing to cimport pylibcudf's scalar.pyx (because there are parts +# of cudf Cython that need to directly access the c_obj underlying the pylibcudf Scalar) the +# requirement for arrow headers infects all of cudf. That in turn requires including numpy headers. +# These requirements will go away once all scalar-related Cython code is removed from cudf. +foreach(target IN LISTS RAPIDS_CYTHON_CREATED_TARGETS) + target_include_directories(${target} PRIVATE "${NumPy_INCLUDE_DIRS}") + target_include_directories(${target} PRIVATE "${PYARROW_INCLUDE_DIR}") +endforeach() diff --git a/python/cudf/cudf/_lib/pylibcudf/CMakeLists.txt b/python/cudf/cudf/_lib/pylibcudf/CMakeLists.txt index 0ce42dc43ff..5185b2d4bb5 100644 --- a/python/cudf/cudf/_lib/pylibcudf/CMakeLists.txt +++ b/python/cudf/cudf/_lib/pylibcudf/CMakeLists.txt @@ -12,10 +12,33 @@ # the License. # ============================================================================= -set(cython_sources column.pyx copying.pyx gpumemoryview.pyx table.pyx types.pyx utils.pyx) +set(cython_sources column.pyx copying.pyx gpumemoryview.pyx interop.pyx scalar.pyx table.pyx + types.pyx utils.pyx +) set(linked_libraries cudf::cudf) rapids_cython_create_modules( CXX SOURCE_FILES "${cython_sources}" LINKED_LIBRARIES "${linked_libraries}" MODULE_PREFIX pylibcudf_ ASSOCIATED_TARGETS cudf ) + +find_package(Python 3.9 REQUIRED COMPONENTS Interpreter) + +execute_process( + COMMAND "${Python_EXECUTABLE}" -c "import pyarrow; print(pyarrow.get_include())" + OUTPUT_VARIABLE PYARROW_INCLUDE_DIR + OUTPUT_STRIP_TRAILING_WHITESPACE +) + +foreach(target IN LISTS RAPIDS_CYTHON_CREATED_TARGETS) + target_include_directories(${target} PRIVATE "${PYARROW_INCLUDE_DIR}") +endforeach() + +# TODO: Clean up this include when switching to scikit-build-core. 
See cudf/_lib/CMakeLists.txt for +# more info +find_package(NumPy REQUIRED) +foreach(target IN LISTS RAPIDS_CYTHON_CREATED_TARGETS) + target_include_directories(${target} PRIVATE "${NumPy_INCLUDE_DIRS}") + # Switch to the line below when we switch back to FindPython.cmake in CMake 3.24. + # target_include_directories(${target} PRIVATE "${Python_NumPy_INCLUDE_DIRS}") +endforeach() diff --git a/python/cudf/cudf/_lib/pylibcudf/__init__.pxd b/python/cudf/cudf/_lib/pylibcudf/__init__.pxd index ba7822b0a54..7a35854392c 100644 --- a/python/cudf/cudf/_lib/pylibcudf/__init__.pxd +++ b/python/cudf/cudf/_lib/pylibcudf/__init__.pxd @@ -1,9 +1,10 @@ # Copyright (c) 2023, NVIDIA CORPORATION. # TODO: Verify consistent usage of relative/absolute imports in pylibcudf. -from . cimport copying +from . cimport copying, interop from .column cimport Column from .gpumemoryview cimport gpumemoryview +from .scalar cimport Scalar from .table cimport Table # TODO: cimport type_id once # https://github.com/cython/cython/issues/5609 is resolved @@ -12,7 +13,9 @@ from .types cimport DataType __all__ = [ "Column", "DataType", + "Scalar", "Table", "copying", "gpumemoryview", + "interop", ] diff --git a/python/cudf/cudf/_lib/pylibcudf/__init__.py b/python/cudf/cudf/_lib/pylibcudf/__init__.py index 3edff9a53e8..72b74a57b87 100644 --- a/python/cudf/cudf/_lib/pylibcudf/__init__.py +++ b/python/cudf/cudf/_lib/pylibcudf/__init__.py @@ -1,16 +1,19 @@ # Copyright (c) 2023, NVIDIA CORPORATION. -from . import copying +from . import copying, interop from .column import Column from .gpumemoryview import gpumemoryview +from .scalar import Scalar from .table import Table from .types import DataType, TypeId __all__ = [ "Column", "DataType", + "Scalar", "Table", "TypeId", "copying", "gpumemoryview", + "interop", ] diff --git a/python/cudf/cudf/_lib/pylibcudf/interop.pxd b/python/cudf/cudf/_lib/pylibcudf/interop.pxd new file mode 100644 index 00000000000..3a79e5425d4 --- /dev/null +++ b/python/cudf/cudf/_lib/pylibcudf/interop.pxd @@ -0,0 +1,9 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. + +from cudf._lib.cpp.interop cimport column_metadata + + +cdef class ColumnMetadata: + cdef public object name + cdef public object children_meta + cdef column_metadata to_libcudf(self) diff --git a/python/cudf/cudf/_lib/pylibcudf/interop.pyx b/python/cudf/cudf/_lib/pylibcudf/interop.pyx new file mode 100644 index 00000000000..0cdca275027 --- /dev/null +++ b/python/cudf/cudf/_lib/pylibcudf/interop.pyx @@ -0,0 +1,23 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. + +from cudf._lib.cpp.interop cimport column_metadata + + +cdef class ColumnMetadata: + def __init__(self, name): + self.name = name + self.children_meta = [] + + cdef column_metadata to_libcudf(self): + """Convert to C++ column_metadata. + + Since this class is mutable and cheap, it is easier to create the C++ + object on the fly rather than have it directly backing the storage for + the Cython class. + """ + cdef column_metadata c_metadata + cdef ColumnMetadata child_meta + c_metadata.name = self.name.encode() + for child_meta in self.children_meta: + c_metadata.children_meta.push_back(child_meta.to_libcudf()) + return c_metadata diff --git a/python/cudf/cudf/_lib/pylibcudf/scalar.pxd b/python/cudf/cudf/_lib/pylibcudf/scalar.pxd new file mode 100644 index 00000000000..09d853d832f --- /dev/null +++ b/python/cudf/cudf/_lib/pylibcudf/scalar.pxd @@ -0,0 +1,32 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. 
+ +from libcpp cimport bool +from libcpp.memory cimport unique_ptr +from pyarrow cimport lib as pa + +from rmm._lib.memory_resource cimport DeviceMemoryResource + +from cudf._lib.cpp.scalar.scalar cimport scalar + +from .interop cimport ColumnMetadata +from .types cimport DataType + + +cdef class Scalar: + cdef unique_ptr[scalar] c_obj + cdef DataType _data_type + + # Holds a reference to the DeviceMemoryResource used for allocation. + # Ensures the MR does not get destroyed before this DeviceBuffer. `mr` is + # needed for deallocation + cdef DeviceMemoryResource mr + + cdef const scalar* get(self) except * + + cpdef DataType type(self) + cpdef bool is_valid(self) + + @staticmethod + cdef Scalar from_libcudf(unique_ptr[scalar] libcudf_scalar, dtype=*) + + cpdef pa.Scalar to_arrow(self, ColumnMetadata metadata) diff --git a/python/cudf/cudf/_lib/pylibcudf/scalar.pyx b/python/cudf/cudf/_lib/pylibcudf/scalar.pyx new file mode 100644 index 00000000000..04f588bd3e6 --- /dev/null +++ b/python/cudf/cudf/_lib/pylibcudf/scalar.pyx @@ -0,0 +1,133 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. + +from cython cimport no_gc_clear +from cython.operator cimport dereference +from libcpp.memory cimport shared_ptr, unique_ptr +from libcpp.utility cimport move +from pyarrow cimport lib as pa + +from rmm._lib.memory_resource cimport get_current_device_resource + +from cudf._lib.cpp.interop cimport ( + column_metadata, + from_arrow as cpp_from_arrow, + to_arrow as cpp_to_arrow, +) +from cudf._lib.cpp.scalar.scalar cimport fixed_point_scalar, scalar +from cudf._lib.cpp.wrappers.decimals cimport ( + decimal32, + decimal64, + decimal128, + scale_type, +) + +from .interop cimport ColumnMetadata +from .types cimport DataType, type_id + + +# The DeviceMemoryResource attribute could be released prematurely +# by the gc if the Scalar is in a reference cycle. Removing the tp_clear +# function with the no_gc_clear decoration prevents that. See +# https://github.com/rapidsai/rmm/pull/931 for details. +@no_gc_clear +cdef class Scalar: + """A scalar value in device memory.""" + # Unlike for columns, libcudf does not support scalar views. All APIs that + # accept scalar values accept references to the owning object rather than a + # special view type. As a result, pylibcudf.Scalar has a simpler structure + # than pylibcudf.Column because it can be a true wrapper around a libcudf + # column + + def __cinit__(self, *args, **kwargs): + self.mr = get_current_device_resource() + + def __init__(self, pa.Scalar value=None): + # TODO: This case is not something we really want to + # support, but it here for now to ease the transition of + # DeviceScalar. 
+ if value is not None: + raise ValueError("Scalar should be constructed with a factory") + + @staticmethod + def from_arrow(pa.Scalar value, DataType data_type=None): + # Allow passing a dtype, but only for the purpose of decimals for now + + cdef shared_ptr[pa.CScalar] cscalar = ( + pa.pyarrow_unwrap_scalar(value) + ) + cdef unique_ptr[scalar] c_result + + with nogil: + c_result = move(cpp_from_arrow(cscalar.get()[0])) + + cdef Scalar s = Scalar.from_libcudf(move(c_result)) + + if s.type().id() != type_id.DECIMAL128: + if data_type is not None: + raise ValueError( + "dtype may not be passed for non-decimal types" + ) + return s + + if data_type is None: + raise ValueError( + "Decimal scalars must be constructed with a dtype" + ) + + cdef type_id tid = data_type.id() + + if tid == type_id.DECIMAL32: + s.c_obj.reset( + new fixed_point_scalar[decimal32]( + ( s.c_obj.get()).value(), + scale_type(-value.type.scale), + s.c_obj.get().is_valid() + ) + ) + elif tid == type_id.DECIMAL64: + s.c_obj.reset( + new fixed_point_scalar[decimal64]( + ( s.c_obj.get()).value(), + scale_type(-value.type.scale), + s.c_obj.get().is_valid() + ) + ) + elif tid != type_id.DECIMAL128: + raise ValueError( + "Decimal scalars may only be cast to decimals" + ) + + return s + + cpdef pa.Scalar to_arrow(self, ColumnMetadata metadata): + cdef shared_ptr[pa.CScalar] c_result + cdef column_metadata c_metadata = metadata.to_libcudf() + + with nogil: + c_result = move(cpp_to_arrow(dereference(self.c_obj.get()), c_metadata)) + + return pa.pyarrow_wrap_scalar(c_result) + + cdef const scalar* get(self) except *: + return self.c_obj.get() + + cpdef DataType type(self): + """The type of data in the column.""" + return self._data_type + + cpdef bool is_valid(self): + """True if the scalar is valid, false if not""" + return self.get().is_valid() + + @staticmethod + cdef Scalar from_libcudf(unique_ptr[scalar] libcudf_scalar, dtype=None): + """Construct a Scalar object from a libcudf scalar. + + This method is for pylibcudf's functions to use to ingest outputs of + calling libcudf algorithms, and should generally not be needed by users + (even direct pylibcudf Cython users). + """ + cdef Scalar s = Scalar.__new__(Scalar) + s.c_obj.swap(libcudf_scalar) + s._data_type = DataType.from_libcudf(s.get().type()) + return s diff --git a/python/cudf/cudf/_lib/pylibcudf/table.pxd b/python/cudf/cudf/_lib/pylibcudf/table.pxd index 95f197b13eb..a9e2874232a 100644 --- a/python/cudf/cudf/_lib/pylibcudf/table.pxd +++ b/python/cudf/cudf/_lib/pylibcudf/table.pxd @@ -1,6 +1,7 @@ # Copyright (c) 2023, NVIDIA CORPORATION. from libcpp.memory cimport unique_ptr +from pyarrow cimport lib as pa from cudf._lib.cpp.table.table cimport table from cudf._lib.cpp.table.table_view cimport table_view @@ -16,3 +17,5 @@ cdef class Table: cdef Table from_libcudf(unique_ptr[table] libcudf_tbl) cpdef list columns(self) + + cpdef pa.Table to_arrow(self, list metadata) diff --git a/python/cudf/cudf/_lib/pylibcudf/table.pyx b/python/cudf/cudf/_lib/pylibcudf/table.pyx index 720f9815bd6..c41eb82e4a1 100644 --- a/python/cudf/cudf/_lib/pylibcudf/table.pyx +++ b/python/cudf/cudf/_lib/pylibcudf/table.pyx @@ -1,15 +1,22 @@ # Copyright (c) 2023, NVIDIA CORPORATION. 
from cython.operator cimport dereference -from libcpp.memory cimport unique_ptr +from libcpp.memory cimport shared_ptr, unique_ptr from libcpp.utility cimport move from libcpp.vector cimport vector +from pyarrow cimport lib as pa from cudf._lib.cpp.column.column cimport column from cudf._lib.cpp.column.column_view cimport column_view +from cudf._lib.cpp.interop cimport ( + column_metadata, + from_arrow as cpp_from_arrow, + to_arrow as cpp_to_arrow, +) from cudf._lib.cpp.table.table cimport table from .column cimport Column +from .interop cimport ColumnMetadata cdef class Table: @@ -60,3 +67,27 @@ cdef class Table: cpdef list columns(self): return self._columns + + @staticmethod + def from_arrow(pa.Table pyarrow_table): + cdef shared_ptr[pa.CTable] ctable = ( + pa.pyarrow_unwrap_table(pyarrow_table) + ) + cdef unique_ptr[table] c_result + + with nogil: + c_result = move(cpp_from_arrow(ctable.get()[0])) + + return Table.from_libcudf(move(c_result)) + + cpdef pa.Table to_arrow(self, list metadata): + cdef shared_ptr[pa.CTable] c_result + cdef vector[column_metadata] c_metadata + cdef ColumnMetadata meta + for meta in metadata: + c_metadata.push_back(meta.to_libcudf()) + + with nogil: + c_result = move(cpp_to_arrow(self.view(), c_metadata)) + + return pa.pyarrow_wrap_table(c_result) diff --git a/python/cudf/cudf/_lib/scalar.pxd b/python/cudf/cudf/_lib/scalar.pxd index 1deed60d67d..77733f59c3d 100644 --- a/python/cudf/cudf/_lib/scalar.pxd +++ b/python/cudf/cudf/_lib/scalar.pxd @@ -1,20 +1,19 @@ -# Copyright (c) 2020-2022, NVIDIA CORPORATION. +# Copyright (c) 2020-2023, NVIDIA CORPORATION. from libcpp cimport bool from libcpp.memory cimport unique_ptr from rmm._lib.memory_resource cimport DeviceMemoryResource +# TODO: Would like to remove this cimport, but it will require some more work +# to excise all C code in scalar.pyx that relies on using the C API of the +# pylibcudf Scalar underlying the DeviceScalar. +from cudf._lib cimport pylibcudf from cudf._lib.cpp.scalar.scalar cimport scalar cdef class DeviceScalar: - cdef unique_ptr[scalar] c_value - - # Holds a reference to the DeviceMemoryResource used for allocation. - # Ensures the MR does not get destroyed before this DeviceBuffer. `mr` is - # needed for deallocation - cdef DeviceMemoryResource mr + cdef pylibcudf.Scalar c_value cdef object _dtype diff --git a/python/cudf/cudf/_lib/scalar.pyx b/python/cudf/cudf/_lib/scalar.pyx index 5ab286c5701..0b64c75f7b6 100644 --- a/python/cudf/cudf/_lib/scalar.pyx +++ b/python/cudf/cudf/_lib/scalar.pyx @@ -1,7 +1,5 @@ # Copyright (c) 2020-2023, NVIDIA CORPORATION. 
-cimport cython - import copy import numpy as np @@ -13,17 +11,17 @@ from libcpp cimport bool from libcpp.memory cimport unique_ptr from libcpp.utility cimport move -from rmm._lib.memory_resource cimport get_current_device_resource - import cudf +from cudf._lib import pylibcudf from cudf._lib.types import LIBCUDF_TO_SUPPORTED_NUMPY_TYPES -from cudf.core.dtypes import ListDtype, StructDtype +from cudf.core.dtypes import ( + ListDtype, + StructDtype, + is_list_dtype, + is_struct_dtype, +) from cudf.core.missing import NA, NaT -from cudf._lib.types cimport dtype_from_column_view, underlying_type_t_type_id - -from cudf._lib.interop import from_arrow_scalar, to_arrow_scalar - cimport cudf._lib.cpp.types as libcudf_types from cudf._lib.cpp.scalar.scalar cimport ( duration_scalar, @@ -44,6 +42,7 @@ from cudf._lib.cpp.wrappers.timestamps cimport ( timestamp_s, timestamp_us, ) +from cudf._lib.types cimport dtype_from_column_view, underlying_type_t_type_id def _replace_nested(obj, check, replacement): @@ -61,15 +60,44 @@ def _replace_nested(obj, check, replacement): _replace_nested(v, check, replacement) -# The DeviceMemoryResource attribute could be released prematurely -# by the gc if the DeviceScalar is in a reference cycle. Removing -# the tp_clear function with the no_gc_clear decoration prevents that. -# See https://github.com/rapidsai/rmm/pull/931 for details. -@cython.no_gc_clear +def gather_metadata(dtypes): + """Convert a dict of dtypes to a list of ColumnMetadata objects. + + The metadata is constructed recursively so that nested types are + represented as nested ColumnMetadata objects. + + Parameters + ---------- + dtypes : dict + A dict mapping column names to dtypes. + + Returns + ------- + List[ColumnMetadata] + A list of ColumnMetadata objects. + """ + out = [] + for name, dtype in dtypes.items(): + v = pylibcudf.interop.ColumnMetadata(name) + if is_struct_dtype(dtype): + v.children_meta = gather_metadata(dtype.fields) + elif is_list_dtype(dtype): + # Offsets column is unnamed and has no children + v.children_meta.append(pylibcudf.interop.ColumnMetadata("")) + v.children_meta.extend( + gather_metadata({"": dtype.element_type}) + ) + out.append(v) + return out + + cdef class DeviceScalar: + # TODO: I think this should be removable, except that currently the way + # that from_unique_ptr is implemented is probably dereferencing this in an + # invalid state. See what the best way to fix that is. def __cinit__(self, *args, **kwargs): - self.mr = get_current_device_resource() + self.c_value = pylibcudf.Scalar() def __init__(self, value, dtype): """ @@ -85,7 +113,7 @@ cdef class DeviceScalar: dtype : dtype A NumPy dtype. """ - self._dtype = dtype if dtype.kind != 'U' else cudf.dtype('object') + dtype = dtype if dtype.kind != 'U' else cudf.dtype('object') if cudf.utils.utils.is_na_like(value): value = None @@ -108,10 +136,17 @@ cdef class DeviceScalar: pa_scalar = pa.scalar(value, type=pa_type) - # Note: This factory-like behavior in __init__ will be removed when - # migrating to pylibcudf. 
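[Editor's aside: illustrative sketch, not part of this patch.] gather_metadata, added above, mirrors libcudf's layout for nested columns: a list column's children are (offsets, elements), so an empty-name ColumnMetadata is appended for the offsets child before the element's metadata; struct children carry their field names recursively. A sketch of the shapes it produces, assuming cudf's public ListDtype/StructDtype as used in the function body:

    import cudf
    from cudf._lib.scalar import gather_metadata

    # List column: one ColumnMetadata named "col" with two unnamed children,
    # the offsets child followed by the element child.
    list_meta = gather_metadata({"col": cudf.ListDtype("int64")})

    # Struct column: one ColumnMetadata named "s" whose children carry the
    # struct's field names ("x" here).
    struct_meta = gather_metadata({"s": cudf.StructDtype({"x": "int64"})})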
- cdef DeviceScalar obj = from_arrow_scalar(pa_scalar, self._dtype) - self.c_value.swap(obj.c_value) + data_type = None + if isinstance(dtype, cudf.core.dtypes.DecimalDtype): + tid = pylibcudf.TypeId.DECIMAL128 + if isinstance(dtype, cudf.core.dtypes.Decimal32Dtype): + tid = pylibcudf.TypeId.DECIMAL32 + elif isinstance(dtype, cudf.core.dtypes.Decimal64Dtype): + tid = pylibcudf.TypeId.DECIMAL64 + data_type = pylibcudf.DataType(tid, -dtype.scale) + + self.c_value = pylibcudf.Scalar.from_arrow(pa_scalar, data_type) + self._dtype = dtype def _to_host_scalar(self): is_datetime = self.dtype.kind == "M" @@ -119,7 +154,8 @@ cdef class DeviceScalar: null_type = NaT if is_datetime or is_timedelta else NA - ps = to_arrow_scalar(self) + metadata = gather_metadata({"": self.dtype})[0] + ps = self.c_value.to_arrow(metadata) if not ps.is_valid: return null_type @@ -158,13 +194,13 @@ cdef class DeviceScalar: return self._to_host_scalar() cdef const scalar* get_raw_ptr(self) except *: - return self.c_value.get() + return self.c_value.c_obj.get() cpdef bool is_valid(self): """ Returns if the Scalar is valid or not(i.e., ). """ - return self.get_raw_ptr()[0].is_valid() + return self.c_value.is_valid() def __repr__(self): if cudf.utils.utils.is_na_like(self.value): @@ -183,7 +219,7 @@ cdef class DeviceScalar: cdef DeviceScalar s = DeviceScalar.__new__(DeviceScalar) cdef libcudf_types.data_type cdtype - s.c_value = move(ptr) + s.c_value = pylibcudf.Scalar.from_libcudf(move(ptr)) cdtype = s.get_raw_ptr()[0].type() if dtype is not None: @@ -310,9 +346,9 @@ def _create_proxy_nat_scalar(dtype): if dtype.char in 'mM': nat = dtype.type('NaT').astype(dtype) if dtype.type == np.datetime64: - _set_datetime64_from_np_scalar(result.c_value, nat, dtype, True) + _set_datetime64_from_np_scalar(result.c_value.c_obj, nat, dtype, True) elif dtype.type == np.timedelta64: - _set_timedelta64_from_np_scalar(result.c_value, nat, dtype, True) + _set_timedelta64_from_np_scalar(result.c_value.c_obj, nat, dtype, True) return result else: raise TypeError('NAT only valid for datetime and timedelta') diff --git a/python/cudf/cudf/_lib/strings/CMakeLists.txt b/python/cudf/cudf/_lib/strings/CMakeLists.txt index a5e87a456cb..fc11f047ab4 100644 --- a/python/cudf/cudf/_lib/strings/CMakeLists.txt +++ b/python/cudf/cudf/_lib/strings/CMakeLists.txt @@ -1,5 +1,5 @@ # ============================================================================= -# Copyright (c) 2022, NVIDIA CORPORATION. +# Copyright (c) 2022-2023, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except # in compliance with the License. You may obtain a copy of the License at @@ -40,6 +40,14 @@ rapids_cython_create_modules( SOURCE_FILES "${cython_sources}" LINKED_LIBRARIES "${linked_libraries}" MODULE_PREFIX strings_ ASSOCIATED_TARGETS cudf ) +# TODO: Due to cudf's scalar.pyx needing to cimport pylibcudf's scalar.pyx (because there are parts +# of cudf Cython that need to directly access the c_obj underlying the pylibcudf Scalar) the +# requirement for arrow headers infects all of cudf. That requirement will go away once all +# scalar-related Cython code is removed from cudf. 
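[Editor's aside: illustrative sketch, not part of this patch; the CMake additions continue just below.] With the scalar.pyx changes above, DeviceScalar becomes a thin wrapper over pylibcudf.Scalar: __init__ parses the Python value with pyarrow and, for decimal dtypes only, forwards an explicit pylibcudf.DataType built from the dtype's negated scale. A sketch of that decimal path, assuming cudf's public Decimal64Dtype and the existing DeviceScalar.value property:

    from decimal import Decimal

    import cudf
    from cudf._lib.scalar import DeviceScalar

    ds = DeviceScalar(Decimal("1.23"), cudf.Decimal64Dtype(precision=9, scale=2))
    assert ds.is_valid()
    assert ds.value == Decimal("1.23")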
+foreach(target IN LISTS RAPIDS_CYTHON_CREATED_TARGETS) + target_include_directories(${target} PRIVATE "${NumPy_INCLUDE_DIRS}") + target_include_directories(${target} PRIVATE "${PYARROW_INCLUDE_DIR}") +endforeach() add_subdirectory(convert) add_subdirectory(split) diff --git a/python/cudf/cudf/_lib/strings/convert/CMakeLists.txt b/python/cudf/cudf/_lib/strings/convert/CMakeLists.txt index 434f79d3b5f..f55bb1fb780 100644 --- a/python/cudf/cudf/_lib/strings/convert/CMakeLists.txt +++ b/python/cudf/cudf/_lib/strings/convert/CMakeLists.txt @@ -1,5 +1,5 @@ # ============================================================================= -# Copyright (c) 2022, NVIDIA CORPORATION. +# Copyright (c) 2022-2023, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except # in compliance with the License. You may obtain a copy of the License at @@ -22,3 +22,11 @@ rapids_cython_create_modules( SOURCE_FILES "${cython_sources}" LINKED_LIBRARIES "${linked_libraries}" MODULE_PREFIX strings_ ASSOCIATED_TARGETS cudf ) +# TODO: Due to cudf's scalar.pyx needing to cimport pylibcudf's scalar.pyx (because there are parts +# of cudf Cython that need to directly access the c_obj underlying the pylibcudf Scalar) the +# requirement for arrow headers infects all of cudf. That requirement will go away once all +# scalar-related Cython code is removed from cudf. +foreach(target IN LISTS RAPIDS_CYTHON_CREATED_TARGETS) + target_include_directories(${target} PRIVATE "${NumPy_INCLUDE_DIRS}") + target_include_directories(${target} PRIVATE "${PYARROW_INCLUDE_DIR}") +endforeach() diff --git a/python/cudf/cudf/_lib/strings/split/CMakeLists.txt b/python/cudf/cudf/_lib/strings/split/CMakeLists.txt index 59a22c06e85..2f2063482af 100644 --- a/python/cudf/cudf/_lib/strings/split/CMakeLists.txt +++ b/python/cudf/cudf/_lib/strings/split/CMakeLists.txt @@ -1,5 +1,5 @@ # ============================================================================= -# Copyright (c) 2022, NVIDIA CORPORATION. +# Copyright (c) 2022-2023, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except # in compliance with the License. You may obtain a copy of the License at @@ -20,3 +20,11 @@ rapids_cython_create_modules( SOURCE_FILES "${cython_sources}" LINKED_LIBRARIES "${linked_libraries}" MODULE_PREFIX strings_ ASSOCIATED_TARGETS cudf ) +# TODO: Due to cudf's scalar.pyx needing to cimport pylibcudf's scalar.pyx (because there are parts +# of cudf Cython that need to directly access the c_obj underlying the pylibcudf Scalar) the +# requirement for arrow headers infects all of cudf. That requirement will go away once all +# scalar-related Cython code is removed from cudf. +foreach(target IN LISTS RAPIDS_CYTHON_CREATED_TARGETS) + target_include_directories(${target} PRIVATE "${NumPy_INCLUDE_DIRS}") + target_include_directories(${target} PRIVATE "${PYARROW_INCLUDE_DIR}") +endforeach() From e28017cc17d2feb050d2effd4ebafb84600fd607 Mon Sep 17 00:00:00 2001 From: nvdbaranec <56695930+nvdbaranec@users.noreply.github.com> Date: Mon, 9 Oct 2023 10:05:12 -0500 Subject: [PATCH 134/150] Cleanup of namespaces in parquet code. (#14259) Cleans up several issues in the parquet code: - We were using the namespace `cudf::io::detail::parquet`, when `cudf::io::parquet::detail` makes more sense. 
- Converts the `cudf::io::parquet::gpu` namespace to also just use `cudf::io::parquet::detail` - Several detail-style headers and source files were using `cudf::io::parquet` when they should probably have been in the detail namespace. Authors: - https://github.com/nvdbaranec Approvers: - Bradley Dice (https://github.com/bdice) - Yunsong Wang (https://github.com/PointKernel) - Vukasin Milovanovic (https://github.com/vuule) URL: https://github.com/rapidsai/cudf/pull/14259 --- cpp/include/cudf/io/detail/parquet.hpp | 8 +- cpp/include/cudf/io/parquet.hpp | 4 +- cpp/src/io/functions.cpp | 4 +- cpp/src/io/parquet/chunk_dict.cu | 19 +- .../io/parquet/compact_protocol_reader.cpp | 8 +- .../io/parquet/compact_protocol_reader.hpp | 9 +- .../io/parquet/compact_protocol_writer.cpp | 8 +- .../io/parquet/compact_protocol_writer.hpp | 8 +- cpp/src/io/parquet/decode_preprocess.cu | 10 +- cpp/src/io/parquet/delta_binary.cuh | 4 +- cpp/src/io/parquet/page_data.cu | 12 +- cpp/src/io/parquet/page_decode.cuh | 4 +- cpp/src/io/parquet/page_delta_decode.cu | 6 +- cpp/src/io/parquet/page_enc.cu | 22 +- cpp/src/io/parquet/page_hdr.cu | 14 +- cpp/src/io/parquet/page_string_decode.cu | 14 +- cpp/src/io/parquet/page_string_utils.cuh | 4 +- cpp/src/io/parquet/parquet.hpp | 9 +- cpp/src/io/parquet/parquet_common.hpp | 9 +- cpp/src/io/parquet/parquet_gpu.cuh | 4 +- cpp/src/io/parquet/parquet_gpu.hpp | 27 +- cpp/src/io/parquet/predicate_pushdown.cpp | 14 +- cpp/src/io/parquet/reader.cpp | 4 +- cpp/src/io/parquet/reader_impl.cpp | 36 +-- cpp/src/io/parquet/reader_impl.hpp | 12 +- cpp/src/io/parquet/reader_impl_helpers.cpp | 121 ++++---- cpp/src/io/parquet/reader_impl_helpers.hpp | 21 +- cpp/src/io/parquet/reader_impl_preprocess.cu | 259 +++++++++--------- cpp/src/io/parquet/rle_stream.cuh | 4 +- cpp/src/io/parquet/writer_impl.cu | 223 ++++++++------- cpp/src/io/parquet/writer_impl.hpp | 28 +- cpp/tests/io/parquet_test.cpp | 207 +++++++------- 32 files changed, 531 insertions(+), 605 deletions(-) diff --git a/cpp/include/cudf/io/detail/parquet.hpp b/cpp/include/cudf/io/detail/parquet.hpp index 074f690d2c7..0b8ee9676de 100644 --- a/cpp/include/cudf/io/detail/parquet.hpp +++ b/cpp/include/cudf/io/detail/parquet.hpp @@ -38,7 +38,7 @@ class parquet_reader_options; class parquet_writer_options; class chunked_parquet_writer_options; -namespace detail::parquet { +namespace parquet::detail { /** * @brief Class to read Parquet dataset data into columns. @@ -186,7 +186,7 @@ class writer { */ explicit writer(std::vector> sinks, parquet_writer_options const& options, - single_write_mode mode, + cudf::io::detail::single_write_mode mode, rmm::cuda_stream_view stream); /** @@ -201,7 +201,7 @@ class writer { */ explicit writer(std::vector> sinks, chunked_parquet_writer_options const& options, - single_write_mode mode, + cudf::io::detail::single_write_mode mode, rmm::cuda_stream_view stream); /** @@ -250,5 +250,5 @@ class writer { * metadata. 
*/ parquet_metadata read_parquet_metadata(host_span const> sources); -} // namespace detail::parquet +} // namespace parquet::detail } // namespace cudf::io diff --git a/cpp/include/cudf/io/parquet.hpp b/cpp/include/cudf/io/parquet.hpp index deaf23d405a..6283099e700 100644 --- a/cpp/include/cudf/io/parquet.hpp +++ b/cpp/include/cudf/io/parquet.hpp @@ -499,7 +499,7 @@ class chunked_parquet_reader { [[nodiscard]] table_with_metadata read_chunk() const; private: - std::unique_ptr reader; + std::unique_ptr reader; }; /** @} */ // end of group @@ -1750,7 +1750,7 @@ class parquet_chunked_writer { std::vector const& column_chunks_file_paths = {}); /// Unique pointer to impl writer class - std::unique_ptr writer; + std::unique_ptr writer; }; /** @} */ // end of group diff --git a/cpp/src/io/functions.cpp b/cpp/src/io/functions.cpp index 392a7850886..726442d752e 100644 --- a/cpp/src/io/functions.cpp +++ b/cpp/src/io/functions.cpp @@ -470,8 +470,8 @@ void orc_chunked_writer::close() writer->close(); } -using namespace cudf::io::detail::parquet; -namespace detail_parquet = cudf::io::detail::parquet; +using namespace cudf::io::parquet::detail; +namespace detail_parquet = cudf::io::parquet::detail; table_with_metadata read_parquet(parquet_reader_options const& options, rmm::mr::device_memory_resource* mr) diff --git a/cpp/src/io/parquet/chunk_dict.cu b/cpp/src/io/parquet/chunk_dict.cu index 9ff1869edde..53ff31ab0a7 100644 --- a/cpp/src/io/parquet/chunk_dict.cu +++ b/cpp/src/io/parquet/chunk_dict.cu @@ -24,10 +24,8 @@ #include -namespace cudf { -namespace io { -namespace parquet { -namespace gpu { +namespace cudf::io::parquet::detail { + namespace { constexpr int DEFAULT_BLOCK_SIZE = 256; } @@ -101,7 +99,7 @@ struct map_find_fn { template __global__ void __launch_bounds__(block_size) - populate_chunk_hash_maps_kernel(cudf::detail::device_2dspan frags) + populate_chunk_hash_maps_kernel(cudf::detail::device_2dspan frags) { auto col_idx = blockIdx.y; auto block_x = blockIdx.x; @@ -226,7 +224,7 @@ __global__ void __launch_bounds__(block_size) template __global__ void __launch_bounds__(block_size) - get_dictionary_indices_kernel(cudf::detail::device_2dspan frags) + get_dictionary_indices_kernel(cudf::detail::device_2dspan frags) { auto col_idx = blockIdx.y; auto block_x = blockIdx.x; @@ -276,7 +274,7 @@ void initialize_chunk_hash_maps(device_span chunks, rmm::cuda_st <<>>(chunks); } -void populate_chunk_hash_maps(cudf::detail::device_2dspan frags, +void populate_chunk_hash_maps(cudf::detail::device_2dspan frags, rmm::cuda_stream_view stream) { dim3 const dim_grid(frags.size().second, frags.size().first); @@ -290,14 +288,11 @@ void collect_map_entries(device_span chunks, rmm::cuda_stream_vi collect_map_entries_kernel<<>>(chunks); } -void get_dictionary_indices(cudf::detail::device_2dspan frags, +void get_dictionary_indices(cudf::detail::device_2dspan frags, rmm::cuda_stream_view stream) { dim3 const dim_grid(frags.size().second, frags.size().first); get_dictionary_indices_kernel <<>>(frags); } -} // namespace gpu -} // namespace parquet -} // namespace io -} // namespace cudf +} // namespace cudf::io::parquet::detail diff --git a/cpp/src/io/parquet/compact_protocol_reader.cpp b/cpp/src/io/parquet/compact_protocol_reader.cpp index 5c7b8ca3f8c..81d1be64a45 100644 --- a/cpp/src/io/parquet/compact_protocol_reader.cpp +++ b/cpp/src/io/parquet/compact_protocol_reader.cpp @@ -21,9 +21,7 @@ #include #include -namespace cudf { -namespace io { -namespace parquet { +namespace cudf::io::parquet::detail { /** * @brief 
Base class for parquet field functors. @@ -870,6 +868,4 @@ int CompactProtocolReader::WalkSchema( } } -} // namespace parquet -} // namespace io -} // namespace cudf +} // namespace cudf::io::parquet::detail diff --git a/cpp/src/io/parquet/compact_protocol_reader.hpp b/cpp/src/io/parquet/compact_protocol_reader.hpp index 619815db503..cbb4161b138 100644 --- a/cpp/src/io/parquet/compact_protocol_reader.hpp +++ b/cpp/src/io/parquet/compact_protocol_reader.hpp @@ -25,9 +25,8 @@ #include #include -namespace cudf { -namespace io { -namespace parquet { +namespace cudf::io::parquet::detail { + /** * @brief Class for parsing Parquet's Thrift Compact Protocol encoded metadata * @@ -147,6 +146,4 @@ class CompactProtocolReader { friend class parquet_field_struct_blob; }; -} // namespace parquet -} // namespace io -} // namespace cudf +} // namespace cudf::io::parquet::detail diff --git a/cpp/src/io/parquet/compact_protocol_writer.cpp b/cpp/src/io/parquet/compact_protocol_writer.cpp index 60bc8984d81..9adc8767880 100644 --- a/cpp/src/io/parquet/compact_protocol_writer.cpp +++ b/cpp/src/io/parquet/compact_protocol_writer.cpp @@ -16,9 +16,7 @@ #include "compact_protocol_writer.hpp" -namespace cudf { -namespace io { -namespace parquet { +namespace cudf::io::parquet::detail { /** * @brief Parquet CompactProtocolWriter class @@ -391,6 +389,4 @@ inline void CompactProtocolFieldWriter::set_current_field(int const& field) current_field_value = field; } -} // namespace parquet -} // namespace io -} // namespace cudf +} // namespace cudf::io::parquet::detail diff --git a/cpp/src/io/parquet/compact_protocol_writer.hpp b/cpp/src/io/parquet/compact_protocol_writer.hpp index 26d66527aa5..4849a814b14 100644 --- a/cpp/src/io/parquet/compact_protocol_writer.hpp +++ b/cpp/src/io/parquet/compact_protocol_writer.hpp @@ -25,9 +25,7 @@ #include #include -namespace cudf { -namespace io { -namespace parquet { +namespace cudf::io::parquet::detail { /** * @brief Class for parsing Parquet's Thrift Compact Protocol encoded metadata @@ -115,6 +113,4 @@ class CompactProtocolFieldWriter { inline void set_current_field(int const& field); }; -} // namespace parquet -} // namespace io -} // namespace cudf +} // namespace cudf::io::parquet::detail diff --git a/cpp/src/io/parquet/decode_preprocess.cu b/cpp/src/io/parquet/decode_preprocess.cu index 8de3702bc2e..544c93ee616 100644 --- a/cpp/src/io/parquet/decode_preprocess.cu +++ b/cpp/src/io/parquet/decode_preprocess.cu @@ -23,10 +23,7 @@ #include #include -namespace cudf { -namespace io { -namespace parquet { -namespace gpu { +namespace cudf::io::parquet::detail { namespace { @@ -411,7 +408,4 @@ void ComputePageSizes(cudf::detail::hostdevice_vector& pages, } } -} // namespace gpu -} // namespace parquet -} // namespace io -} // namespace cudf +} // namespace cudf::io::parquet::detail diff --git a/cpp/src/io/parquet/delta_binary.cuh b/cpp/src/io/parquet/delta_binary.cuh index 2382e4aafdf..a513e6674b4 100644 --- a/cpp/src/io/parquet/delta_binary.cuh +++ b/cpp/src/io/parquet/delta_binary.cuh @@ -18,7 +18,7 @@ #include "page_decode.cuh" -namespace cudf::io::parquet::gpu { +namespace cudf::io::parquet::detail { // DELTA_XXX encoding support // @@ -291,4 +291,4 @@ struct delta_binary_decoder { } }; -} // namespace cudf::io::parquet::gpu +} // namespace cudf::io::parquet::detail diff --git a/cpp/src/io/parquet/page_data.cu b/cpp/src/io/parquet/page_data.cu index 230834632dd..cce3659b902 100644 --- a/cpp/src/io/parquet/page_data.cu +++ b/cpp/src/io/parquet/page_data.cu @@ -23,10 +23,7 @@ 
#include #include -namespace cudf { -namespace io { -namespace parquet { -namespace gpu { +namespace cudf::io::parquet::detail { namespace { @@ -624,7 +621,7 @@ uint32_t GetAggregatedDecodeKernelMask(cudf::detail::hostdevice_vector } /** - * @copydoc cudf::io::parquet::gpu::DecodePageData + * @copydoc cudf::io::parquet::detail::DecodePageData */ void __host__ DecodePageData(cudf::detail::hostdevice_vector& pages, cudf::detail::hostdevice_vector const& chunks, @@ -648,7 +645,4 @@ void __host__ DecodePageData(cudf::detail::hostdevice_vector& pages, } } -} // namespace gpu -} // namespace parquet -} // namespace io -} // namespace cudf +} // namespace cudf::io::parquet::detail diff --git a/cpp/src/io/parquet/page_decode.cuh b/cpp/src/io/parquet/page_decode.cuh index d70cabdd35f..7c866fd8b9e 100644 --- a/cpp/src/io/parquet/page_decode.cuh +++ b/cpp/src/io/parquet/page_decode.cuh @@ -24,7 +24,7 @@ #include #include -namespace cudf::io::parquet::gpu { +namespace cudf::io::parquet::detail { struct page_state_s { constexpr page_state_s() noexcept {} @@ -1384,4 +1384,4 @@ inline __device__ bool setupLocalPageInfo(page_state_s* const s, return true; } -} // namespace cudf::io::parquet::gpu +} // namespace cudf::io::parquet::detail diff --git a/cpp/src/io/parquet/page_delta_decode.cu b/cpp/src/io/parquet/page_delta_decode.cu index 2b78dead205..d25684a59f3 100644 --- a/cpp/src/io/parquet/page_delta_decode.cu +++ b/cpp/src/io/parquet/page_delta_decode.cu @@ -23,7 +23,7 @@ #include #include -namespace cudf::io::parquet::gpu { +namespace cudf::io::parquet::detail { namespace { @@ -160,7 +160,7 @@ __global__ void __launch_bounds__(96) } // anonymous namespace /** - * @copydoc cudf::io::parquet::gpu::DecodeDeltaBinary + * @copydoc cudf::io::parquet::detail::DecodeDeltaBinary */ void __host__ DecodeDeltaBinary(cudf::detail::hostdevice_vector& pages, cudf::detail::hostdevice_vector const& chunks, @@ -184,4 +184,4 @@ void __host__ DecodeDeltaBinary(cudf::detail::hostdevice_vector& pages } } -} // namespace cudf::io::parquet::gpu +} // namespace cudf::io::parquet::detail diff --git a/cpp/src/io/parquet/page_enc.cu b/cpp/src/io/parquet/page_enc.cu index fe0dbb85124..78873d5e8ca 100644 --- a/cpp/src/io/parquet/page_enc.cu +++ b/cpp/src/io/parquet/page_enc.cu @@ -41,10 +41,7 @@ #include #include -namespace cudf { -namespace io { -namespace parquet { -namespace gpu { +namespace cudf::io::parquet::detail { namespace { @@ -329,7 +326,7 @@ __global__ void __launch_bounds__(128) // blockDim {128,1,1} __global__ void __launch_bounds__(128) gpuInitPages(device_2dspan chunks, - device_span pages, + device_span pages, device_span page_sizes, device_span comp_page_sizes, device_span col_desc, @@ -998,7 +995,7 @@ __device__ auto julian_days_with_time(int64_t v) // blockDim(128, 1, 1) template __global__ void __launch_bounds__(128, 8) - gpuEncodePages(device_span pages, + gpuEncodePages(device_span pages, device_span> comp_in, device_span> comp_out, device_span comp_results, @@ -1988,7 +1985,7 @@ __global__ void __launch_bounds__(128) // blockDim(1024, 1, 1) __global__ void __launch_bounds__(1024) - gpuGatherPages(device_span chunks, device_span pages) + gpuGatherPages(device_span chunks, device_span pages) { __shared__ __align__(8) EncColumnChunk ck_g; __shared__ __align__(8) EncPage page_g; @@ -2265,7 +2262,7 @@ void InitFragmentStatistics(device_span groups, } void InitEncoderPages(device_2dspan chunks, - device_span pages, + device_span pages, device_span page_sizes, device_span comp_page_sizes, device_span col_desc, @@ 
-2294,7 +2291,7 @@ void InitEncoderPages(device_2dspan chunks, write_v2_headers); } -void EncodePages(device_span pages, +void EncodePages(device_span pages, bool write_v2_headers, device_span> comp_in, device_span> comp_out, @@ -2328,7 +2325,7 @@ void EncodePageHeaders(device_span pages, } void GatherPages(device_span chunks, - device_span pages, + device_span pages, rmm::cuda_stream_view stream) { gpuGatherPages<<>>(chunks, pages); @@ -2343,7 +2340,4 @@ void EncodeColumnIndexes(device_span chunks, chunks, column_stats, column_index_truncate_length); } -} // namespace gpu -} // namespace parquet -} // namespace io -} // namespace cudf +} // namespace cudf::io::parquet::detail diff --git a/cpp/src/io/parquet/page_hdr.cu b/cpp/src/io/parquet/page_hdr.cu index 6f8b2f50443..eae8e05e61e 100644 --- a/cpp/src/io/parquet/page_hdr.cu +++ b/cpp/src/io/parquet/page_hdr.cu @@ -20,10 +20,8 @@ #include -namespace cudf { -namespace io { -namespace parquet { -namespace gpu { +namespace cudf::io::parquet::detail { + // Minimal thrift implementation for parsing page headers // https://github.com/apache/thrift/blob/master/doc/specs/thrift-compact-protocol.md @@ -161,8 +159,7 @@ __device__ void skip_struct_field(byte_stream_s* bs, int field_type) * @param chunk Column chunk the page belongs to * @return `kernel_mask_bits` value for the given page */ -__device__ uint32_t kernel_mask_for_page(gpu::PageInfo const& page, - gpu::ColumnChunkDesc const& chunk) +__device__ uint32_t kernel_mask_for_page(PageInfo const& page, ColumnChunkDesc const& chunk) { if (page.flags & PAGEINFO_FLAGS_DICTIONARY) { return 0; } @@ -528,7 +525,4 @@ void __host__ BuildStringDictionaryIndex(ColumnChunkDesc* chunks, gpuBuildStringDictionaryIndex<<>>(chunks, num_chunks); } -} // namespace gpu -} // namespace parquet -} // namespace io -} // namespace cudf +} // namespace cudf::io::parquet::detail diff --git a/cpp/src/io/parquet/page_string_decode.cu b/cpp/src/io/parquet/page_string_decode.cu index d79abe4a6d2..4d79770ec34 100644 --- a/cpp/src/io/parquet/page_string_decode.cu +++ b/cpp/src/io/parquet/page_string_decode.cu @@ -20,10 +20,7 @@ #include #include -namespace cudf { -namespace io { -namespace parquet { -namespace gpu { +namespace cudf::io::parquet::detail { namespace { @@ -757,7 +754,7 @@ __global__ void __launch_bounds__(decode_block_size) } // anonymous namespace /** - * @copydoc cudf::io::parquet::gpu::ComputePageStringSizes + * @copydoc cudf::io::parquet::detail::ComputePageStringSizes */ void ComputePageStringSizes(cudf::detail::hostdevice_vector& pages, cudf::detail::hostdevice_vector const& chunks, @@ -778,7 +775,7 @@ void ComputePageStringSizes(cudf::detail::hostdevice_vector& pages, } /** - * @copydoc cudf::io::parquet::gpu::DecodeStringPageData + * @copydoc cudf::io::parquet::detail::DecodeStringPageData */ void __host__ DecodeStringPageData(cudf::detail::hostdevice_vector& pages, cudf::detail::hostdevice_vector const& chunks, @@ -802,7 +799,4 @@ void __host__ DecodeStringPageData(cudf::detail::hostdevice_vector& pa } } -} // namespace gpu -} // namespace parquet -} // namespace io -} // namespace cudf +} // namespace cudf::io::parquet::detail diff --git a/cpp/src/io/parquet/page_string_utils.cuh b/cpp/src/io/parquet/page_string_utils.cuh index 9395599b3ff..a81d0a64466 100644 --- a/cpp/src/io/parquet/page_string_utils.cuh +++ b/cpp/src/io/parquet/page_string_utils.cuh @@ -18,7 +18,7 @@ #include -namespace cudf::io::parquet::gpu { +namespace cudf::io::parquet::detail { // stole this from 
cudf/strings/detail/gather.cuh. modified to run on a single string on one warp. // copies from src to dst in 16B chunks per thread. @@ -107,4 +107,4 @@ __device__ void block_excl_sum(size_type* arr, size_type length, size_type initi } } -} // namespace cudf::io::parquet::gpu +} // namespace cudf::io::parquet::detail diff --git a/cpp/src/io/parquet/parquet.hpp b/cpp/src/io/parquet/parquet.hpp index 1df49262e87..c5993d73dec 100644 --- a/cpp/src/io/parquet/parquet.hpp +++ b/cpp/src/io/parquet/parquet.hpp @@ -25,9 +25,8 @@ #include #include -namespace cudf { -namespace io { -namespace parquet { +namespace cudf::io::parquet::detail { + constexpr uint32_t parquet_magic = (('P' << 0) | ('A' << 8) | ('R' << 16) | ('1' << 24)); /** @@ -405,6 +404,4 @@ static inline int CountLeadingZeros32(uint32_t value) #endif } -} // namespace parquet -} // namespace io -} // namespace cudf +} // namespace cudf::io::parquet::detail diff --git a/cpp/src/io/parquet/parquet_common.hpp b/cpp/src/io/parquet/parquet_common.hpp index 5a1716bb547..50736197eb9 100644 --- a/cpp/src/io/parquet/parquet_common.hpp +++ b/cpp/src/io/parquet/parquet_common.hpp @@ -18,9 +18,8 @@ #include -namespace cudf { -namespace io { -namespace parquet { +namespace cudf::io::parquet::detail { + // Max decimal precisions according to the parquet spec: // https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#decimal auto constexpr MAX_DECIMAL32_PRECISION = 9; @@ -156,6 +155,4 @@ enum FieldType { ST_FLD_STRUCT = 12, }; -} // namespace parquet -} // namespace io -} // namespace cudf +} // namespace cudf::io::parquet::detail diff --git a/cpp/src/io/parquet/parquet_gpu.cuh b/cpp/src/io/parquet/parquet_gpu.cuh index dc74bee1536..10e12ebb782 100644 --- a/cpp/src/io/parquet/parquet_gpu.cuh +++ b/cpp/src/io/parquet/parquet_gpu.cuh @@ -23,7 +23,7 @@ #include -namespace cudf::io::parquet::gpu { +namespace cudf::io::parquet::detail { auto constexpr KEY_SENTINEL = size_type{-1}; auto constexpr VALUE_SENTINEL = size_type{-1}; @@ -81,4 +81,4 @@ inline size_type __device__ row_to_value_idx(size_type idx, return idx; } -} // namespace cudf::io::parquet::gpu +} // namespace cudf::io::parquet::detail diff --git a/cpp/src/io/parquet/parquet_gpu.hpp b/cpp/src/io/parquet/parquet_gpu.hpp index 51c862b376b..767668cc65e 100644 --- a/cpp/src/io/parquet/parquet_gpu.hpp +++ b/cpp/src/io/parquet/parquet_gpu.hpp @@ -35,7 +35,7 @@ #include -namespace cudf::io::parquet { +namespace cudf::io::parquet::detail { using cudf::io::detail::string_index_pair; @@ -88,8 +88,6 @@ struct input_column_info { auto nesting_depth() const { return nesting.size(); } }; -namespace gpu { - /** * @brief Enums for the flags in the page header */ @@ -347,7 +345,7 @@ struct file_intermediate_data { // all chunks from the selected row groups. We may end up reading these chunks progressively // instead of all at once - std::vector chunks{}; + std::vector chunks{}; // skip_rows/num_rows values for the entire file. these need to be adjusted per-pass because we // may not be visiting every row group that contains these bounds @@ -372,16 +370,16 @@ struct pass_intermediate_data { // rowgroup, chunk and page information for the current pass. 
std::vector row_groups{}; - cudf::detail::hostdevice_vector chunks{}; - cudf::detail::hostdevice_vector pages_info{}; - cudf::detail::hostdevice_vector page_nesting_info{}; - cudf::detail::hostdevice_vector page_nesting_decode_info{}; + cudf::detail::hostdevice_vector chunks{}; + cudf::detail::hostdevice_vector pages_info{}; + cudf::detail::hostdevice_vector page_nesting_info{}; + cudf::detail::hostdevice_vector page_nesting_decode_info{}; rmm::device_uvector page_keys{0, rmm::cuda_stream_default}; rmm::device_uvector page_index{0, rmm::cuda_stream_default}; rmm::device_uvector str_dict_index{0, rmm::cuda_stream_default}; - std::vector output_chunk_read_info; + std::vector output_chunk_read_info; std::size_t current_output_chunk{0}; rmm::device_buffer level_decode_data{}; @@ -739,7 +737,7 @@ void initialize_chunk_hash_maps(device_span chunks, rmm::cuda_st * @param frags Column fragments * @param stream CUDA stream to use */ -void populate_chunk_hash_maps(cudf::detail::device_2dspan frags, +void populate_chunk_hash_maps(cudf::detail::device_2dspan frags, rmm::cuda_stream_view stream); /** @@ -762,7 +760,7 @@ void collect_map_entries(device_span chunks, rmm::cuda_stream_vi * @param frags Column fragments * @param stream CUDA stream to use */ -void get_dictionary_indices(cudf::detail::device_2dspan frags, +void get_dictionary_indices(cudf::detail::device_2dspan frags, rmm::cuda_stream_view stream); /** @@ -781,7 +779,7 @@ void get_dictionary_indices(cudf::detail::device_2dspan * @param[in] stream CUDA stream to use */ void InitEncoderPages(cudf::detail::device_2dspan chunks, - device_span pages, + device_span pages, device_span page_sizes, device_span comp_page_sizes, device_span col_desc, @@ -847,7 +845,7 @@ void EncodePageHeaders(device_span pages, * @param[in] stream CUDA stream to use */ void GatherPages(device_span chunks, - device_span pages, + device_span pages, rmm::cuda_stream_view stream); /** @@ -863,5 +861,4 @@ void EncodeColumnIndexes(device_span chunks, int32_t column_index_truncate_length, rmm::cuda_stream_view stream); -} // namespace gpu -} // namespace cudf::io::parquet +} // namespace cudf::io::parquet::detail diff --git a/cpp/src/io/parquet/predicate_pushdown.cpp b/cpp/src/io/parquet/predicate_pushdown.cpp index 805d082c71e..9083be1c2dd 100644 --- a/cpp/src/io/parquet/predicate_pushdown.cpp +++ b/cpp/src/io/parquet/predicate_pushdown.cpp @@ -35,7 +35,7 @@ #include #include -namespace cudf::io::detail::parquet { +namespace cudf::io::parquet::detail { namespace { /** @@ -62,13 +62,13 @@ struct stats_caster { // uses storage type as T template () or cudf::is_nested())> - static T convert(uint8_t const* stats_val, size_t stats_size, cudf::io::parquet::Type const type) + static T convert(uint8_t const* stats_val, size_t stats_size, Type const type) { CUDF_FAIL("unsupported type for stats casting"); } template ())> - static T convert(uint8_t const* stats_val, size_t stats_size, cudf::io::parquet::Type const type) + static T convert(uint8_t const* stats_val, size_t stats_size, Type const type) { CUDF_EXPECTS(type == BOOLEAN, "Invalid type and stats combination"); return targetType(*reinterpret_cast(stats_val)); @@ -78,7 +78,7 @@ struct stats_caster { template () and !cudf::is_boolean()) or cudf::is_fixed_point() or cudf::is_chrono())> - static T convert(uint8_t const* stats_val, size_t stats_size, cudf::io::parquet::Type const type) + static T convert(uint8_t const* stats_val, size_t stats_size, Type const type) { switch (type) { case INT32: return 
targetType(*reinterpret_cast(stats_val)); @@ -103,7 +103,7 @@ struct stats_caster { } template ())> - static T convert(uint8_t const* stats_val, size_t stats_size, cudf::io::parquet::Type const type) + static T convert(uint8_t const* stats_val, size_t stats_size, Type const type) { switch (type) { case FLOAT: return targetType(*reinterpret_cast(stats_val)); @@ -113,7 +113,7 @@ struct stats_caster { } template )> - static T convert(uint8_t const* stats_val, size_t stats_size, cudf::io::parquet::Type const type) + static T convert(uint8_t const* stats_val, size_t stats_size, Type const type) { switch (type) { case BYTE_ARRAY: [[fallthrough]]; @@ -527,4 +527,4 @@ named_to_reference_converter::visit_operands( return transformed_operands; } -} // namespace cudf::io::detail::parquet +} // namespace cudf::io::parquet::detail diff --git a/cpp/src/io/parquet/reader.cpp b/cpp/src/io/parquet/reader.cpp index 1e87447006d..17d7c07bc91 100644 --- a/cpp/src/io/parquet/reader.cpp +++ b/cpp/src/io/parquet/reader.cpp @@ -16,7 +16,7 @@ #include "reader_impl.hpp" -namespace cudf::io::detail::parquet { +namespace cudf::io::parquet::detail { reader::reader() = default; @@ -59,4 +59,4 @@ bool chunked_reader::has_next() const { return _impl->has_next(); } table_with_metadata chunked_reader::read_chunk() const { return _impl->read_chunk(); } -} // namespace cudf::io::detail::parquet +} // namespace cudf::io::parquet::detail diff --git a/cpp/src/io/parquet/reader_impl.cpp b/cpp/src/io/parquet/reader_impl.cpp index ea40f29a070..26ec83d5946 100644 --- a/cpp/src/io/parquet/reader_impl.cpp +++ b/cpp/src/io/parquet/reader_impl.cpp @@ -25,7 +25,7 @@ #include #include -namespace cudf::io::detail::parquet { +namespace cudf::io::parquet::detail { void reader::impl::decode_page_data(size_t skip_rows, size_t num_rows) { @@ -38,7 +38,7 @@ void reader::impl::decode_page_data(size_t skip_rows, size_t num_rows) CUDF_EXPECTS(pages.size() > 0, "There is no page to decode"); size_t const sum_max_depths = std::accumulate( - chunks.begin(), chunks.end(), 0, [&](size_t cursum, gpu::ColumnChunkDesc const& chunk) { + chunks.begin(), chunks.end(), 0, [&](size_t cursum, ColumnChunkDesc const& chunk) { return cursum + _metadata->get_output_nesting_depth(chunk.src_col_schema); }); @@ -51,10 +51,10 @@ void reader::impl::decode_page_data(size_t skip_rows, size_t num_rows) // doing a gather operation later on. // TODO: This step is somewhat redundant if size info has already been calculated (nested schema, // chunked reader). 
- auto const has_strings = (kernel_mask & gpu::KERNEL_MASK_STRING) != 0; + auto const has_strings = (kernel_mask & KERNEL_MASK_STRING) != 0; std::vector col_sizes(_input_columns.size(), 0L); if (has_strings) { - gpu::ComputePageStringSizes( + ComputePageStringSizes( pages, chunks, skip_rows, num_rows, _pass_itm_data->level_type_size, _stream); col_sizes = calculate_page_string_offsets(); @@ -176,19 +176,19 @@ void reader::impl::decode_page_data(size_t skip_rows, size_t num_rows) if (has_strings) { auto& stream = streams[s_idx++]; chunk_nested_str_data.host_to_device_async(stream); - gpu::DecodeStringPageData( + DecodeStringPageData( pages, chunks, num_rows, skip_rows, level_type_size, error_code.data(), stream); } // launch delta binary decoder - if ((kernel_mask & gpu::KERNEL_MASK_DELTA_BINARY) != 0) { - gpu::DecodeDeltaBinary( + if ((kernel_mask & KERNEL_MASK_DELTA_BINARY) != 0) { + DecodeDeltaBinary( pages, chunks, num_rows, skip_rows, level_type_size, error_code.data(), streams[s_idx++]); } // launch the catch-all page decoder - if ((kernel_mask & gpu::KERNEL_MASK_GENERAL) != 0) { - gpu::DecodePageData( + if ((kernel_mask & KERNEL_MASK_GENERAL) != 0) { + DecodePageData( pages, chunks, num_rows, skip_rows, level_type_size, error_code.data(), streams[s_idx++]); } @@ -248,13 +248,13 @@ void reader::impl::decode_page_data(size_t skip_rows, size_t num_rows) // update null counts in the final column buffers for (size_t idx = 0; idx < pages.size(); idx++) { - gpu::PageInfo* pi = &pages[idx]; - if (pi->flags & gpu::PAGEINFO_FLAGS_DICTIONARY) { continue; } - gpu::ColumnChunkDesc* col = &chunks[pi->chunk_idx]; + PageInfo* pi = &pages[idx]; + if (pi->flags & PAGEINFO_FLAGS_DICTIONARY) { continue; } + ColumnChunkDesc* col = &chunks[pi->chunk_idx]; input_column_info const& input_col = _input_columns[col->src_col_index]; - int index = pi->nesting_decode - page_nesting_decode.device_ptr(); - gpu::PageNestingDecodeInfo* pndi = &page_nesting_decode[index]; + int index = pi->nesting_decode - page_nesting_decode.device_ptr(); + PageNestingDecodeInfo* pndi = &page_nesting_decode[index]; auto* cols = &_output_buffers; for (size_t l_idx = 0; l_idx < input_col.nesting_depth(); l_idx++) { @@ -320,7 +320,7 @@ reader::impl::impl(std::size_t chunk_read_limit, // Save the states of the output buffers for reuse in `chunk_read()`. for (auto const& buff : _output_buffers) { - _output_buffers_template.emplace_back(inline_column_buffer::empty_like(buff)); + _output_buffers_template.emplace_back(cudf::io::detail::inline_column_buffer::empty_like(buff)); } } @@ -368,7 +368,7 @@ void reader::impl::prepare_data(int64_t skip_rows, // always create the pass struct, even if we end up with no passes. 
// this will also cause the previous pass information to be deleted - _pass_itm_data = std::make_unique(); + _pass_itm_data = std::make_unique(); if (_file_itm_data.global_num_rows > 0 && not _file_itm_data.row_groups.empty() && not _input_columns.empty() && _current_input_pass < num_passes) { @@ -521,7 +521,7 @@ table_with_metadata reader::impl::read_chunk() if (_chunk_count > 0) { _output_buffers.resize(0); for (auto const& buff : _output_buffers_template) { - _output_buffers.emplace_back(inline_column_buffer::empty_like(buff)); + _output_buffers.emplace_back(cudf::io::detail::inline_column_buffer::empty_like(buff)); } } @@ -571,4 +571,4 @@ parquet_metadata read_parquet_metadata(host_span con metadata.get_key_value_metadata()[0]}; } -} // namespace cudf::io::detail::parquet +} // namespace cudf::io::parquet::detail diff --git a/cpp/src/io/parquet/reader_impl.hpp b/cpp/src/io/parquet/reader_impl.hpp index 9445e4d1648..6003b931b04 100644 --- a/cpp/src/io/parquet/reader_impl.hpp +++ b/cpp/src/io/parquet/reader_impl.hpp @@ -35,7 +35,7 @@ #include #include -namespace cudf::io::detail::parquet { +namespace cudf::io::parquet::detail { /** * @brief Implementation for Parquet reader @@ -261,10 +261,10 @@ class reader::impl { std::vector _input_columns; // Buffers for generating output columns - std::vector _output_buffers; + std::vector _output_buffers; // Buffers copied from `_output_buffers` after construction for reuse - std::vector _output_buffers_template; + std::vector _output_buffers_template; // _output_buffers associated schema indices std::vector _output_column_schemas; @@ -285,8 +285,8 @@ class reader::impl { // Within a pass, we produce one or more chunks of output, whose maximum total // byte size is controlled by _output_chunk_read_limit. - cudf::io::parquet::gpu::file_intermediate_data _file_itm_data; - std::unique_ptr _pass_itm_data; + file_intermediate_data _file_itm_data; + std::unique_ptr _pass_itm_data; // an array of offsets into _file_itm_data::global_chunks. Each pair of offsets represents // the start/end of the chunks to be loaded for a given pass. 
@@ -301,4 +301,4 @@ class reader::impl { bool _file_preprocessed{false}; }; -} // namespace cudf::io::detail::parquet +} // namespace cudf::io::parquet::detail diff --git a/cpp/src/io/parquet/reader_impl_helpers.cpp b/cpp/src/io/parquet/reader_impl_helpers.cpp index 9778cfc47d2..171cf07da3e 100644 --- a/cpp/src/io/parquet/reader_impl_helpers.cpp +++ b/cpp/src/io/parquet/reader_impl_helpers.cpp @@ -21,34 +21,34 @@ #include #include -namespace cudf::io::detail::parquet { +namespace cudf::io::parquet::detail { namespace { ConvertedType logical_type_to_converted_type(LogicalType const& logical) { if (logical.isset.STRING) { - return parquet::UTF8; + return UTF8; } else if (logical.isset.MAP) { - return parquet::MAP; + return MAP; } else if (logical.isset.LIST) { - return parquet::LIST; + return LIST; } else if (logical.isset.ENUM) { - return parquet::ENUM; + return ENUM; } else if (logical.isset.DECIMAL) { - return parquet::DECIMAL; // TODO set decimal values + return DECIMAL; // TODO set decimal values } else if (logical.isset.DATE) { - return parquet::DATE; + return DATE; } else if (logical.isset.TIME) { if (logical.TIME.unit.isset.MILLIS) - return parquet::TIME_MILLIS; + return TIME_MILLIS; else if (logical.TIME.unit.isset.MICROS) - return parquet::TIME_MICROS; + return TIME_MICROS; } else if (logical.isset.TIMESTAMP) { if (logical.TIMESTAMP.unit.isset.MILLIS) - return parquet::TIMESTAMP_MILLIS; + return TIMESTAMP_MILLIS; else if (logical.TIMESTAMP.unit.isset.MICROS) - return parquet::TIMESTAMP_MICROS; + return TIMESTAMP_MICROS; } else if (logical.isset.INTEGER) { switch (logical.INTEGER.bitWidth) { case 8: return logical.INTEGER.isSigned ? INT_8 : UINT_8; @@ -58,13 +58,13 @@ ConvertedType logical_type_to_converted_type(LogicalType const& logical) default: break; } } else if (logical.isset.UNKNOWN) { - return parquet::NA; + return NA; } else if (logical.isset.JSON) { - return parquet::JSON; + return JSON; } else if (logical.isset.BSON) { - return parquet::BSON; + return BSON; } - return parquet::UNKNOWN; + return UNKNOWN; } } // namespace @@ -76,39 +76,39 @@ type_id to_type_id(SchemaElement const& schema, bool strings_to_categorical, type_id timestamp_type_id) { - parquet::Type const physical = schema.type; - parquet::LogicalType const logical_type = schema.logical_type; - parquet::ConvertedType converted_type = schema.converted_type; - int32_t decimal_precision = schema.decimal_precision; + Type const physical = schema.type; + LogicalType const logical_type = schema.logical_type; + ConvertedType converted_type = schema.converted_type; + int32_t decimal_precision = schema.decimal_precision; // Logical type used for actual data interpretation; the legacy converted type // is superseded by 'logical' type whenever available. 
auto const inferred_converted_type = logical_type_to_converted_type(logical_type); - if (inferred_converted_type != parquet::UNKNOWN) { converted_type = inferred_converted_type; } - if (inferred_converted_type == parquet::DECIMAL) { + if (inferred_converted_type != UNKNOWN) { converted_type = inferred_converted_type; } + if (inferred_converted_type == DECIMAL) { decimal_precision = schema.logical_type.DECIMAL.precision; } switch (converted_type) { - case parquet::UINT_8: return type_id::UINT8; - case parquet::INT_8: return type_id::INT8; - case parquet::UINT_16: return type_id::UINT16; - case parquet::INT_16: return type_id::INT16; - case parquet::UINT_32: return type_id::UINT32; - case parquet::UINT_64: return type_id::UINT64; - case parquet::DATE: return type_id::TIMESTAMP_DAYS; - case parquet::TIME_MILLIS: return type_id::DURATION_MILLISECONDS; - case parquet::TIME_MICROS: return type_id::DURATION_MICROSECONDS; - case parquet::TIMESTAMP_MILLIS: + case UINT_8: return type_id::UINT8; + case INT_8: return type_id::INT8; + case UINT_16: return type_id::UINT16; + case INT_16: return type_id::INT16; + case UINT_32: return type_id::UINT32; + case UINT_64: return type_id::UINT64; + case DATE: return type_id::TIMESTAMP_DAYS; + case TIME_MILLIS: return type_id::DURATION_MILLISECONDS; + case TIME_MICROS: return type_id::DURATION_MICROSECONDS; + case TIMESTAMP_MILLIS: return (timestamp_type_id != type_id::EMPTY) ? timestamp_type_id : type_id::TIMESTAMP_MILLISECONDS; - case parquet::TIMESTAMP_MICROS: + case TIMESTAMP_MICROS: return (timestamp_type_id != type_id::EMPTY) ? timestamp_type_id : type_id::TIMESTAMP_MICROSECONDS; - case parquet::DECIMAL: - if (physical == parquet::INT32) { return type_id::DECIMAL32; } - if (physical == parquet::INT64) { return type_id::DECIMAL64; } - if (physical == parquet::FIXED_LEN_BYTE_ARRAY) { + case DECIMAL: + if (physical == INT32) { return type_id::DECIMAL32; } + if (physical == INT64) { return type_id::DECIMAL64; } + if (physical == FIXED_LEN_BYTE_ARRAY) { if (schema.type_length <= static_cast(sizeof(int32_t))) { return type_id::DECIMAL32; } @@ -119,7 +119,7 @@ type_id to_type_id(SchemaElement const& schema, return type_id::DECIMAL128; } } - if (physical == parquet::BYTE_ARRAY) { + if (physical == BYTE_ARRAY) { CUDF_EXPECTS(decimal_precision <= MAX_DECIMAL128_PRECISION, "Invalid decimal precision"); if (decimal_precision <= MAX_DECIMAL32_PRECISION) { return type_id::DECIMAL32; @@ -133,20 +133,20 @@ type_id to_type_id(SchemaElement const& schema, break; // maps are just List>. - case parquet::MAP: - case parquet::LIST: return type_id::LIST; - case parquet::NA: return type_id::STRING; + case MAP: + case LIST: return type_id::LIST; + case NA: return type_id::STRING; // return type_id::EMPTY; //TODO(kn): enable after Null/Empty column support default: break; } - if (inferred_converted_type == parquet::UNKNOWN and physical == parquet::INT64 and + if (inferred_converted_type == UNKNOWN and physical == INT64 and logical_type.TIMESTAMP.unit.isset.NANOS) { return (timestamp_type_id != type_id::EMPTY) ? timestamp_type_id : type_id::TIMESTAMP_NANOSECONDS; } - if (inferred_converted_type == parquet::UNKNOWN and physical == parquet::INT64 and + if (inferred_converted_type == UNKNOWN and physical == INT64 and logical_type.TIME.unit.isset.NANOS) { return type_id::DURATION_NANOSECONDS; } @@ -157,16 +157,16 @@ type_id to_type_id(SchemaElement const& schema, // Physical storage type supported by Parquet; controls the on-disk storage // format in combination with the encoding type. 
switch (physical) { - case parquet::BOOLEAN: return type_id::BOOL8; - case parquet::INT32: return type_id::INT32; - case parquet::INT64: return type_id::INT64; - case parquet::FLOAT: return type_id::FLOAT32; - case parquet::DOUBLE: return type_id::FLOAT64; - case parquet::BYTE_ARRAY: - case parquet::FIXED_LEN_BYTE_ARRAY: + case BOOLEAN: return type_id::BOOL8; + case INT32: return type_id::INT32; + case INT64: return type_id::INT64; + case FLOAT: return type_id::FLOAT32; + case DOUBLE: return type_id::FLOAT64; + case BYTE_ARRAY: + case FIXED_LEN_BYTE_ARRAY: // Can be mapped to INT32 (32-bit hash) or STRING return strings_to_categorical ? type_id::INT32 : type_id::STRING; - case parquet::INT96: + case INT96: return (timestamp_type_id != type_id::EMPTY) ? timestamp_type_id : type_id::TIMESTAMP_NANOSECONDS; default: break; @@ -420,7 +420,7 @@ std::vector aggregate_reader_metadata::get_pandas_index_names() con return names; } -std::tuple> +std::tuple> aggregate_reader_metadata::select_row_groups( host_span const> row_group_indices, int64_t skip_rows_opt, @@ -438,7 +438,7 @@ aggregate_reader_metadata::select_row_groups( host_span const>(filtered_row_group_indices.value()); } } - std::vector selection; + std::vector selection; auto [rows_to_skip, rows_to_read] = [&]() { if (not row_group_indices.empty()) { return std::pair{}; } auto const from_opts = cudf::io::detail::skip_rows_num_rows_from_options( @@ -478,7 +478,7 @@ aggregate_reader_metadata::select_row_groups( } std::tuple, - std::vector, + std::vector, std::vector> aggregate_reader_metadata::select_columns(std::optional> const& use_names, bool include_index, @@ -496,17 +496,18 @@ aggregate_reader_metadata::select_columns(std::optional : -1; }; - std::vector output_columns; + std::vector output_columns; std::vector input_columns; std::vector nesting; // Return true if column path is valid. e.g. if the path is {"struct1", "child1"}, then it is // valid if "struct1.child1" exists in this file's schema. 
If "struct1" exists but "child1" is // not a child of "struct1" then the function will return false for "struct1" - std::function&, bool)> + std::function&, bool)> build_column = [&](column_name_info const* col_name_info, int schema_idx, - std::vector& out_col_array, + std::vector& out_col_array, bool has_list_parent) { if (schema_idx < 0) { return false; } auto const& schema_elem = get_schema(schema_idx); @@ -529,7 +530,8 @@ aggregate_reader_metadata::select_columns(std::optional : to_type_id(schema_elem, strings_to_categorical, timestamp_type_id); auto const dtype = to_data_type(col_type, schema_elem); - inline_column_buffer output_col(dtype, schema_elem.repetition_type == OPTIONAL); + cudf::io::detail::inline_column_buffer output_col(dtype, + schema_elem.repetition_type == OPTIONAL); if (has_list_parent) { output_col.user_data |= PARQUET_COLUMN_BUFFER_FLAG_HAS_LIST_PARENT; } // store the index of this element if inserted in out_col_array nesting.push_back(static_cast(out_col_array.size())); @@ -569,7 +571,8 @@ aggregate_reader_metadata::select_columns(std::optional to_type_id(schema_elem, strings_to_categorical, timestamp_type_id); auto const element_dtype = to_data_type(element_type, schema_elem); - inline_column_buffer element_col(element_dtype, schema_elem.repetition_type == OPTIONAL); + cudf::io::detail::inline_column_buffer element_col( + element_dtype, schema_elem.repetition_type == OPTIONAL); if (has_list_parent || col_type == type_id::LIST) { element_col.user_data |= PARQUET_COLUMN_BUFFER_FLAG_HAS_LIST_PARENT; } @@ -732,4 +735,4 @@ aggregate_reader_metadata::select_columns(std::optional std::move(input_columns), std::move(output_columns), std::move(output_column_schemas)); } -} // namespace cudf::io::detail::parquet +} // namespace cudf::io::parquet::detail diff --git a/cpp/src/io/parquet/reader_impl_helpers.hpp b/cpp/src/io/parquet/reader_impl_helpers.hpp index 9ee17f26a10..1a73e2f55ac 100644 --- a/cpp/src/io/parquet/reader_impl_helpers.hpp +++ b/cpp/src/io/parquet/reader_impl_helpers.hpp @@ -32,9 +32,7 @@ #include #include -namespace cudf::io::detail::parquet { - -using namespace cudf::io::parquet; +namespace cudf::io::parquet::detail { /** * @brief Function that translates Parquet datatype to cuDF type enum @@ -182,7 +180,7 @@ class aggregate_reader_metadata { * @return A tuple of corrected row_start, row_count and list of row group indexes and its * starting row */ - [[nodiscard]] std::tuple> select_row_groups( + [[nodiscard]] std::tuple> select_row_groups( host_span const> row_group_indices, int64_t row_start, std::optional const& row_count, @@ -202,12 +200,13 @@ class aggregate_reader_metadata { * @return input column information, output column information, list of output column schema * indices */ - [[nodiscard]] std:: - tuple, std::vector, std::vector> - select_columns(std::optional> const& use_names, - bool include_index, - bool strings_to_categorical, - type_id timestamp_type_id) const; + [[nodiscard]] std::tuple, + std::vector, + std::vector> + select_columns(std::optional> const& use_names, + bool include_index, + bool strings_to_categorical, + type_id timestamp_type_id) const; }; /** @@ -276,4 +275,4 @@ class named_to_reference_converter : public ast::detail::expression_transformer std::list _operators; }; -} // namespace cudf::io::detail::parquet +} // namespace cudf::io::parquet::detail diff --git a/cpp/src/io/parquet/reader_impl_preprocess.cu b/cpp/src/io/parquet/reader_impl_preprocess.cu index c731c467f2c..4bc6bb6f43b 100644 --- 
a/cpp/src/io/parquet/reader_impl_preprocess.cu +++ b/cpp/src/io/parquet/reader_impl_preprocess.cu @@ -43,7 +43,8 @@ #include -namespace cudf::io::detail::parquet { +namespace cudf::io::parquet::detail { + namespace { /** @@ -185,11 +186,11 @@ template */ [[nodiscard]] std::tuple conversion_info(type_id column_type_id, type_id timestamp_type_id, - parquet::Type physical, + Type physical, int8_t converted, int32_t length) { - int32_t type_width = (physical == parquet::FIXED_LEN_BYTE_ARRAY) ? length : 0; + int32_t type_width = (physical == FIXED_LEN_BYTE_ARRAY) ? length : 0; int32_t clock_rate = 0; if (column_type_id == type_id::INT8 or column_type_id == type_id::UINT8) { type_width = 1; // I32 -> I8 @@ -202,9 +203,9 @@ template } int8_t converted_type = converted; - if (converted_type == parquet::DECIMAL && column_type_id != type_id::FLOAT64 && + if (converted_type == DECIMAL && column_type_id != type_id::FLOAT64 && not cudf::is_fixed_point(data_type{column_type_id})) { - converted_type = parquet::UNKNOWN; // Not converting to float64 or decimal + converted_type = UNKNOWN; // Not converting to float64 or decimal } return std::make_tuple(type_width, clock_rate, converted_type); } @@ -226,7 +227,7 @@ template [[nodiscard]] std::future read_column_chunks_async( std::vector> const& sources, std::vector>& page_data, - cudf::detail::hostdevice_vector& chunks, + cudf::detail::hostdevice_vector& chunks, size_t begin_chunk, size_t end_chunk, std::vector const& column_chunk_offsets, @@ -239,11 +240,10 @@ template size_t const io_offset = column_chunk_offsets[chunk]; size_t io_size = chunks[chunk].compressed_size; size_t next_chunk = chunk + 1; - bool const is_compressed = (chunks[chunk].codec != parquet::Compression::UNCOMPRESSED); + bool const is_compressed = (chunks[chunk].codec != Compression::UNCOMPRESSED); while (next_chunk < end_chunk) { - size_t const next_offset = column_chunk_offsets[next_chunk]; - bool const is_next_compressed = - (chunks[next_chunk].codec != parquet::Compression::UNCOMPRESSED); + size_t const next_offset = column_chunk_offsets[next_chunk]; + bool const is_next_compressed = (chunks[next_chunk].codec != Compression::UNCOMPRESSED); if (next_offset != io_offset + io_size || is_next_compressed != is_compressed || chunk_source_map[chunk] != chunk_source_map[next_chunk]) { // Can't merge if not contiguous or mixing compressed and uncompressed @@ -300,13 +300,13 @@ template * * @return The total number of pages */ -[[nodiscard]] size_t count_page_headers( - cudf::detail::hostdevice_vector& chunks, rmm::cuda_stream_view stream) +[[nodiscard]] size_t count_page_headers(cudf::detail::hostdevice_vector& chunks, + rmm::cuda_stream_view stream) { size_t total_pages = 0; chunks.host_to_device_async(stream); - gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size(), stream); + DecodePageHeaders(chunks.device_ptr(), chunks.size(), stream); chunks.device_to_host_sync(stream); for (size_t c = 0; c < chunks.size(); c++) { @@ -337,8 +337,8 @@ constexpr bool is_supported_encoding(Encoding enc) * @param stream CUDA stream used for device memory operations and kernel launches * @returns The size in bytes of level type data required */ -int decode_page_headers(cudf::detail::hostdevice_vector& chunks, - cudf::detail::hostdevice_vector& pages, +int decode_page_headers(cudf::detail::hostdevice_vector& chunks, + cudf::detail::hostdevice_vector& pages, rmm::cuda_stream_view stream) { // IMPORTANT : if you change how pages are stored within a chunk (dist pages, then data pages), @@ -350,14 +350,14 @@ 
int decode_page_headers(cudf::detail::hostdevice_vector& c } chunks.host_to_device_async(stream); - gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size(), stream); + DecodePageHeaders(chunks.device_ptr(), chunks.size(), stream); // compute max bytes needed for level data auto level_bit_size = cudf::detail::make_counting_transform_iterator(0, [chunks = chunks.begin()] __device__(int i) { auto c = chunks[i]; return static_cast( - max(c.level_bits[gpu::level_type::REPETITION], c.level_bits[gpu::level_type::DEFINITION])); + max(c.level_bits[level_type::REPETITION], c.level_bits[level_type::DEFINITION])); }); // max level data bit size. int const max_level_bits = thrust::reduce(rmm::exec_policy(stream), @@ -388,11 +388,11 @@ int decode_page_headers(cudf::detail::hostdevice_vector& c * @return Device buffer to decompressed page data */ [[nodiscard]] rmm::device_buffer decompress_page_data( - cudf::detail::hostdevice_vector& chunks, - cudf::detail::hostdevice_vector& pages, + cudf::detail::hostdevice_vector& chunks, + cudf::detail::hostdevice_vector& pages, rmm::cuda_stream_view stream) { - auto for_each_codec_page = [&](parquet::Compression codec, std::function const& f) { + auto for_each_codec_page = [&](Compression codec, std::function const& f) { for (size_t c = 0, page_count = 0; c < chunks.size(); c++) { const auto page_stride = chunks[c].max_num_pages; if (chunks[c].codec == codec) { @@ -412,19 +412,16 @@ int decode_page_headers(cudf::detail::hostdevice_vector& c size_t total_decomp_size = 0; struct codec_stats { - parquet::Compression compression_type = UNCOMPRESSED; - size_t num_pages = 0; - int32_t max_decompressed_size = 0; - size_t total_decomp_size = 0; + Compression compression_type = UNCOMPRESSED; + size_t num_pages = 0; + int32_t max_decompressed_size = 0; + size_t total_decomp_size = 0; }; - std::array codecs{codec_stats{parquet::GZIP}, - codec_stats{parquet::SNAPPY}, - codec_stats{parquet::BROTLI}, - codec_stats{parquet::ZSTD}}; + std::array codecs{codec_stats{GZIP}, codec_stats{SNAPPY}, codec_stats{BROTLI}, codec_stats{ZSTD}}; auto is_codec_supported = [&codecs](int8_t codec) { - if (codec == parquet::UNCOMPRESSED) return true; + if (codec == UNCOMPRESSED) return true; return std::find_if(codecs.begin(), codecs.end(), [codec](auto& cstats) { return codec == cstats.compression_type; }) != codecs.end(); @@ -445,7 +442,7 @@ int decode_page_headers(cudf::detail::hostdevice_vector& c codec.num_pages++; num_comp_pages++; }); - if (codec.compression_type == parquet::BROTLI && codec.num_pages > 0) { + if (codec.compression_type == BROTLI && codec.num_pages > 0) { debrotli_scratch.resize(get_gpu_debrotli_scratch_size(codec.num_pages), stream); } } @@ -482,7 +479,7 @@ int decode_page_headers(cudf::detail::hostdevice_vector& c auto& page = pages[page_idx]; // offset will only be non-zero for V2 pages auto const offset = - page.lvl_bytes[gpu::level_type::DEFINITION] + page.lvl_bytes[gpu::level_type::REPETITION]; + page.lvl_bytes[level_type::DEFINITION] + page.lvl_bytes[level_type::REPETITION]; // for V2 need to copy def and rep level info into place, and then offset the // input and output buffers. otherwise we'd have to keep both the compressed // and decompressed data. 
@@ -509,11 +506,11 @@ int decode_page_headers(cudf::detail::hostdevice_vector& c device_span d_comp_res_view(comp_res.data() + start_pos, codec.num_pages); switch (codec.compression_type) { - case parquet::GZIP: + case GZIP: gpuinflate(d_comp_in, d_comp_out, d_comp_res_view, gzip_header_included::YES, stream); break; - case parquet::SNAPPY: - if (nvcomp_integration::is_stable_enabled()) { + case SNAPPY: + if (cudf::io::detail::nvcomp_integration::is_stable_enabled()) { nvcomp::batched_decompress(nvcomp::compression_type::SNAPPY, d_comp_in, d_comp_out, @@ -525,7 +522,7 @@ int decode_page_headers(cudf::detail::hostdevice_vector& c gpu_unsnap(d_comp_in, d_comp_out, d_comp_res_view, stream); } break; - case parquet::ZSTD: + case ZSTD: nvcomp::batched_decompress(nvcomp::compression_type::ZSTD, d_comp_in, d_comp_out, @@ -534,7 +531,7 @@ int decode_page_headers(cudf::detail::hostdevice_vector& c codec.total_decomp_size, stream); break; - case parquet::BROTLI: + case BROTLI: gpu_debrotli(d_comp_in, d_comp_out, d_comp_res_view, @@ -594,9 +591,9 @@ void reader::impl::allocate_nesting_info() }); page_nesting_info = - cudf::detail::hostdevice_vector{total_page_nesting_infos, _stream}; + cudf::detail::hostdevice_vector{total_page_nesting_infos, _stream}; page_nesting_decode_info = - cudf::detail::hostdevice_vector{total_page_nesting_infos, _stream}; + cudf::detail::hostdevice_vector{total_page_nesting_infos, _stream}; // update pointers in the PageInfos int target_page_index = 0; @@ -653,10 +650,10 @@ void reader::impl::allocate_nesting_info() if (!cur_schema.is_stub()) { // initialize each page within the chunk for (int p_idx = 0; p_idx < chunks[idx].num_data_pages; p_idx++) { - gpu::PageNestingInfo* pni = + PageNestingInfo* pni = &page_nesting_info[nesting_info_index + (p_idx * per_page_nesting_info_size)]; - gpu::PageNestingDecodeInfo* nesting_info = + PageNestingDecodeInfo* nesting_info = &page_nesting_decode_info[nesting_info_index + (p_idx * per_page_nesting_info_size)]; // if we have lists, set our start and end depth remappings @@ -717,9 +714,9 @@ void reader::impl::allocate_level_decode_space() for (size_t idx = 0; idx < pages.size(); idx++) { auto& p = pages[idx]; - p.lvl_decode_buf[gpu::level_type::DEFINITION] = buf; + p.lvl_decode_buf[level_type::DEFINITION] = buf; buf += (LEVEL_DECODE_BUF_SIZE * _pass_itm_data->level_type_size); - p.lvl_decode_buf[gpu::level_type::REPETITION] = buf; + p.lvl_decode_buf[level_type::REPETITION] = buf; buf += (LEVEL_DECODE_BUF_SIZE * _pass_itm_data->level_type_size); } } @@ -824,25 +821,25 @@ void reader::impl::load_global_chunk_info() schema.converted_type, schema.type_length); - chunks.push_back(gpu::ColumnChunkDesc(col_meta.total_compressed_size, - nullptr, - col_meta.num_values, - schema.type, - type_width, - row_group_start, - row_group_rows, - schema.max_definition_level, - schema.max_repetition_level, - _metadata->get_output_nesting_depth(col.schema_idx), - required_bits(schema.max_definition_level), - required_bits(schema.max_repetition_level), - col_meta.codec, - converted_type, - schema.logical_type, - schema.decimal_precision, - clock_rate, - i, - col.schema_idx)); + chunks.push_back(ColumnChunkDesc(col_meta.total_compressed_size, + nullptr, + col_meta.num_values, + schema.type, + type_width, + row_group_start, + row_group_rows, + schema.max_definition_level, + schema.max_repetition_level, + _metadata->get_output_nesting_depth(col.schema_idx), + required_bits(schema.max_definition_level), + required_bits(schema.max_repetition_level), + 
col_meta.codec, + converted_type, + schema.logical_type, + schema.decimal_precision, + clock_rate, + i, + col.schema_idx)); } remaining_rows -= row_group_rows; @@ -909,7 +906,7 @@ void reader::impl::compute_input_pass_row_group_info() void reader::impl::setup_pass() { // this will also cause the previous pass information to be deleted - _pass_itm_data = std::make_unique(); + _pass_itm_data = std::make_unique(); // setup row groups to be loaded for this pass auto const row_group_start = _input_pass_row_group_offsets[_current_input_pass]; @@ -929,8 +926,7 @@ void reader::impl::setup_pass() auto chunk_start = _file_itm_data.chunks.begin() + (row_group_start * chunks_per_rowgroup); auto chunk_end = _file_itm_data.chunks.begin() + (row_group_end * chunks_per_rowgroup); - _pass_itm_data->chunks = - cudf::detail::hostdevice_vector(num_chunks, _stream); + _pass_itm_data->chunks = cudf::detail::hostdevice_vector(num_chunks, _stream); std::copy(chunk_start, chunk_end, _pass_itm_data->chunks.begin()); // adjust skip_rows and num_rows by what's available in the row groups we are processing @@ -970,7 +966,7 @@ void reader::impl::load_and_decompress_data() // Process dataset chunk pages into output columns auto const total_pages = count_page_headers(chunks, _stream); if (total_pages <= 0) { return; } - pages = cudf::detail::hostdevice_vector(total_pages, total_pages, _stream); + pages = cudf::detail::hostdevice_vector(total_pages, total_pages, _stream); // decoding of column/page information _pass_itm_data->level_type_size = decode_page_headers(chunks, pages, _stream); @@ -978,7 +974,7 @@ void reader::impl::load_and_decompress_data() decomp_page_data = decompress_page_data(chunks, pages, _stream); // Free compressed data for (size_t c = 0; c < chunks.size(); c++) { - if (chunks[c].codec != parquet::Compression::UNCOMPRESSED) { raw_page_data[c].reset(); } + if (chunks[c].codec != Compression::UNCOMPRESSED) { raw_page_data[c].reset(); } } } @@ -1019,14 +1015,13 @@ struct cumulative_row_info { }; #if defined(PREPROCESS_DEBUG) -void print_pages(cudf::detail::hostdevice_vector& pages, - rmm::cuda_stream_view _stream) +void print_pages(cudf::detail::hostdevice_vector& pages, rmm::cuda_stream_view _stream) { pages.device_to_host_sync(_stream); for (size_t idx = 0; idx < pages.size(); idx++) { auto const& p = pages[idx]; // skip dictionary pages - if (p.flags & gpu::PAGEINFO_FLAGS_DICTIONARY) { continue; } + if (p.flags & PAGEINFO_FLAGS_DICTIONARY) { continue; } printf( "P(%lu, s:%d): chunk_row(%d), num_rows(%d), skipped_values(%d), skipped_leaf_values(%d), " "str_bytes(%d)\n", @@ -1040,7 +1035,7 @@ void print_pages(cudf::detail::hostdevice_vector& pages, } } -void print_cumulative_page_info(cudf::detail::hostdevice_vector& pages, +void print_cumulative_page_info(cudf::detail::hostdevice_vector& pages, rmm::device_uvector const& page_index, rmm::device_uvector const& c_info, rmm::cuda_stream_view stream) @@ -1067,7 +1062,7 @@ void print_cumulative_page_info(cudf::detail::hostdevice_vector& printf("Schema %d\n", schemas[idx]); for (size_t pidx = 0; pidx < pages.size(); pidx++) { auto const& page = pages[h_page_index[pidx]]; - if (page.flags & gpu::PAGEINFO_FLAGS_DICTIONARY || page.src_col_schema != schemas[idx]) { + if (page.flags & PAGEINFO_FLAGS_DICTIONARY || page.src_col_schema != schemas[idx]) { continue; } printf("\tP: {%lu, %lu}\n", h_cinfo[pidx].row_count, h_cinfo[pidx].size_bytes); @@ -1075,10 +1070,9 @@ void print_cumulative_page_info(cudf::detail::hostdevice_vector& } } -void print_cumulative_row_info( 
- host_span sizes, - std::string const& label, - std::optional> splits = std::nullopt) +void print_cumulative_row_info(host_span sizes, + std::string const& label, + std::optional> splits = std::nullopt) { if (splits.has_value()) { printf("------------\nSplits\n"); @@ -1093,7 +1087,7 @@ void print_cumulative_row_info( if (splits.has_value()) { // if we have a split at this row count and this is the last instance of this row count auto start = thrust::make_transform_iterator( - splits->begin(), [](gpu::chunk_read_info const& i) { return i.skip_rows; }); + splits->begin(), [](chunk_read_info const& i) { return i.skip_rows; }); auto end = start + splits->size(); auto split = std::find(start, end, sizes[idx].row_count); auto const split_index = [&]() -> int { @@ -1180,12 +1174,12 @@ __device__ size_t row_size_functor::operator()(size_t num_rows, boo * Sums across all nesting levels. */ struct get_cumulative_row_info { - gpu::PageInfo const* const pages; + PageInfo const* const pages; __device__ cumulative_row_info operator()(size_type index) { auto const& page = pages[index]; - if (page.flags & gpu::PAGEINFO_FLAGS_DICTIONARY) { + if (page.flags & PAGEINFO_FLAGS_DICTIONARY) { return cumulative_row_info{0, 0, page.src_col_schema}; } @@ -1250,15 +1244,15 @@ struct row_total_size { * @param num_rows Total number of rows to read * @param chunk_read_limit Limit on total number of bytes to be returned per read, for all columns */ -std::vector find_splits(std::vector const& sizes, - size_t num_rows, - size_t chunk_read_limit) +std::vector find_splits(std::vector const& sizes, + size_t num_rows, + size_t chunk_read_limit) { // now we have an array of {row_count, real output bytes}. just walk through it and generate // splits. // TODO: come up with a clever way to do this entirely in parallel. 
For now, as long as batch // sizes are reasonably large, this shouldn't iterate too many times - std::vector splits; + std::vector splits; { size_t cur_pos = 0; size_t cur_cumulative_size = 0; @@ -1290,7 +1284,7 @@ std::vector find_splits(std::vector c auto const start_row = cur_row_count; cur_row_count = sizes[split_pos].row_count; - splits.push_back(gpu::chunk_read_info{start_row, cur_row_count - start_row}); + splits.push_back(chunk_read_info{start_row, cur_row_count - start_row}); cur_pos = split_pos; cur_cumulative_size = sizes[split_pos].size_bytes; } @@ -1311,12 +1305,11 @@ std::vector find_splits(std::vector c * @param chunk_read_limit Limit on total number of bytes to be returned per read, for all columns * @param stream CUDA stream to use */ -std::vector compute_splits( - cudf::detail::hostdevice_vector& pages, - gpu::pass_intermediate_data const& id, - size_t num_rows, - size_t chunk_read_limit, - rmm::cuda_stream_view stream) +std::vector compute_splits(cudf::detail::hostdevice_vector& pages, + pass_intermediate_data const& id, + size_t num_rows, + size_t chunk_read_limit, + rmm::cuda_stream_view stream) { auto const& page_keys = id.page_keys; auto const& page_index = id.page_index; @@ -1395,16 +1388,16 @@ std::vector compute_splits( } struct get_page_chunk_idx { - __device__ size_type operator()(gpu::PageInfo const& page) { return page.chunk_idx; } + __device__ size_type operator()(PageInfo const& page) { return page.chunk_idx; } }; struct get_page_num_rows { - __device__ size_type operator()(gpu::PageInfo const& page) { return page.num_rows; } + __device__ size_type operator()(PageInfo const& page) { return page.num_rows; } }; struct get_page_column_index { - gpu::ColumnChunkDesc const* chunks; - __device__ size_type operator()(gpu::PageInfo const& page) + ColumnChunkDesc const* chunks; + __device__ size_type operator()(PageInfo const& page) { return chunks[page.chunk_idx].src_col_index; } @@ -1441,7 +1434,7 @@ struct get_page_nesting_size { input_col_info const* const input_cols; size_type const max_depth; size_t const num_pages; - gpu::PageInfo const* const pages; + PageInfo const* const pages; int const* page_indices; __device__ size_type operator()(size_t index) const @@ -1450,7 +1443,7 @@ struct get_page_nesting_size { auto const& page = pages[page_indices[indices.page_idx]]; if (page.src_col_schema != input_cols[indices.col_idx].schema_idx || - page.flags & gpu::PAGEINFO_FLAGS_DICTIONARY || + page.flags & PAGEINFO_FLAGS_DICTIONARY || indices.depth_idx >= input_cols[indices.col_idx].nesting_depth) { return 0; } @@ -1468,7 +1461,7 @@ struct get_reduction_key { * @brief Writes to the chunk_row field of the PageInfo struct. */ struct chunk_row_output_iter { - gpu::PageInfo* p; + PageInfo* p; using value_type = size_type; using difference_type = size_type; using pointer = size_type*; @@ -1490,7 +1483,7 @@ struct chunk_row_output_iter { * @brief Writes to the page_start_value field of the PageNestingInfo struct, keyed by schema. 
*/ struct start_offset_output_iterator { - gpu::PageInfo const* pages; + PageInfo const* pages; int const* page_indices; size_t cur_index; input_col_info const* input_cols; @@ -1529,9 +1522,9 @@ struct start_offset_output_iterator { { auto const indices = reduction_indices{index, max_depth, num_pages}; - gpu::PageInfo const& p = pages[page_indices[indices.page_idx]]; + PageInfo const& p = pages[page_indices[indices.page_idx]]; if (p.src_col_schema != input_cols[indices.col_idx].schema_idx || - p.flags & gpu::PAGEINFO_FLAGS_DICTIONARY || + p.flags & PAGEINFO_FLAGS_DICTIONARY || indices.depth_idx >= input_cols[indices.col_idx].nesting_depth) { return empty; } @@ -1540,15 +1533,15 @@ struct start_offset_output_iterator { }; struct flat_column_num_rows { - gpu::PageInfo const* pages; - gpu::ColumnChunkDesc const* chunks; + PageInfo const* pages; + ColumnChunkDesc const* chunks; __device__ size_type operator()(size_type pindex) const { - gpu::PageInfo const& page = pages[pindex]; + PageInfo const& page = pages[pindex]; // ignore dictionary pages and pages belonging to any column containing repetition (lists) - if ((page.flags & gpu::PAGEINFO_FLAGS_DICTIONARY) || - (chunks[page.chunk_idx].max_level[gpu::level_type::REPETITION] > 0)) { + if ((page.flags & PAGEINFO_FLAGS_DICTIONARY) || + (chunks[page.chunk_idx].max_level[level_type::REPETITION] > 0)) { return 0; } return page.num_rows; @@ -1581,8 +1574,8 @@ struct row_counts_different { * @param expected_row_count Expected row count, if applicable * @param stream CUDA stream used for device memory operations and kernel launches */ -void detect_malformed_pages(cudf::detail::hostdevice_vector& pages, - cudf::detail::hostdevice_vector const& chunks, +void detect_malformed_pages(cudf::detail::hostdevice_vector& pages, + cudf::detail::hostdevice_vector const& chunks, device_span page_keys, device_span page_index, std::optional expected_row_count, @@ -1631,23 +1624,21 @@ void detect_malformed_pages(cudf::detail::hostdevice_vector& page } struct page_to_string_size { - gpu::PageInfo* pages; - gpu::ColumnChunkDesc const* chunks; + PageInfo* pages; + ColumnChunkDesc const* chunks; __device__ size_t operator()(size_type page_idx) const { auto const page = pages[page_idx]; auto const chunk = chunks[page.chunk_idx]; - if (not is_string_col(chunk) || (page.flags & gpu::PAGEINFO_FLAGS_DICTIONARY) != 0) { - return 0; - } + if (not is_string_col(chunk) || (page.flags & PAGEINFO_FLAGS_DICTIONARY) != 0) { return 0; } return pages[page_idx].str_bytes; } }; struct page_offset_output_iter { - gpu::PageInfo* p; + PageInfo* p; size_type const* index; using value_type = size_type; @@ -1738,7 +1729,7 @@ void reader::impl::preprocess_pages(bool uses_custom_row_bounds, size_t chunk_re cols = &out_buf.children; // if this has a list parent, we have to get column sizes from the - // data computed during gpu::ComputePageSizes + // data computed during ComputePageSizes if (out_buf.user_data & PARQUET_COLUMN_BUFFER_FLAG_HAS_LIST_PARENT) { has_lists = true; break; @@ -1749,7 +1740,7 @@ void reader::impl::preprocess_pages(bool uses_custom_row_bounds, size_t chunk_re // generate string dict indices if necessary { - auto is_dict_chunk = [](gpu::ColumnChunkDesc const& chunk) { + auto is_dict_chunk = [](ColumnChunkDesc const& chunk) { return (chunk.data_type & 0x7) == BYTE_ARRAY && chunk.num_dict_pages > 0; }; @@ -1785,7 +1776,7 @@ void reader::impl::preprocess_pages(bool uses_custom_row_bounds, size_t chunk_re if (total_str_dict_indexes > 0) { chunks.host_to_device_async(_stream); - 
gpu::BuildStringDictionaryIndex(chunks.device_ptr(), chunks.size(), _stream); + BuildStringDictionaryIndex(chunks.device_ptr(), chunks.size(), _stream); } } @@ -1800,14 +1791,14 @@ void reader::impl::preprocess_pages(bool uses_custom_row_bounds, size_t chunk_re // if: // - user has passed custom row bounds // - we will be doing a chunked read - gpu::ComputePageSizes(pages, - chunks, - 0, // 0-max size_t. process all possible rows - std::numeric_limits::max(), - true, // compute num_rows - chunk_read_limit > 0, // compute string sizes - _pass_itm_data->level_type_size, - _stream); + ComputePageSizes(pages, + chunks, + 0, // 0-max size_t. process all possible rows + std::numeric_limits::max(), + true, // compute num_rows + chunk_read_limit > 0, // compute string sizes + _pass_itm_data->level_type_size, + _stream); // computes: // PageInfo::chunk_row (the absolute start row index) for all pages @@ -1836,7 +1827,7 @@ void reader::impl::preprocess_pages(bool uses_custom_row_bounds, size_t chunk_re _pass_itm_data->output_chunk_read_info = _output_chunk_read_limit > 0 ? compute_splits(pages, *_pass_itm_data, num_rows, chunk_read_limit, _stream) - : std::vector{{skip_rows, num_rows}}; + : std::vector{{skip_rows, num_rows}}; } void reader::impl::allocate_columns(size_t skip_rows, size_t num_rows, bool uses_custom_row_bounds) @@ -1853,14 +1844,14 @@ void reader::impl::allocate_columns(size_t skip_rows, size_t num_rows, bool uses // respect the user bounds. It is only necessary to do this second pass if uses_custom_row_bounds // is set (if the user has specified artificial bounds). if (uses_custom_row_bounds) { - gpu::ComputePageSizes(pages, - chunks, - skip_rows, - num_rows, - false, // num_rows is already computed - false, // no need to compute string sizes - _pass_itm_data->level_type_size, - _stream); + ComputePageSizes(pages, + chunks, + skip_rows, + num_rows, + false, // num_rows is already computed + false, // no need to compute string sizes + _pass_itm_data->level_type_size, + _stream); // print_pages(pages, _stream); } @@ -1879,7 +1870,7 @@ void reader::impl::allocate_columns(size_t skip_rows, size_t num_rows, bool uses cols = &out_buf.children; // if this has a list parent, we have to get column sizes from the - // data computed during gpu::ComputePageSizes + // data computed during ComputePageSizes if (out_buf.user_data & PARQUET_COLUMN_BUFFER_FLAG_HAS_LIST_PARENT) { has_lists = true; } @@ -2014,4 +2005,4 @@ std::vector reader::impl::calculate_page_string_offsets() return col_sizes; } -} // namespace cudf::io::detail::parquet +} // namespace cudf::io::parquet::detail diff --git a/cpp/src/io/parquet/rle_stream.cuh b/cpp/src/io/parquet/rle_stream.cuh index 2545a074a38..799d6d9fd64 100644 --- a/cpp/src/io/parquet/rle_stream.cuh +++ b/cpp/src/io/parquet/rle_stream.cuh @@ -20,7 +20,7 @@ #include #include -namespace cudf::io::parquet::gpu { +namespace cudf::io::parquet::detail { template constexpr int rle_stream_required_run_buffer_size() @@ -362,4 +362,4 @@ struct rle_stream { } }; -} // namespace cudf::io::parquet::gpu +} // namespace cudf::io::parquet::detail diff --git a/cpp/src/io/parquet/writer_impl.cu b/cpp/src/io/parquet/writer_impl.cu index a124f352ee4..50589f23626 100644 --- a/cpp/src/io/parquet/writer_impl.cu +++ b/cpp/src/io/parquet/writer_impl.cu @@ -54,12 +54,9 @@ #include #include -namespace cudf { -namespace io { -namespace detail { -namespace parquet { -using namespace cudf::io::parquet; -using namespace cudf::io; +namespace cudf::io::parquet::detail { + +using namespace 
cudf::io::detail; struct aggregate_writer_metadata { aggregate_writer_metadata(host_span partitions, @@ -185,13 +182,13 @@ namespace { * @param compression The compression type * @return The supported Parquet compression */ -parquet::Compression to_parquet_compression(compression_type compression) +Compression to_parquet_compression(compression_type compression) { switch (compression) { case compression_type::AUTO: - case compression_type::SNAPPY: return parquet::Compression::SNAPPY; - case compression_type::ZSTD: return parquet::Compression::ZSTD; - case compression_type::NONE: return parquet::Compression::UNCOMPRESSED; + case compression_type::SNAPPY: return Compression::SNAPPY; + case compression_type::ZSTD: return Compression::ZSTD; + case compression_type::NONE: return Compression::UNCOMPRESSED; default: CUDF_FAIL("Unsupported compression type"); } } @@ -206,7 +203,7 @@ void update_chunk_encodings(std::vector& encodings, uint32_t enc_mask) { for (uint8_t enc = 0; enc < static_cast(Encoding::NUM_ENCODINGS); enc++) { auto const enc_enum = static_cast(enc); - if ((enc_mask & gpu::encoding_to_mask(enc_enum)) != 0) { encodings.push_back(enc_enum); } + if ((enc_mask & encoding_to_mask(enc_enum)) != 0) { encodings.push_back(enc_enum); } } } @@ -761,11 +758,11 @@ struct parquet_column_view { std::vector const& schema_tree, rmm::cuda_stream_view stream); - [[nodiscard]] gpu::parquet_column_device_view get_device_view(rmm::cuda_stream_view stream) const; + [[nodiscard]] parquet_column_device_view get_device_view(rmm::cuda_stream_view stream) const; [[nodiscard]] column_view cudf_column_view() const { return cudf_col; } - [[nodiscard]] parquet::Type physical_type() const { return schema_node.type; } - [[nodiscard]] parquet::ConvertedType converted_type() const { return schema_node.converted_type; } + [[nodiscard]] Type physical_type() const { return schema_node.type; } + [[nodiscard]] ConvertedType converted_type() const { return schema_node.converted_type; } std::vector const& get_path_in_schema() { return path_in_schema; } @@ -846,11 +843,11 @@ parquet_column_view::parquet_column_view(schema_tree_node const& schema_node, uint16_t max_rep_level = 0; curr_schema_node = schema_node; while (curr_schema_node.parent_idx != -1) { - if (curr_schema_node.repetition_type == parquet::REPEATED or - curr_schema_node.repetition_type == parquet::OPTIONAL) { + if (curr_schema_node.repetition_type == REPEATED or + curr_schema_node.repetition_type == OPTIONAL) { ++max_def_level; } - if (curr_schema_node.repetition_type == parquet::REPEATED) { ++max_rep_level; } + if (curr_schema_node.repetition_type == REPEATED) { ++max_rep_level; } curr_schema_node = schema_tree[curr_schema_node.parent_idx]; } CUDF_EXPECTS(max_def_level < 256, "Definition levels above 255 are not supported"); @@ -897,9 +894,9 @@ parquet_column_view::parquet_column_view(schema_tree_node const& schema_node, } } -gpu::parquet_column_device_view parquet_column_view::get_device_view(rmm::cuda_stream_view) const +parquet_column_device_view parquet_column_view::get_device_view(rmm::cuda_stream_view) const { - auto desc = gpu::parquet_column_device_view{}; // Zero out all fields + auto desc = parquet_column_device_view{}; // Zero out all fields desc.stats_dtype = schema_node.stats_dtype; desc.ts_scale = schema_node.ts_scale; @@ -931,8 +928,8 @@ gpu::parquet_column_device_view parquet_column_view::get_device_view(rmm::cuda_s * @param fragment_size Number of rows per fragment * @param stream CUDA stream used for device memory operations and kernel 
launches */ -void init_row_group_fragments(cudf::detail::hostdevice_2dvector& frag, - device_span col_desc, +void init_row_group_fragments(cudf::detail::hostdevice_2dvector& frag, + device_span col_desc, host_span partitions, device_span part_frag_offset, uint32_t fragment_size, @@ -940,7 +937,7 @@ void init_row_group_fragments(cudf::detail::hostdevice_2dvector frag, +void calculate_page_fragments(device_span frag, host_span frag_sizes, rmm::cuda_stream_view stream) { auto d_frag_sz = cudf::detail::make_device_uvector_async( frag_sizes, stream, rmm::mr::get_current_device_resource()); - gpu::CalculatePageFragments(frag, d_frag_sz, stream); + CalculatePageFragments(frag, d_frag_sz, stream); } /** @@ -972,13 +969,13 @@ void calculate_page_fragments(device_span frag, * @param stream CUDA stream used for device memory operations and kernel launches */ void gather_fragment_statistics(device_span frag_stats, - device_span frags, + device_span frags, bool int96_timestamps, rmm::cuda_stream_view stream) { rmm::device_uvector frag_stats_group(frag_stats.size(), stream); - gpu::InitFragmentStatistics(frag_stats_group, frags, stream); + InitFragmentStatistics(frag_stats_group, frags, stream); detail::calculate_group_statistics( frag_stats.data(), frag_stats_group.data(), frag_stats.size(), stream, int96_timestamps); stream.synchronize(); @@ -1008,8 +1005,8 @@ size_t max_compression_output_size(Compression codec, uint32_t compression_block return compress_max_output_chunk_size(to_nvcomp_compression_type(codec), compression_blocksize); } -auto init_page_sizes(hostdevice_2dvector& chunks, - device_span col_desc, +auto init_page_sizes(hostdevice_2dvector& chunks, + device_span col_desc, uint32_t num_columns, size_t max_page_size_bytes, size_type max_page_size_rows, @@ -1021,19 +1018,19 @@ auto init_page_sizes(hostdevice_2dvector& chunks, chunks.host_to_device_async(stream); // Calculate number of pages and store in respective chunks - gpu::InitEncoderPages(chunks, - {}, - {}, - {}, - col_desc, - num_columns, - max_page_size_bytes, - max_page_size_rows, - page_alignment(compression_codec), - write_v2_headers, - nullptr, - nullptr, - stream); + InitEncoderPages(chunks, + {}, + {}, + {}, + col_desc, + num_columns, + max_page_size_bytes, + max_page_size_rows, + page_alignment(compression_codec), + write_v2_headers, + nullptr, + nullptr, + stream); chunks.device_to_host_sync(stream); int num_pages = 0; @@ -1046,19 +1043,19 @@ auto init_page_sizes(hostdevice_2dvector& chunks, // Now that we know the number of pages, allocate an array to hold per page size and get it // populated cudf::detail::hostdevice_vector page_sizes(num_pages, stream); - gpu::InitEncoderPages(chunks, - {}, - page_sizes, - {}, - col_desc, - num_columns, - max_page_size_bytes, - max_page_size_rows, - page_alignment(compression_codec), - write_v2_headers, - nullptr, - nullptr, - stream); + InitEncoderPages(chunks, + {}, + page_sizes, + {}, + col_desc, + num_columns, + max_page_size_bytes, + max_page_size_rows, + page_alignment(compression_codec), + write_v2_headers, + nullptr, + nullptr, + stream); page_sizes.device_to_host_sync(stream); // Get per-page max compressed size @@ -1072,26 +1069,26 @@ auto init_page_sizes(hostdevice_2dvector& chunks, comp_page_sizes.host_to_device_async(stream); // Use per-page max compressed size to calculate chunk.compressed_size - gpu::InitEncoderPages(chunks, - {}, - {}, - comp_page_sizes, - col_desc, - num_columns, - max_page_size_bytes, - max_page_size_rows, - page_alignment(compression_codec), - 
write_v2_headers, - nullptr, - nullptr, - stream); + InitEncoderPages(chunks, + {}, + {}, + comp_page_sizes, + col_desc, + num_columns, + max_page_size_bytes, + max_page_size_rows, + page_alignment(compression_codec), + write_v2_headers, + nullptr, + nullptr, + stream); chunks.device_to_host_sync(stream); return comp_page_sizes; } size_t max_page_bytes(Compression compression, size_t max_page_size_bytes) { - if (compression == parquet::Compression::UNCOMPRESSED) { return max_page_size_bytes; } + if (compression == Compression::UNCOMPRESSED) { return max_page_size_bytes; } auto const ncomp_type = to_nvcomp_compression_type(compression); auto const nvcomp_limit = nvcomp::is_compression_disabled(ncomp_type) @@ -1104,9 +1101,9 @@ size_t max_page_bytes(Compression compression, size_t max_page_size_bytes) } std::pair>, std::vector>> -build_chunk_dictionaries(hostdevice_2dvector& chunks, - host_span col_desc, - device_2dspan frags, +build_chunk_dictionaries(hostdevice_2dvector& chunks, + host_span col_desc, + device_2dspan frags, Compression compression, dictionary_policy dict_policy, size_t max_dict_size, @@ -1130,7 +1127,7 @@ build_chunk_dictionaries(hostdevice_2dvector& chunks, } // Allocate slots for each chunk - std::vector> hash_maps_storage; + std::vector> hash_maps_storage; hash_maps_storage.reserve(h_chunks.size()); for (auto& chunk : h_chunks) { if (col_desc[chunk.col_desc_id].physical_type == Type::BOOLEAN || @@ -1149,8 +1146,8 @@ build_chunk_dictionaries(hostdevice_2dvector& chunks, chunks.host_to_device_async(stream); - gpu::initialize_chunk_hash_maps(chunks.device_view().flat_view(), stream); - gpu::populate_chunk_hash_maps(frags, stream); + initialize_chunk_hash_maps(chunks.device_view().flat_view(), stream); + populate_chunk_hash_maps(frags, stream); chunks.device_to_host_sync(stream); @@ -1197,8 +1194,8 @@ build_chunk_dictionaries(hostdevice_2dvector& chunks, chunk.dict_index = inserted_dict_index.data(); } chunks.host_to_device_async(stream); - gpu::collect_map_entries(chunks.device_view().flat_view(), stream); - gpu::get_dictionary_indices(frags, stream); + collect_map_entries(chunks.device_view().flat_view(), stream); + get_dictionary_indices(frags, stream); return std::pair(std::move(dict_data), std::move(dict_index)); } @@ -1221,9 +1218,9 @@ build_chunk_dictionaries(hostdevice_2dvector& chunks, * @param write_v2_headers True if version 2 page headers are to be written * @param stream CUDA stream used for device memory operations and kernel launches */ -void init_encoder_pages(hostdevice_2dvector& chunks, - device_span col_desc, - device_span pages, +void init_encoder_pages(hostdevice_2dvector& chunks, + device_span col_desc, + device_span pages, cudf::detail::hostdevice_vector& comp_page_sizes, statistics_chunk* page_stats, statistics_chunk* frag_stats, @@ -1286,8 +1283,8 @@ void init_encoder_pages(hostdevice_2dvector& chunks, * @param write_v2_headers True if V2 page headers should be written * @param stream CUDA stream used for device memory operations and kernel launches */ -void encode_pages(hostdevice_2dvector& chunks, - device_span pages, +void encode_pages(hostdevice_2dvector& chunks, + device_span pages, uint32_t pages_in_batch, uint32_t first_page_in_batch, uint32_t rowgroups_in_batch, @@ -1308,8 +1305,7 @@ void encode_pages(hostdevice_2dvector& chunks, ? device_span(page_stats + first_page_in_batch, pages_in_batch) : device_span(); - uint32_t max_comp_pages = - (compression != parquet::Compression::UNCOMPRESSED) ? 
pages_in_batch : 0; + uint32_t max_comp_pages = (compression != Compression::UNCOMPRESSED) ? pages_in_batch : 0; rmm::device_uvector> comp_in(max_comp_pages, stream); rmm::device_uvector> comp_out(max_comp_pages, stream); @@ -1319,9 +1315,9 @@ void encode_pages(hostdevice_2dvector& chunks, comp_res.end(), compression_result{0, compression_status::FAILURE}); - gpu::EncodePages(batch_pages, write_v2_headers, comp_in, comp_out, comp_res, stream); + EncodePages(batch_pages, write_v2_headers, comp_in, comp_out, comp_res, stream); switch (compression) { - case parquet::Compression::SNAPPY: + case Compression::SNAPPY: if (nvcomp::is_compression_disabled(nvcomp::compression_type::SNAPPY)) { gpu_snap(comp_in, comp_out, comp_res, stream); } else { @@ -1329,7 +1325,7 @@ void encode_pages(hostdevice_2dvector& chunks, nvcomp::compression_type::SNAPPY, comp_in, comp_out, comp_res, stream); } break; - case parquet::Compression::ZSTD: { + case Compression::ZSTD: { if (auto const reason = nvcomp::is_compression_disabled(nvcomp::compression_type::ZSTD); reason) { CUDF_FAIL("Compression error: " + reason.value()); @@ -1338,7 +1334,7 @@ void encode_pages(hostdevice_2dvector& chunks, break; } - case parquet::Compression::UNCOMPRESSED: break; + case Compression::UNCOMPRESSED: break; default: CUDF_FAIL("invalid compression type"); } @@ -1378,7 +1374,7 @@ void encode_pages(hostdevice_2dvector& chunks, * @param column_index_truncate_length maximum length of min or max values in column index, in bytes * @return Computed buffer size needed to encode the column index */ -size_t column_index_buffer_size(gpu::EncColumnChunk* ck, int32_t column_index_truncate_length) +size_t column_index_buffer_size(EncColumnChunk* ck, int32_t column_index_truncate_length) { // encoding the column index for a given chunk requires: // each list (4 of them) requires 6 bytes of overhead @@ -1499,8 +1495,8 @@ auto convert_table_to_parquet_data(table_input_metadata& table_meta, std::vector this_table_schema(schema_tree.begin(), schema_tree.end()); // Initialize column description - cudf::detail::hostdevice_vector col_desc(parquet_columns.size(), - stream); + cudf::detail::hostdevice_vector col_desc(parquet_columns.size(), + stream); std::transform( parquet_columns.begin(), parquet_columns.end(), col_desc.host_ptr(), [&](auto const& pcol) { return pcol.get_device_view(stream); @@ -1576,7 +1572,7 @@ auto convert_table_to_parquet_data(table_input_metadata& table_meta, auto d_part_frag_offset = cudf::detail::make_device_uvector_async( part_frag_offset, stream, rmm::mr::get_current_device_resource()); - cudf::detail::hostdevice_2dvector row_group_fragments( + cudf::detail::hostdevice_2dvector row_group_fragments( num_columns, num_fragments, stream); // Create table_device_view so that corresponding column_device_view data @@ -1588,7 +1584,7 @@ auto convert_table_to_parquet_data(table_input_metadata& table_meta, if (num_fragments != 0) { // Move column info to device col_desc.host_to_device_async(stream); - leaf_column_views = create_leaf_column_device_views( + leaf_column_views = create_leaf_column_device_views( col_desc, *parent_column_table_device_view, stream); init_row_group_fragments(row_group_fragments, @@ -1662,7 +1658,7 @@ auto convert_table_to_parquet_data(table_input_metadata& table_meta, // Initialize row groups and column chunks auto const num_chunks = num_rowgroups * num_columns; - hostdevice_2dvector chunks(num_rowgroups, num_columns, stream); + hostdevice_2dvector chunks(num_rowgroups, num_columns, stream); // total fragments per 
column (in case they are non-uniform) std::vector frags_per_column(num_columns, 0); @@ -1678,7 +1674,7 @@ auto convert_table_to_parquet_data(table_input_metadata& table_meta, row_group.total_byte_size = 0; row_group.columns.resize(num_columns); for (int c = 0; c < num_columns; c++) { - gpu::EncColumnChunk& ck = chunks[r + first_rg_in_part[p]][c]; + EncColumnChunk& ck = chunks[r + first_rg_in_part[p]][c]; ck = {}; ck.col_desc = col_desc.device_ptr() + c; @@ -1700,7 +1696,7 @@ auto convert_table_to_parquet_data(table_input_metadata& table_meta, return l + r.num_values; }); ck.plain_data_size = std::accumulate( - chunk_fragments.begin(), chunk_fragments.end(), 0, [](int sum, gpu::PageFragment frag) { + chunk_fragments.begin(), chunk_fragments.end(), 0, [](int sum, PageFragment frag) { return sum + frag.fragment_data_size; }); auto& column_chunk_meta = row_group.columns[c].meta_data; @@ -1731,7 +1727,7 @@ auto convert_table_to_parquet_data(table_input_metadata& table_meta, frags_per_column.empty() ? 0 : frag_offsets.back() + frags_per_column.back(); rmm::device_uvector frag_stats(0, stream); - cudf::detail::hostdevice_vector page_fragments(total_frags, stream); + cudf::detail::hostdevice_vector page_fragments(total_frags, stream); // update fragments and/or prepare for fragment statistics calculation if necessary if (total_frags != 0) { @@ -1749,9 +1745,9 @@ auto convert_table_to_parquet_data(table_input_metadata& table_meta, auto const& row_group = agg_meta->file(p).row_groups[global_r]; uint32_t const fragments_in_chunk = util::div_rounding_up_unsafe(row_group.num_rows, frag_size); - gpu::EncColumnChunk& ck = chunks[r + first_rg_in_part[p]][c]; - ck.fragments = page_fragments.device_ptr(frag_offset); - ck.first_fragment = frag_offset; + EncColumnChunk& ck = chunks[r + first_rg_in_part[p]][c]; + ck.fragments = page_fragments.device_ptr(frag_offset); + ck.first_fragment = frag_offset; // update the chunk pointer here for each fragment in chunk.fragments for (uint32_t i = 0; i < fragments_in_chunk; i++) { @@ -1817,8 +1813,8 @@ auto convert_table_to_parquet_data(table_input_metadata& table_meta, size_t comp_rowgroup_size = 0; if (r < num_rowgroups) { for (int i = 0; i < num_columns; i++) { - gpu::EncColumnChunk* ck = &chunks[r][i]; - ck->first_page = num_pages; + EncColumnChunk* ck = &chunks[r][i]; + ck->first_page = num_pages; num_pages += ck->num_pages; pages_in_batch += ck->num_pages; rowgroup_size += ck->bfr_size; @@ -1850,7 +1846,7 @@ auto convert_table_to_parquet_data(table_input_metadata& table_meta, } // Clear compressed buffer size if compression has been turned off - if (compression == parquet::Compression::UNCOMPRESSED) { max_comp_bfr_size = 0; } + if (compression == Compression::UNCOMPRESSED) { max_comp_bfr_size = 0; } // Initialize data pointers in batch uint32_t const num_stats_bfr = @@ -1864,7 +1860,7 @@ auto convert_table_to_parquet_data(table_input_metadata& table_meta, stream); rmm::device_buffer col_idx_bfr(column_index_bfr_size, stream); - rmm::device_uvector pages(num_pages, stream); + rmm::device_uvector pages(num_pages, stream); // This contains stats for both the pages and the rowgroups. TODO: make them separate. 
rmm::device_uvector page_stats(num_stats_bfr, stream); @@ -1874,10 +1870,10 @@ auto convert_table_to_parquet_data(table_input_metadata& table_meta, auto bfr_c = static_cast(comp_bfr.data()); for (auto j = 0; j < batch_list[b]; j++, r++) { for (auto i = 0; i < num_columns; i++) { - gpu::EncColumnChunk& ck = chunks[r][i]; - ck.uncompressed_bfr = bfr; - ck.compressed_bfr = bfr_c; - ck.column_index_blob = bfr_i; + EncColumnChunk& ck = chunks[r][i]; + ck.uncompressed_bfr = bfr; + ck.compressed_bfr = bfr_c; + ck.column_index_blob = bfr_i; bfr += ck.bfr_size; bfr_c += ck.compressed_size; if (stats_granularity == statistics_freq::STATISTICS_COLUMN) { @@ -1960,7 +1956,7 @@ auto convert_table_to_parquet_data(table_input_metadata& table_meta, if (ck.ck_stat_size != 0) { std::vector const stats_blob = cudf::detail::make_std_vector_sync( device_span(dev_bfr, ck.ck_stat_size), stream); - cudf::io::parquet::CompactProtocolReader cp(stats_blob.data(), stats_blob.size()); + CompactProtocolReader cp(stats_blob.data(), stats_blob.size()); cp.read(&column_chunk_meta.statistics); need_sync = true; } @@ -2142,8 +2138,8 @@ void writer::impl::write(table_view const& input, std::vector co void writer::impl::write_parquet_data_to_sink( std::unique_ptr& updated_agg_meta, - device_span pages, - host_2dspan chunks, + device_span pages, + host_2dspan chunks, host_span global_rowgroup_base, host_span first_rg_in_part, host_span batch_list, @@ -2209,7 +2205,7 @@ void writer::impl::write_parquet_data_to_sink( int const global_r = global_rowgroup_base[p] + r - first_rg_in_part[p]; auto const& row_group = _agg_meta->file(p).row_groups[global_r]; for (std::size_t i = 0; i < num_columns; i++) { - gpu::EncColumnChunk const& ck = chunks[r][i]; + EncColumnChunk const& ck = chunks[r][i]; auto const& column_chunk_meta = row_group.columns[i].meta_data; // start transfer of the column index @@ -2392,7 +2388,4 @@ std::unique_ptr> writer::merge_row_group_metadata( return std::make_unique>(std::move(output)); } -} // namespace parquet -} // namespace detail -} // namespace io -} // namespace cudf +} // namespace cudf::io::parquet::detail diff --git a/cpp/src/io/parquet/writer_impl.hpp b/cpp/src/io/parquet/writer_impl.hpp index 89ef85ba2bd..1d27a8400c8 100644 --- a/cpp/src/io/parquet/writer_impl.hpp +++ b/cpp/src/io/parquet/writer_impl.hpp @@ -38,15 +38,11 @@ #include #include -namespace cudf { -namespace io { -namespace detail { -namespace parquet { +namespace cudf::io::parquet::detail { + // Forward internal classes struct aggregate_writer_metadata; -using namespace cudf::io::parquet; -using namespace cudf::io; using cudf::detail::device_2dspan; using cudf::detail::host_2dspan; using cudf::detail::hostdevice_2dvector; @@ -66,7 +62,7 @@ class writer::impl { */ explicit impl(std::vector> sinks, parquet_writer_options const& options, - single_write_mode mode, + cudf::io::detail::single_write_mode mode, rmm::cuda_stream_view stream); /** @@ -79,7 +75,7 @@ class writer::impl { */ explicit impl(std::vector> sinks, chunked_parquet_writer_options const& options, - single_write_mode mode, + cudf::io::detail::single_write_mode mode, rmm::cuda_stream_view stream); /** @@ -139,8 +135,8 @@ class writer::impl { * @param[out] bounce_buffer Temporary host output buffer */ void write_parquet_data_to_sink(std::unique_ptr& updated_agg_meta, - device_span pages, - host_2dspan chunks, + device_span pages, + host_2dspan chunks, host_span global_rowgroup_base, host_span first_rg_in_part, host_span batch_list, @@ -164,9 +160,10 @@ class writer::impl { bool 
const _write_v2_headers; int32_t const _column_index_truncate_length; std::vector> const _kv_meta; // Optional user metadata. - single_write_mode const _single_write_mode; // Special parameter only used by `write()` to - // indicate that we are guaranteeing a single table - // write. This enables some internal optimizations. + cudf::io::detail::single_write_mode const + _single_write_mode; // Special parameter only used by `write()` to + // indicate that we are guaranteeing a single table + // write. This enables some internal optimizations. std::vector> const _out_sink; // Internal states, filled during `write()` and written to sink during `write` and `close()`. @@ -180,7 +177,4 @@ class writer::impl { bool _closed = false; // To track if the output has been written to sink. }; -} // namespace parquet -} // namespace detail -} // namespace io -} // namespace cudf +} // namespace cudf::io::parquet::detail diff --git a/cpp/tests/io/parquet_test.cpp b/cpp/tests/io/parquet_test.cpp index 73c946a5feb..3e5d7033e60 100644 --- a/cpp/tests/io/parquet_test.cpp +++ b/cpp/tests/io/parquet_test.cpp @@ -200,29 +200,30 @@ std::unique_ptr make_parquet_list_list_col( // of the file to populate the FileMetaData pointed to by file_meta_data. // throws cudf::logic_error if the file or metadata is invalid. void read_footer(std::unique_ptr const& source, - cudf::io::parquet::FileMetaData* file_meta_data) + cudf::io::parquet::detail::FileMetaData* file_meta_data) { - constexpr auto header_len = sizeof(cudf::io::parquet::file_header_s); - constexpr auto ender_len = sizeof(cudf::io::parquet::file_ender_s); + constexpr auto header_len = sizeof(cudf::io::parquet::detail::file_header_s); + constexpr auto ender_len = sizeof(cudf::io::parquet::detail::file_ender_s); auto const len = source->size(); auto const header_buffer = source->host_read(0, header_len); auto const header = - reinterpret_cast(header_buffer->data()); + reinterpret_cast(header_buffer->data()); auto const ender_buffer = source->host_read(len - ender_len, ender_len); - auto const ender = reinterpret_cast(ender_buffer->data()); + auto const ender = + reinterpret_cast(ender_buffer->data()); // checks for valid header, footer, and file length ASSERT_GT(len, header_len + ender_len); - ASSERT_TRUE(header->magic == cudf::io::parquet::parquet_magic && - ender->magic == cudf::io::parquet::parquet_magic); + ASSERT_TRUE(header->magic == cudf::io::parquet::detail::parquet_magic && + ender->magic == cudf::io::parquet::detail::parquet_magic); ASSERT_TRUE(ender->footer_len != 0 && ender->footer_len <= (len - header_len - ender_len)); // parquet files end with 4-byte footer_length and 4-byte magic == "PAR1" // seek backwards from the end of the file (footer_length + 8 bytes of ender) auto const footer_buffer = source->host_read(len - ender->footer_len - ender_len, ender->footer_len); - cudf::io::parquet::CompactProtocolReader cp(footer_buffer->data(), ender->footer_len); + cudf::io::parquet::detail::CompactProtocolReader cp(footer_buffer->data(), ender->footer_len); // returns true on success bool res = cp.read(file_meta_data); @@ -233,14 +234,14 @@ void read_footer(std::unique_ptr const& source, // this assumes the data is uncompressed. // throws cudf::logic_error if the page_loc data is invalid. 
int read_dict_bits(std::unique_ptr const& source, - cudf::io::parquet::PageLocation const& page_loc) + cudf::io::parquet::detail::PageLocation const& page_loc) { CUDF_EXPECTS(page_loc.offset > 0, "Cannot find page header"); CUDF_EXPECTS(page_loc.compressed_page_size > 0, "Invalid page header length"); - cudf::io::parquet::PageHeader page_hdr; + cudf::io::parquet::detail::PageHeader page_hdr; auto const page_buf = source->host_read(page_loc.offset, page_loc.compressed_page_size); - cudf::io::parquet::CompactProtocolReader cp(page_buf->data(), page_buf->size()); + cudf::io::parquet::detail::CompactProtocolReader cp(page_buf->data(), page_buf->size()); bool res = cp.read(&page_hdr); CUDF_EXPECTS(res, "Cannot parse page header"); @@ -252,15 +253,16 @@ int read_dict_bits(std::unique_ptr const& source, // read column index from datasource at location indicated by chunk, // parse and return as a ColumnIndex struct. // throws cudf::logic_error if the chunk data is invalid. -cudf::io::parquet::ColumnIndex read_column_index( - std::unique_ptr const& source, cudf::io::parquet::ColumnChunk const& chunk) +cudf::io::parquet::detail::ColumnIndex read_column_index( + std::unique_ptr const& source, + cudf::io::parquet::detail::ColumnChunk const& chunk) { CUDF_EXPECTS(chunk.column_index_offset > 0, "Cannot find column index"); CUDF_EXPECTS(chunk.column_index_length > 0, "Invalid column index length"); - cudf::io::parquet::ColumnIndex colidx; + cudf::io::parquet::detail::ColumnIndex colidx; auto const ci_buf = source->host_read(chunk.column_index_offset, chunk.column_index_length); - cudf::io::parquet::CompactProtocolReader cp(ci_buf->data(), ci_buf->size()); + cudf::io::parquet::detail::CompactProtocolReader cp(ci_buf->data(), ci_buf->size()); bool res = cp.read(&colidx); CUDF_EXPECTS(res, "Cannot parse column index"); return colidx; @@ -269,22 +271,24 @@ cudf::io::parquet::ColumnIndex read_column_index( // read offset index from datasource at location indicated by chunk, // parse and return as an OffsetIndex struct. // throws cudf::logic_error if the chunk data is invalid. -cudf::io::parquet::OffsetIndex read_offset_index( - std::unique_ptr const& source, cudf::io::parquet::ColumnChunk const& chunk) +cudf::io::parquet::detail::OffsetIndex read_offset_index( + std::unique_ptr const& source, + cudf::io::parquet::detail::ColumnChunk const& chunk) { CUDF_EXPECTS(chunk.offset_index_offset > 0, "Cannot find offset index"); CUDF_EXPECTS(chunk.offset_index_length > 0, "Invalid offset index length"); - cudf::io::parquet::OffsetIndex offidx; + cudf::io::parquet::detail::OffsetIndex offidx; auto const oi_buf = source->host_read(chunk.offset_index_offset, chunk.offset_index_length); - cudf::io::parquet::CompactProtocolReader cp(oi_buf->data(), oi_buf->size()); + cudf::io::parquet::detail::CompactProtocolReader cp(oi_buf->data(), oi_buf->size()); bool res = cp.read(&offidx); CUDF_EXPECTS(res, "Cannot parse offset index"); return offidx; } // Return as a Statistics from the column chunk -cudf::io::parquet::Statistics const& get_statistics(cudf::io::parquet::ColumnChunk const& chunk) +cudf::io::parquet::detail::Statistics const& get_statistics( + cudf::io::parquet::detail::ColumnChunk const& chunk) { return chunk.meta_data.statistics; } @@ -292,15 +296,16 @@ cudf::io::parquet::Statistics const& get_statistics(cudf::io::parquet::ColumnChu // read page header from datasource at location indicated by page_loc, // parse and return as a PageHeader struct. // throws cudf::logic_error if the page_loc data is invalid. 
-cudf::io::parquet::PageHeader read_page_header(std::unique_ptr const& source, - cudf::io::parquet::PageLocation const& page_loc) +cudf::io::parquet::detail::PageHeader read_page_header( + std::unique_ptr const& source, + cudf::io::parquet::detail::PageLocation const& page_loc) { CUDF_EXPECTS(page_loc.offset > 0, "Cannot find page header"); CUDF_EXPECTS(page_loc.compressed_page_size > 0, "Invalid page header length"); - cudf::io::parquet::PageHeader page_hdr; + cudf::io::parquet::detail::PageHeader page_hdr; auto const page_buf = source->host_read(page_loc.offset, page_loc.compressed_page_size); - cudf::io::parquet::CompactProtocolReader cp(page_buf->data(), page_buf->size()); + cudf::io::parquet::detail::CompactProtocolReader cp(page_buf->data(), page_buf->size()); bool res = cp.read(&page_hdr); CUDF_EXPECTS(res, "Cannot parse page header"); return page_hdr; @@ -3686,7 +3691,7 @@ TEST_F(ParquetWriterTest, CheckPageRows) // check first page header and make sure it has only page_rows values auto const source = cudf::io::datasource::create(filepath); - cudf::io::parquet::FileMetaData fmd; + cudf::io::parquet::detail::FileMetaData fmd; read_footer(source, &fmd); ASSERT_GT(fmd.row_groups.size(), 0); @@ -3697,7 +3702,7 @@ TEST_F(ParquetWriterTest, CheckPageRows) // read first data page header. sizeof(PageHeader) is not exact, but the thrift encoded // version should be smaller than size of the struct. auto const ph = read_page_header( - source, {first_chunk.data_page_offset, sizeof(cudf::io::parquet::PageHeader), 0}); + source, {first_chunk.data_page_offset, sizeof(cudf::io::parquet::detail::PageHeader), 0}); EXPECT_EQ(ph.data_page_header.num_values, page_rows); } @@ -3722,7 +3727,7 @@ TEST_F(ParquetWriterTest, CheckPageRowsAdjusted) // check first page header and make sure it has only page_rows values auto const source = cudf::io::datasource::create(filepath); - cudf::io::parquet::FileMetaData fmd; + cudf::io::parquet::detail::FileMetaData fmd; read_footer(source, &fmd); ASSERT_GT(fmd.row_groups.size(), 0); @@ -3733,7 +3738,7 @@ TEST_F(ParquetWriterTest, CheckPageRowsAdjusted) // read first data page header. sizeof(PageHeader) is not exact, but the thrift encoded // version should be smaller than size of the struct. auto const ph = read_page_header( - source, {first_chunk.data_page_offset, sizeof(cudf::io::parquet::PageHeader), 0}); + source, {first_chunk.data_page_offset, sizeof(cudf::io::parquet::detail::PageHeader), 0}); EXPECT_LE(ph.data_page_header.num_values, rows_per_page); } @@ -3759,7 +3764,7 @@ TEST_F(ParquetWriterTest, CheckPageRowsTooSmall) // check that file is written correctly when rows/page < fragment size auto const source = cudf::io::datasource::create(filepath); - cudf::io::parquet::FileMetaData fmd; + cudf::io::parquet::detail::FileMetaData fmd; read_footer(source, &fmd); ASSERT_TRUE(fmd.row_groups.size() > 0); @@ -3770,7 +3775,7 @@ TEST_F(ParquetWriterTest, CheckPageRowsTooSmall) // read first data page header. sizeof(PageHeader) is not exact, but the thrift encoded // version should be smaller than size of the struct. 
auto const ph = read_page_header( - source, {first_chunk.data_page_offset, sizeof(cudf::io::parquet::PageHeader), 0}); + source, {first_chunk.data_page_offset, sizeof(cudf::io::parquet::detail::PageHeader), 0}); // there should be only one page since the fragment size is larger than rows_per_page EXPECT_EQ(ph.data_page_header.num_values, num_rows); @@ -3798,7 +3803,7 @@ TEST_F(ParquetWriterTest, Decimal128Stats) cudf::io::write_parquet(out_opts); auto const source = cudf::io::datasource::create(filepath); - cudf::io::parquet::FileMetaData fmd; + cudf::io::parquet::detail::FileMetaData fmd; read_footer(source, &fmd); @@ -4031,7 +4036,7 @@ TYPED_TEST(ParquetWriterComparableTypeTest, ThreeColumnSorted) cudf::io::write_parquet(out_opts); auto const source = cudf::io::datasource::create(filepath); - cudf::io::parquet::FileMetaData fmd; + cudf::io::parquet::detail::FileMetaData fmd; read_footer(source, &fmd); ASSERT_GT(fmd.row_groups.size(), 0); @@ -4041,10 +4046,10 @@ TYPED_TEST(ParquetWriterComparableTypeTest, ThreeColumnSorted) // now check that the boundary order for chunk 1 is ascending, // chunk 2 is descending, and chunk 3 is unordered - cudf::io::parquet::BoundaryOrder expected_orders[] = { - cudf::io::parquet::BoundaryOrder::ASCENDING, - cudf::io::parquet::BoundaryOrder::DESCENDING, - cudf::io::parquet::BoundaryOrder::UNORDERED}; + cudf::io::parquet::detail::BoundaryOrder expected_orders[] = { + cudf::io::parquet::detail::BoundaryOrder::ASCENDING, + cudf::io::parquet::detail::BoundaryOrder::DESCENDING, + cudf::io::parquet::detail::BoundaryOrder::UNORDERED}; for (std::size_t i = 0; i < columns.size(); i++) { auto const ci = read_column_index(source, columns[i]); @@ -4067,15 +4072,15 @@ int32_t compare(T& v1, T& v2) // 1 if v1 > v2. int32_t compare_binary(std::vector const& v1, std::vector const& v2, - cudf::io::parquet::Type ptype, - cudf::io::parquet::ConvertedType ctype) + cudf::io::parquet::detail::Type ptype, + cudf::io::parquet::detail::ConvertedType ctype) { switch (ptype) { - case cudf::io::parquet::INT32: + case cudf::io::parquet::detail::INT32: switch (ctype) { - case cudf::io::parquet::UINT_8: - case cudf::io::parquet::UINT_16: - case cudf::io::parquet::UINT_32: + case cudf::io::parquet::detail::UINT_8: + case cudf::io::parquet::detail::UINT_16: + case cudf::io::parquet::detail::UINT_32: return compare(*(reinterpret_cast(v1.data())), *(reinterpret_cast(v2.data()))); default: @@ -4083,23 +4088,23 @@ int32_t compare_binary(std::vector const& v1, *(reinterpret_cast(v2.data()))); } - case cudf::io::parquet::INT64: - if (ctype == cudf::io::parquet::UINT_64) { + case cudf::io::parquet::detail::INT64: + if (ctype == cudf::io::parquet::detail::UINT_64) { return compare(*(reinterpret_cast(v1.data())), *(reinterpret_cast(v2.data()))); } return compare(*(reinterpret_cast(v1.data())), *(reinterpret_cast(v2.data()))); - case cudf::io::parquet::FLOAT: + case cudf::io::parquet::detail::FLOAT: return compare(*(reinterpret_cast(v1.data())), *(reinterpret_cast(v2.data()))); - case cudf::io::parquet::DOUBLE: + case cudf::io::parquet::detail::DOUBLE: return compare(*(reinterpret_cast(v1.data())), *(reinterpret_cast(v2.data()))); - case cudf::io::parquet::BYTE_ARRAY: { + case cudf::io::parquet::detail::BYTE_ARRAY: { int32_t v1sz = v1.size(); int32_t v2sz = v2.size(); int32_t ret = memcmp(v1.data(), v2.data(), std::min(v1sz, v2sz)); @@ -4142,7 +4147,7 @@ TEST_P(ParquetV2Test, LargeColumnIndex) cudf::io::write_parquet(out_opts); auto const source = cudf::io::datasource::create(filepath); - 
cudf::io::parquet::FileMetaData fmd; + cudf::io::parquet::detail::FileMetaData fmd; read_footer(source, &fmd); @@ -4164,10 +4169,10 @@ TEST_P(ParquetV2Test, LargeColumnIndex) TEST_P(ParquetV2Test, CheckColumnOffsetIndex) { - constexpr auto num_rows = 100000; - auto const is_v2 = GetParam(); - auto const expected_hdr_type = - is_v2 ? cudf::io::parquet::PageType::DATA_PAGE_V2 : cudf::io::parquet::PageType::DATA_PAGE; + constexpr auto num_rows = 100000; + auto const is_v2 = GetParam(); + auto const expected_hdr_type = is_v2 ? cudf::io::parquet::detail::PageType::DATA_PAGE_V2 + : cudf::io::parquet::detail::PageType::DATA_PAGE; // fixed length strings auto str1_elements = cudf::detail::make_counting_transform_iterator(0, [](auto i) { @@ -4210,7 +4215,7 @@ TEST_P(ParquetV2Test, CheckColumnOffsetIndex) cudf::io::write_parquet(out_opts); auto const source = cudf::io::datasource::create(filepath); - cudf::io::parquet::FileMetaData fmd; + cudf::io::parquet::detail::FileMetaData fmd; read_footer(source, &fmd); @@ -4255,10 +4260,10 @@ TEST_P(ParquetV2Test, CheckColumnOffsetIndex) TEST_P(ParquetV2Test, CheckColumnOffsetIndexNulls) { - constexpr auto num_rows = 100000; - auto const is_v2 = GetParam(); - auto const expected_hdr_type = - is_v2 ? cudf::io::parquet::PageType::DATA_PAGE_V2 : cudf::io::parquet::PageType::DATA_PAGE; + constexpr auto num_rows = 100000; + auto const is_v2 = GetParam(); + auto const expected_hdr_type = is_v2 ? cudf::io::parquet::detail::PageType::DATA_PAGE_V2 + : cudf::io::parquet::detail::PageType::DATA_PAGE; // fixed length strings auto str1_elements = cudf::detail::make_counting_transform_iterator(0, [](auto i) { @@ -4311,7 +4316,7 @@ TEST_P(ParquetV2Test, CheckColumnOffsetIndexNulls) cudf::io::write_parquet(out_opts); auto const source = cudf::io::datasource::create(filepath); - cudf::io::parquet::FileMetaData fmd; + cudf::io::parquet::detail::FileMetaData fmd; read_footer(source, &fmd); @@ -4362,10 +4367,10 @@ TEST_P(ParquetV2Test, CheckColumnOffsetIndexNulls) TEST_P(ParquetV2Test, CheckColumnOffsetIndexNullColumn) { - constexpr auto num_rows = 100000; - auto const is_v2 = GetParam(); - auto const expected_hdr_type = - is_v2 ? cudf::io::parquet::PageType::DATA_PAGE_V2 : cudf::io::parquet::PageType::DATA_PAGE; + constexpr auto num_rows = 100000; + auto const is_v2 = GetParam(); + auto const expected_hdr_type = is_v2 ? cudf::io::parquet::detail::PageType::DATA_PAGE_V2 + : cudf::io::parquet::detail::PageType::DATA_PAGE; // fixed length strings auto str1_elements = cudf::detail::make_counting_transform_iterator(0, [](auto i) { @@ -4403,7 +4408,7 @@ TEST_P(ParquetV2Test, CheckColumnOffsetIndexNullColumn) cudf::io::write_parquet(out_opts); auto const source = cudf::io::datasource::create(filepath); - cudf::io::parquet::FileMetaData fmd; + cudf::io::parquet::detail::FileMetaData fmd; read_footer(source, &fmd); @@ -4458,9 +4463,9 @@ TEST_P(ParquetV2Test, CheckColumnOffsetIndexNullColumn) TEST_P(ParquetV2Test, CheckColumnOffsetIndexStruct) { - auto const is_v2 = GetParam(); - auto const expected_hdr_type = - is_v2 ? cudf::io::parquet::PageType::DATA_PAGE_V2 : cudf::io::parquet::PageType::DATA_PAGE; + auto const is_v2 = GetParam(); + auto const expected_hdr_type = is_v2 ? 
cudf::io::parquet::detail::PageType::DATA_PAGE_V2 + : cudf::io::parquet::detail::PageType::DATA_PAGE; auto c0 = testdata::ascending(); @@ -4495,7 +4500,7 @@ TEST_P(ParquetV2Test, CheckColumnOffsetIndexStruct) cudf::io::write_parquet(out_opts); auto const source = cudf::io::datasource::create(filepath); - cudf::io::parquet::FileMetaData fmd; + cudf::io::parquet::detail::FileMetaData fmd; read_footer(source, &fmd); @@ -4542,9 +4547,9 @@ TEST_P(ParquetV2Test, CheckColumnOffsetIndexStruct) TEST_P(ParquetV2Test, CheckColumnOffsetIndexStructNulls) { - auto const is_v2 = GetParam(); - auto const expected_hdr_type = - is_v2 ? cudf::io::parquet::PageType::DATA_PAGE_V2 : cudf::io::parquet::PageType::DATA_PAGE; + auto const is_v2 = GetParam(); + auto const expected_hdr_type = is_v2 ? cudf::io::parquet::detail::PageType::DATA_PAGE_V2 + : cudf::io::parquet::detail::PageType::DATA_PAGE; auto validity2 = cudf::detail::make_counting_transform_iterator(0, [](cudf::size_type i) { return i % 2; }); @@ -4586,7 +4591,7 @@ TEST_P(ParquetV2Test, CheckColumnOffsetIndexStructNulls) cudf::io::write_parquet(out_opts); auto const source = cudf::io::datasource::create(filepath); - cudf::io::parquet::FileMetaData fmd; + cudf::io::parquet::detail::FileMetaData fmd; read_footer(source, &fmd); @@ -4616,9 +4621,9 @@ TEST_P(ParquetV2Test, CheckColumnOffsetIndexStructNulls) TEST_P(ParquetV2Test, CheckColumnIndexListWithNulls) { - auto const is_v2 = GetParam(); - auto const expected_hdr_type = - is_v2 ? cudf::io::parquet::PageType::DATA_PAGE_V2 : cudf::io::parquet::PageType::DATA_PAGE; + auto const is_v2 = GetParam(); + auto const expected_hdr_type = is_v2 ? cudf::io::parquet::detail::PageType::DATA_PAGE_V2 + : cudf::io::parquet::detail::PageType::DATA_PAGE; using cudf::test::iterators::null_at; using cudf::test::iterators::nulls_at; @@ -4711,7 +4716,7 @@ TEST_P(ParquetV2Test, CheckColumnIndexListWithNulls) cudf::io::write_parquet(out_opts); auto const source = cudf::io::datasource::create(filepath); - cudf::io::parquet::FileMetaData fmd; + cudf::io::parquet::detail::FileMetaData fmd; read_footer(source, &fmd); @@ -4812,7 +4817,7 @@ TEST_F(ParquetWriterTest, CheckColumnIndexTruncation) cudf::io::write_parquet(out_opts); auto const source = cudf::io::datasource::create(filepath); - cudf::io::parquet::FileMetaData fmd; + cudf::io::parquet::detail::FileMetaData fmd; read_footer(source, &fmd); @@ -4870,7 +4875,7 @@ TEST_F(ParquetWriterTest, BinaryColumnIndexTruncation) cudf::io::write_parquet(out_opts); auto const source = cudf::io::datasource::create(filepath); - cudf::io::parquet::FileMetaData fmd; + cudf::io::parquet::detail::FileMetaData fmd; read_footer(source, &fmd); @@ -5030,10 +5035,10 @@ TEST_F(ParquetReaderTest, NestedByteArray) cudf::io::write_parquet(out_opts); auto source = cudf::io::datasource::create(filepath); - cudf::io::parquet::FileMetaData fmd; + cudf::io::parquet::detail::FileMetaData fmd; read_footer(source, &fmd); - EXPECT_EQ(fmd.schema[5].type, cudf::io::parquet::Type::BYTE_ARRAY); + EXPECT_EQ(fmd.schema[5].type, cudf::io::parquet::detail::Type::BYTE_ARRAY); std::vector md{ {}, @@ -5081,12 +5086,12 @@ TEST_F(ParquetWriterTest, ByteArrayStats) auto result = cudf::io::read_parquet(in_opts); auto source = cudf::io::datasource::create(filepath); - cudf::io::parquet::FileMetaData fmd; + cudf::io::parquet::detail::FileMetaData fmd; read_footer(source, &fmd); - EXPECT_EQ(fmd.schema[1].type, cudf::io::parquet::Type::BYTE_ARRAY); - EXPECT_EQ(fmd.schema[2].type, cudf::io::parquet::Type::BYTE_ARRAY); + 
EXPECT_EQ(fmd.schema[1].type, cudf::io::parquet::detail::Type::BYTE_ARRAY); + EXPECT_EQ(fmd.schema[2].type, cudf::io::parquet::detail::Type::BYTE_ARRAY); auto const stats0 = get_statistics(fmd.row_groups[0].columns[0]); auto const stats1 = get_statistics(fmd.row_groups[0].columns[1]); @@ -5137,9 +5142,9 @@ TEST_F(ParquetReaderTest, StructByteArray) TEST_F(ParquetReaderTest, NestingOptimizationTest) { - // test nesting levels > cudf::io::parquet::gpu::max_cacheable_nesting_decode_info deep. + // test nesting levels > cudf::io::parquet::detail::max_cacheable_nesting_decode_info deep. constexpr cudf::size_type num_nesting_levels = 16; - static_assert(num_nesting_levels > cudf::io::parquet::gpu::max_cacheable_nesting_decode_info); + static_assert(num_nesting_levels > cudf::io::parquet::detail::max_cacheable_nesting_decode_info); constexpr cudf::size_type rows_per_level = 2; constexpr cudf::size_type num_values = (1 << num_nesting_levels) * rows_per_level; @@ -5206,13 +5211,13 @@ TEST_F(ParquetWriterTest, SingleValueDictionaryTest) // make sure dictionary was used auto const source = cudf::io::datasource::create(filepath); - cudf::io::parquet::FileMetaData fmd; + cudf::io::parquet::detail::FileMetaData fmd; read_footer(source, &fmd); auto used_dict = [&fmd]() { for (auto enc : fmd.row_groups[0].columns[0].meta_data.encodings) { - if (enc == cudf::io::parquet::Encoding::PLAIN_DICTIONARY or - enc == cudf::io::parquet::Encoding::RLE_DICTIONARY) { + if (enc == cudf::io::parquet::detail::Encoding::PLAIN_DICTIONARY or + enc == cudf::io::parquet::detail::Encoding::RLE_DICTIONARY) { return true; } } @@ -5252,13 +5257,13 @@ TEST_F(ParquetWriterTest, DictionaryNeverTest) // make sure dictionary was not used auto const source = cudf::io::datasource::create(filepath); - cudf::io::parquet::FileMetaData fmd; + cudf::io::parquet::detail::FileMetaData fmd; read_footer(source, &fmd); auto used_dict = [&fmd]() { for (auto enc : fmd.row_groups[0].columns[0].meta_data.encodings) { - if (enc == cudf::io::parquet::Encoding::PLAIN_DICTIONARY or - enc == cudf::io::parquet::Encoding::RLE_DICTIONARY) { + if (enc == cudf::io::parquet::detail::Encoding::PLAIN_DICTIONARY or + enc == cudf::io::parquet::detail::Encoding::RLE_DICTIONARY) { return true; } } @@ -5303,13 +5308,13 @@ TEST_F(ParquetWriterTest, DictionaryAdaptiveTest) // make sure dictionary was used as expected. col0 should use one, // col1 should not. 
auto const source = cudf::io::datasource::create(filepath); - cudf::io::parquet::FileMetaData fmd; + cudf::io::parquet::detail::FileMetaData fmd; read_footer(source, &fmd); auto used_dict = [&fmd](int col) { for (auto enc : fmd.row_groups[0].columns[col].meta_data.encodings) { - if (enc == cudf::io::parquet::Encoding::PLAIN_DICTIONARY or - enc == cudf::io::parquet::Encoding::RLE_DICTIONARY) { + if (enc == cudf::io::parquet::detail::Encoding::PLAIN_DICTIONARY or + enc == cudf::io::parquet::detail::Encoding::RLE_DICTIONARY) { return true; } } @@ -5354,13 +5359,13 @@ TEST_F(ParquetWriterTest, DictionaryAlwaysTest) // make sure dictionary was used for both columns auto const source = cudf::io::datasource::create(filepath); - cudf::io::parquet::FileMetaData fmd; + cudf::io::parquet::detail::FileMetaData fmd; read_footer(source, &fmd); auto used_dict = [&fmd](int col) { for (auto enc : fmd.row_groups[0].columns[col].meta_data.encodings) { - if (enc == cudf::io::parquet::Encoding::PLAIN_DICTIONARY or - enc == cudf::io::parquet::Encoding::RLE_DICTIONARY) { + if (enc == cudf::io::parquet::detail::Encoding::PLAIN_DICTIONARY or + enc == cudf::io::parquet::detail::Encoding::RLE_DICTIONARY) { return true; } } @@ -5438,13 +5443,13 @@ TEST_P(ParquetSizedTest, DictionaryTest) // make sure dictionary was used auto const source = cudf::io::datasource::create(filepath); - cudf::io::parquet::FileMetaData fmd; + cudf::io::parquet::detail::FileMetaData fmd; read_footer(source, &fmd); auto used_dict = [&fmd]() { for (auto enc : fmd.row_groups[0].columns[0].meta_data.encodings) { - if (enc == cudf::io::parquet::Encoding::PLAIN_DICTIONARY or - enc == cudf::io::parquet::Encoding::RLE_DICTIONARY) { + if (enc == cudf::io::parquet::detail::Encoding::PLAIN_DICTIONARY or + enc == cudf::io::parquet::detail::Encoding::RLE_DICTIONARY) { return true; } } @@ -6664,7 +6669,7 @@ TEST_F(ParquetWriterTest, PreserveNullability) TEST_P(ParquetV2Test, CheckEncodings) { - using cudf::io::parquet::Encoding; + using cudf::io::parquet::detail::Encoding; constexpr auto num_rows = 100'000; auto const is_v2 = GetParam(); @@ -6697,7 +6702,7 @@ TEST_P(ParquetV2Test, CheckEncodings) }; auto const source = cudf::io::datasource::create(filepath); - cudf::io::parquet::FileMetaData fmd; + cudf::io::parquet::detail::FileMetaData fmd; read_footer(source, &fmd); auto const& chunk0_enc = fmd.row_groups[0].columns[0].meta_data.encodings; From e345620ddaf5d8ac87e2428a84508ecfec2ba4f8 Mon Sep 17 00:00:00 2001 From: Suraj Aralihalli Date: Mon, 9 Oct 2023 10:13:25 -0700 Subject: [PATCH 135/150] Add stream parameter to List Manipulation and Operations APIs (#14248) I have organized the public List APIs into **three** distinct categories based on their functionality, simplifying the PRs for easier and shorter reviews. This particular PR introduces the `stream` parameter only to the `List Manipulation and Operations APIs`, which fall under `Section 1`. See next comment for other sections. 1. List Manipulation and Operations (`combine.hpp`, `contains.hpp`, `count_elements.hpp`) ``` concatenate_rows concatenate_list_elements contains_nulls contains - search_keys contains - search_key index_of - search_keys index_of - search_key count_elements ``` This PR addresses issues in the following files: 1. **column_wrapper.hpp**: - Corrects the improper passing of the stream value in the `make_lists_column` function. - Enables the missing cast to `lists_column_view`. - Substitutes `copy_bitmask` with `cudf::detail::copy_bitmask` to include the stream parameter. 2. 
**concatenate.cu:** - Substitutes `create_null_mask` with `cudf::detail::create_null_mask` to include the stream parameter. Authors: - Suraj Aralihalli (https://github.com/SurajAralihalli) - Vyas Ramasubramani (https://github.com/vyasr) - Yunsong Wang (https://github.com/PointKernel) Approvers: - Yunsong Wang (https://github.com/PointKernel) - Mark Harris (https://github.com/harrism) URL: https://github.com/rapidsai/cudf/pull/14248 --- cpp/include/cudf/lists/combine.hpp | 4 + cpp/include/cudf/lists/contains.hpp | 14 ++- cpp/include/cudf/lists/count_elements.hpp | 2 + cpp/include/cudf_test/column_wrapper.hpp | 45 +++++++--- .../combine/concatenate_list_elements.cu | 3 +- cpp/src/lists/combine/concatenate_rows.cu | 3 +- cpp/src/lists/contains.cu | 37 ++++---- cpp/src/lists/copying/concatenate.cu | 5 +- cpp/src/lists/count_elements.cu | 3 +- cpp/tests/CMakeLists.txt | 1 + cpp/tests/streams/lists_test.cpp | 87 +++++++++++++++++++ 11 files changed, 169 insertions(+), 35 deletions(-) create mode 100644 cpp/tests/streams/lists_test.cpp diff --git a/cpp/include/cudf/lists/combine.hpp b/cpp/include/cudf/lists/combine.hpp index 0bc76828fc3..0d9c1c157eb 100644 --- a/cpp/include/cudf/lists/combine.hpp +++ b/cpp/include/cudf/lists/combine.hpp @@ -57,6 +57,7 @@ enum class concatenate_null_policy { IGNORE, NULLIFY_OUTPUT_ROW }; * @param input Table of lists to be concatenated. * @param null_policy The parameter to specify whether a null list element will be ignored from * concatenation, or any concatenation involving a null element will result in a null list. + * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device memory resource used to allocate the returned column's device memory. * @return A new column in which each row is a list resulted from concatenating all list elements in * the corresponding row of the input table. @@ -64,6 +65,7 @@ enum class concatenate_null_policy { IGNORE, NULLIFY_OUTPUT_ROW }; std::unique_ptr concatenate_rows( table_view const& input, concatenate_null_policy null_policy = concatenate_null_policy::IGNORE, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -86,6 +88,7 @@ std::unique_ptr concatenate_rows( * @param input The lists column containing lists of list elements to concatenate. * @param null_policy The parameter to specify whether a null list element will be ignored from * concatenation, or any concatenation involving a null element will result in a null list. + * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device memory resource used to allocate the returned column's device memory. * @return A new column in which each row is a list resulted from concatenating all list elements in * the corresponding row of the input lists column. 
@@ -93,6 +96,7 @@ std::unique_ptr concatenate_rows( std::unique_ptr concatenate_list_elements( column_view const& input, concatenate_null_policy null_policy = concatenate_null_policy::IGNORE, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of group diff --git a/cpp/include/cudf/lists/contains.hpp b/cpp/include/cudf/lists/contains.hpp index 21c2ca1d64e..7cf67ec9205 100644 --- a/cpp/include/cudf/lists/contains.hpp +++ b/cpp/include/cudf/lists/contains.hpp @@ -42,12 +42,14 @@ namespace lists { * * @param lists Lists column whose `n` rows are to be searched * @param search_key The scalar key to be looked up in each list row + * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device memory resource used to allocate the returned column's device memory * @return BOOL8 column of `n` rows with the result of the lookup */ std::unique_ptr contains( cudf::lists_column_view const& lists, cudf::scalar const& search_key, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -63,13 +65,15 @@ std::unique_ptr contains( * 2. The list row `lists[i]` is null * * @param lists Lists column whose `n` rows are to be searched - * @param search_keys Column of elements to be looked up in each list row + * @param search_keys Column of elements to be looked up in each list row. + * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device memory resource used to allocate the returned column's device memory * @return BOOL8 column of `n` rows with the result of the lookup */ std::unique_ptr contains( cudf::lists_column_view const& lists, cudf::column_view const& search_keys, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -84,12 +88,14 @@ std::unique_ptr contains( * A row with an empty list will always return false. * Nulls inside non-null nested elements (such as lists or structs) are not considered. * - * @param lists Lists column whose `n` rows are to be searched + * @param lists Lists column whose `n` rows are to be searched. + * @param stream CUDA stream used for device memory operations and kernel launches. 
* @param mr Device memory resource used to allocate the returned column's device memory * @return BOOL8 column of `n` rows with the result of the lookup */ std::unique_ptr contains_nulls( cudf::lists_column_view const& lists, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -125,6 +131,7 @@ enum class duplicate_find_option : int32_t { * @param search_key The scalar key to be looked up in each list row * @param find_option Whether to return the position of the first match (`FIND_FIRST`) or * last (`FIND_LAST`) + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return column of `n` rows with the location of the `search_key` */ @@ -132,6 +139,7 @@ std::unique_ptr index_of( cudf::lists_column_view const& lists, cudf::scalar const& search_key, duplicate_find_option find_option = duplicate_find_option::FIND_FIRST, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -160,6 +168,7 @@ std::unique_ptr index_of( * `lists` * @param find_option Whether to return the position of the first match (`FIND_FIRST`) or * last (`FIND_LAST`) + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return column of `n` rows with the location of the `search_key` */ @@ -167,6 +176,7 @@ std::unique_ptr index_of( cudf::lists_column_view const& lists, cudf::column_view const& search_keys, duplicate_find_option find_option = duplicate_find_option::FIND_FIRST, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of group diff --git a/cpp/include/cudf/lists/count_elements.hpp b/cpp/include/cudf/lists/count_elements.hpp index 552ba058b93..e4bd0dca9ae 100644 --- a/cpp/include/cudf/lists/count_elements.hpp +++ b/cpp/include/cudf/lists/count_elements.hpp @@ -45,11 +45,13 @@ namespace lists { * in the output column. * * @param input Input lists column + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return New column with the number of elements for each row */ std::unique_ptr count_elements( lists_column_view const& input, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of lists_elements group diff --git a/cpp/include/cudf_test/column_wrapper.hpp b/cpp/include/cudf_test/column_wrapper.hpp index c0932b81dc3..e94dfea9dcf 100644 --- a/cpp/include/cudf_test/column_wrapper.hpp +++ b/cpp/include/cudf_test/column_wrapper.hpp @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -1281,6 +1282,11 @@ class dictionary_column_wrapper : public detail::column_wrapper { template class lists_column_wrapper : public detail::column_wrapper { public: + /** + * @brief Cast to lists_column_view + */ + operator lists_column_view() const { return cudf::lists_column_view{wrapped->view()}; } + /** * @brief Construct a lists column containing a single list of fixed-width * type from an initializer list of values. 
@@ -1542,8 +1548,12 @@ class lists_column_wrapper : public detail::column_wrapper { rmm::device_buffer&& null_mask) { // construct the list column - wrapped = make_lists_column( - num_rows, std::move(offsets), std::move(values), null_count, std::move(null_mask)); + wrapped = make_lists_column(num_rows, + std::move(offsets), + std::move(values), + null_count, + std::move(null_mask), + cudf::test::get_default_stream()); } /** @@ -1618,8 +1628,12 @@ class lists_column_wrapper : public detail::column_wrapper { }(); // construct the list column - wrapped = make_lists_column( - cols.size(), std::move(offsets), std::move(data), null_count, std::move(null_mask)); + wrapped = make_lists_column(cols.size(), + std::move(offsets), + std::move(data), + null_count, + std::move(null_mask), + cudf::test::get_default_stream()); } /** @@ -1647,8 +1661,12 @@ class lists_column_wrapper : public detail::column_wrapper { depth = 0; size_type num_elements = offsets->size() == 0 ? 0 : offsets->size() - 1; - wrapped = - make_lists_column(num_elements, std::move(offsets), std::move(c), 0, rmm::device_buffer{}); + wrapped = make_lists_column(num_elements, + std::move(offsets), + std::move(c), + 0, + rmm::device_buffer{}, + cudf::test::get_default_stream()); } /** @@ -1697,12 +1715,15 @@ class lists_column_wrapper : public detail::column_wrapper { } lists_column_view lcv(col); - return make_lists_column(col.size(), - std::make_unique(lcv.offsets()), - normalize_column(lists_column_view(col).child(), - lists_column_view(expected_hierarchy).child()), - col.null_count(), - copy_bitmask(col)); + return make_lists_column( + col.size(), + std::make_unique(lcv.offsets()), + normalize_column(lists_column_view(col).child(), + lists_column_view(expected_hierarchy).child()), + col.null_count(), + cudf::detail::copy_bitmask( + col, cudf::test::get_default_stream(), rmm::mr::get_current_device_resource()), + cudf::test::get_default_stream()); } std::pair, std::vector>> preprocess_columns( diff --git a/cpp/src/lists/combine/concatenate_list_elements.cu b/cpp/src/lists/combine/concatenate_list_elements.cu index fbe297765f8..99dbd55678b 100644 --- a/cpp/src/lists/combine/concatenate_list_elements.cu +++ b/cpp/src/lists/combine/concatenate_list_elements.cu @@ -271,10 +271,11 @@ std::unique_ptr concatenate_list_elements(column_view const& input, */ std::unique_ptr concatenate_list_elements(column_view const& input, concatenate_null_policy null_policy, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::concatenate_list_elements(input, null_policy, cudf::get_default_stream(), mr); + return detail::concatenate_list_elements(input, null_policy, stream, mr); } } // namespace lists diff --git a/cpp/src/lists/combine/concatenate_rows.cu b/cpp/src/lists/combine/concatenate_rows.cu index 658538b0195..49be7b5ff17 100644 --- a/cpp/src/lists/combine/concatenate_rows.cu +++ b/cpp/src/lists/combine/concatenate_rows.cu @@ -305,10 +305,11 @@ std::unique_ptr concatenate_rows(table_view const& input, */ std::unique_ptr concatenate_rows(table_view const& input, concatenate_null_policy null_policy, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::concatenate_rows(input, null_policy, cudf::get_default_stream(), mr); + return detail::concatenate_rows(input, null_policy, stream, mr); } } // namespace lists diff --git a/cpp/src/lists/contains.cu b/cpp/src/lists/contains.cu index df1d043bdb6..4733a5d63a8 100644 --- a/cpp/src/lists/contains.cu +++ 
b/cpp/src/lists/contains.cu @@ -287,7 +287,7 @@ std::unique_ptr index_of(lists_column_view const& lists, } auto search_key_col = cudf::make_column_from_scalar(search_key, lists.size(), stream, mr); - return index_of(lists, search_key_col->view(), find_option, stream, mr); + return detail::index_of(lists, search_key_col->view(), find_option, stream, mr); } std::unique_ptr index_of(lists_column_view const& lists, @@ -306,11 +306,11 @@ std::unique_ptr contains(lists_column_view const& lists, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { - auto key_indices = index_of(lists, - search_key, - duplicate_find_option::FIND_FIRST, - stream, - rmm::mr::get_current_device_resource()); + auto key_indices = detail::index_of(lists, + search_key, + duplicate_find_option::FIND_FIRST, + stream, + rmm::mr::get_current_device_resource()); return to_contains(std::move(key_indices), stream, mr); } @@ -322,11 +322,11 @@ std::unique_ptr contains(lists_column_view const& lists, CUDF_EXPECTS(search_keys.size() == lists.size(), "Number of search keys must match list column size."); - auto key_indices = index_of(lists, - search_keys, - duplicate_find_option::FIND_FIRST, - stream, - rmm::mr::get_current_device_resource()); + auto key_indices = detail::index_of(lists, + search_keys, + duplicate_find_option::FIND_FIRST, + stream, + rmm::mr::get_current_device_resource()); return to_contains(std::move(key_indices), stream, mr); } @@ -364,43 +364,48 @@ std::unique_ptr contains_nulls(lists_column_view const& lists, std::unique_ptr contains(lists_column_view const& lists, cudf::scalar const& search_key, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::contains(lists, search_key, cudf::get_default_stream(), mr); + return detail::contains(lists, search_key, stream, mr); } std::unique_ptr contains(lists_column_view const& lists, column_view const& search_keys, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::contains(lists, search_keys, cudf::get_default_stream(), mr); + return detail::contains(lists, search_keys, stream, mr); } std::unique_ptr contains_nulls(lists_column_view const& lists, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::contains_nulls(lists, cudf::get_default_stream(), mr); + return detail::contains_nulls(lists, stream, mr); } std::unique_ptr index_of(lists_column_view const& lists, cudf::scalar const& search_key, duplicate_find_option find_option, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::index_of(lists, search_key, find_option, cudf::get_default_stream(), mr); + return detail::index_of(lists, search_key, find_option, stream, mr); } std::unique_ptr index_of(lists_column_view const& lists, column_view const& search_keys, duplicate_find_option find_option, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::index_of(lists, search_keys, find_option, cudf::get_default_stream(), mr); + return detail::index_of(lists, search_keys, find_option, stream, mr); } } // namespace cudf::lists diff --git a/cpp/src/lists/copying/concatenate.cu b/cpp/src/lists/copying/concatenate.cu index ddd0dfbe084..5407b88236f 100644 --- a/cpp/src/lists/copying/concatenate.cu +++ b/cpp/src/lists/copying/concatenate.cu @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -123,8 +124,8 @@ std::unique_ptr 
concatenate(host_span columns, // if any of the input columns have nulls, construct the output mask bool const has_nulls = std::any_of(columns.begin(), columns.end(), [](auto const& col) { return col.has_nulls(); }); - rmm::device_buffer null_mask = create_null_mask( - total_list_count, has_nulls ? mask_state::UNINITIALIZED : mask_state::UNALLOCATED); + rmm::device_buffer null_mask = cudf::detail::create_null_mask( + total_list_count, has_nulls ? mask_state::UNINITIALIZED : mask_state::UNALLOCATED, stream, mr); auto null_mask_data = static_cast(null_mask.data()); auto const null_count = has_nulls ? cudf::detail::concatenate_masks(columns, null_mask_data, stream) : size_type{0}; diff --git a/cpp/src/lists/count_elements.cu b/cpp/src/lists/count_elements.cu index 40a14d805e1..2fd0851067a 100644 --- a/cpp/src/lists/count_elements.cu +++ b/cpp/src/lists/count_elements.cu @@ -73,10 +73,11 @@ std::unique_ptr count_elements(lists_column_view const& input, // external APIS std::unique_ptr count_elements(lists_column_view const& input, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::count_elements(input, cudf::get_default_stream(), mr); + return detail::count_elements(input, stream, mr); } } // namespace lists diff --git a/cpp/tests/CMakeLists.txt b/cpp/tests/CMakeLists.txt index ac13c121530..ffaba7d6fa7 100644 --- a/cpp/tests/CMakeLists.txt +++ b/cpp/tests/CMakeLists.txt @@ -638,6 +638,7 @@ ConfigureTest( ) ConfigureTest(STREAM_SORTING_TEST streams/sorting_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_TEXT_TEST streams/text/ngrams_test.cpp STREAM_MODE testing) +ConfigureTest(STREAM_LISTS_TEST streams/lists_test.cpp STREAM_MODE testing) # ################################################################################################## # Install tests #################################################################################### diff --git a/cpp/tests/streams/lists_test.cpp b/cpp/tests/streams/lists_test.cpp new file mode 100644 index 00000000000..e292b551d83 --- /dev/null +++ b/cpp/tests/streams/lists_test.cpp @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+class ListTest : public cudf::test::BaseFixture {};
+
+TEST_F(ListTest, ConcatenateRows)
+{
+ cudf::test::lists_column_wrapper list_col_1{{0, 1}, {2, 3}, {4, 5}};
+ cudf::test::lists_column_wrapper list_col_2{{0, 1}, {2, 3}, {4, 5}};
+ cudf::table_view lists_table({list_col_1, list_col_2});
+ cudf::lists::concatenate_rows(
+ lists_table, cudf::lists::concatenate_null_policy::IGNORE, cudf::test::get_default_stream());
+}
+
+TEST_F(ListTest, ConcatenateListElements)
+{
+ cudf::test::lists_column_wrapper ll_column{{{0, 1}, {2, 3}}, {{4, 5}, {6, 7}}};
+ cudf::lists::concatenate_list_elements(
+ ll_column, cudf::lists::concatenate_null_policy::IGNORE, cudf::test::get_default_stream());
+}
+
+TEST_F(ListTest, ContainsNulls)
+{
+ cudf::test::lists_column_wrapper list_col{{0, 1}, {2, 3}, {4, 5}};
+ cudf::lists::contains_nulls(list_col, cudf::test::get_default_stream());
+}
+
+TEST_F(ListTest, ContainsSearchKey)
+{
+ cudf::test::lists_column_wrapper list_col{{0, 1}, {2, 3}, {4, 5}};
+ cudf::numeric_scalar search_key(2, true, cudf::test::get_default_stream());
+ cudf::lists::contains(list_col, search_key, cudf::test::get_default_stream());
+}
+
+TEST_F(ListTest, ContainsSearchKeys)
+{
+ cudf::test::lists_column_wrapper list_col{{0, 1}, {2, 3}, {4, 5}};
+ cudf::test::fixed_width_column_wrapper search_keys({1, 2, 3});
+ cudf::lists::contains(list_col, search_keys, cudf::test::get_default_stream());
+}
+
+TEST_F(ListTest, IndexOfSearchKey)
+{
+ cudf::test::lists_column_wrapper list_col{{0, 1}, {2, 3}, {4, 5}};
+ cudf::numeric_scalar search_key(2, true, cudf::test::get_default_stream());
+ cudf::lists::index_of(list_col,
+ search_key,
+ cudf::lists::duplicate_find_option::FIND_FIRST,
+ cudf::test::get_default_stream());
+}
+
+TEST_F(ListTest, IndexOfSearchKeys)
+{
+ cudf::test::lists_column_wrapper list_col{{0, 1}, {2, 3}, {4, 5}};
+ cudf::test::fixed_width_column_wrapper search_keys({1, 2, 3});
+ cudf::lists::index_of(list_col,
+ search_keys,
+ cudf::lists::duplicate_find_option::FIND_FIRST,
+ cudf::test::get_default_stream());
+}
+
+TEST_F(ListTest, CountElements)
+{
+ cudf::test::lists_column_wrapper list_col{{0, 1}, {2, 3, 7}, {4, 5}};
+ cudf::lists::count_elements(list_col, cudf::test::get_default_stream());
+}

From b4fd77b30311f3b1de39cac22423f2c3a32ec72d Mon Sep 17 00:00:00 2001
From: nvdbaranec <56695930+nvdbaranec@users.noreply.github.com>
Date: Tue, 10 Oct 2023 12:20:42 -0500
Subject: [PATCH 136/150] Centralize chunked reading code in the parquet reader to reader_impl_chunking.cu (#14262)

As a precursor to further chunked reader work, this PR centralizes
chunk-related code (mostly from the `reader::impl` class) into
`reader_impl_chunking.cu` and `reader_impl_chunking.hpp`. Also cleans up some
variable naming and locations in `reader::impl` and the
`file_intermediate_data` and `pass_intermediate_data` classes.
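For orientation, these passes and chunks are what the public chunked reader drives. A minimal
sketch of a caller, assuming the `cudf::io::chunked_parquet_reader` overload that takes both an
output chunk limit and a pass read limit (the file name and byte limits here are invented for
illustration):

```cpp
#include <cudf/io/parquet.hpp>

// Read a parquet file in bounded pieces: chunk_read_limit caps the size of
// each returned table, while pass_read_limit caps the temporary
// (compressed + decompressed) memory held while decoding a pass.
void read_in_chunks()
{
  auto const options =
    cudf::io::parquet_reader_options::builder(cudf::io::source_info{"example.parquet"}).build();

  auto reader = cudf::io::chunked_parquet_reader(
    512 * 1024 * 1024,   // chunk_read_limit: ~512MB per output table
    1024 * 1024 * 1024,  // pass_read_limit: ~1GB of decode scratch per pass
    options);

  while (reader.has_next()) {
    auto chunk = reader.read_chunk();  // a cudf::io::table_with_metadata
    // ... consume chunk.tbl ...
  }
}
```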
Authors: - https://github.com/nvdbaranec Approvers: - Vukasin Milovanovic (https://github.com/vuule) - Robert Maynard (https://github.com/robertmaynard) - Nghia Truong (https://github.com/ttnghia) URL: https://github.com/rapidsai/cudf/pull/14262 --- cpp/CMakeLists.txt | 1 + cpp/src/io/parquet/parquet_gpu.hpp | 73 --- cpp/src/io/parquet/reader_impl.cpp | 12 +- cpp/src/io/parquet/reader_impl.hpp | 49 +- cpp/src/io/parquet/reader_impl_chunking.cu | 598 +++++++++++++++++++ cpp/src/io/parquet/reader_impl_chunking.hpp | 87 +++ cpp/src/io/parquet/reader_impl_helpers.hpp | 17 + cpp/src/io/parquet/reader_impl_preprocess.cu | 558 +---------------- cpp/src/io/utilities/column_buffer.cpp | 10 +- 9 files changed, 751 insertions(+), 654 deletions(-) create mode 100644 cpp/src/io/parquet/reader_impl_chunking.cu create mode 100644 cpp/src/io/parquet/reader_impl_chunking.hpp diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index 000f80065ab..f8b9762f1d4 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -401,6 +401,7 @@ add_library( src/io/parquet/predicate_pushdown.cpp src/io/parquet/reader.cpp src/io/parquet/reader_impl.cpp + src/io/parquet/reader_impl_chunking.cu src/io/parquet/reader_impl_helpers.cpp src/io/parquet/reader_impl_preprocess.cu src/io/parquet/writer_impl.cu diff --git a/cpp/src/io/parquet/parquet_gpu.hpp b/cpp/src/io/parquet/parquet_gpu.hpp index 767668cc65e..6a93fec0c46 100644 --- a/cpp/src/io/parquet/parquet_gpu.hpp +++ b/cpp/src/io/parquet/parquet_gpu.hpp @@ -318,79 +318,6 @@ struct ColumnChunkDesc { int32_t src_col_schema{}; // my schema index in the file }; -/** - * @brief The row_group_info class - */ -struct row_group_info { - size_type index; // row group index within a file. aggregate_reader_metadata::get_row_group() is - // called with index and source_index - size_t start_row; - size_type source_index; // file index. - - row_group_info() = default; - - row_group_info(size_type index, size_t start_row, size_type source_index) - : index{index}, start_row{start_row}, source_index{source_index} - { - } -}; - -/** - * @brief Struct to store file-level data that remains constant for - * all passes/chunks for the file. - */ -struct file_intermediate_data { - // all row groups to read - std::vector row_groups{}; - - // all chunks from the selected row groups. We may end up reading these chunks progressively - // instead of all at once - std::vector chunks{}; - - // skip_rows/num_rows values for the entire file. these need to be adjusted per-pass because we - // may not be visiting every row group that contains these bounds - size_t global_skip_rows; - size_t global_num_rows; -}; - -/** - * @brief Structs to identify the reading row range for each chunk of rows in chunked reading. - */ -struct chunk_read_info { - size_t skip_rows; - size_t num_rows; -}; - -/** - * @brief Struct to store pass-level data that remains constant for a single pass. - */ -struct pass_intermediate_data { - std::vector> raw_page_data; - rmm::device_buffer decomp_page_data; - - // rowgroup, chunk and page information for the current pass. 
- std::vector row_groups{}; - cudf::detail::hostdevice_vector chunks{}; - cudf::detail::hostdevice_vector pages_info{}; - cudf::detail::hostdevice_vector page_nesting_info{}; - cudf::detail::hostdevice_vector page_nesting_decode_info{}; - - rmm::device_uvector page_keys{0, rmm::cuda_stream_default}; - rmm::device_uvector page_index{0, rmm::cuda_stream_default}; - rmm::device_uvector str_dict_index{0, rmm::cuda_stream_default}; - - std::vector output_chunk_read_info; - std::size_t current_output_chunk{0}; - - rmm::device_buffer level_decode_data{}; - int level_type_size{0}; - - // skip_rows and num_rows values for this particular pass. these may be adjusted values from the - // global values stored in file_intermediate_data. - size_t skip_rows; - size_t num_rows; -}; - /** * @brief Struct describing an encoder column */ diff --git a/cpp/src/io/parquet/reader_impl.cpp b/cpp/src/io/parquet/reader_impl.cpp index 26ec83d5946..db81222157a 100644 --- a/cpp/src/io/parquet/reader_impl.cpp +++ b/cpp/src/io/parquet/reader_impl.cpp @@ -349,14 +349,14 @@ void reader::impl::prepare_data(int64_t skip_rows, not _input_columns.empty()) { // fills in chunk information without physically loading or decompressing // the associated data - load_global_chunk_info(); + create_global_chunk_info(); // compute schedule of input reads. Each rowgroup contains 1 chunk per column. For now // we will read an entire row group at a time. However, it is possible to do // sub-rowgroup reads if we made some estimates on individual chunk sizes (tricky) and // changed the high level structure such that we weren't always reading an entire table's // worth of columns at once. - compute_input_pass_row_group_info(); + compute_input_passes(); } _file_preprocessed = true; @@ -364,7 +364,7 @@ void reader::impl::prepare_data(int64_t skip_rows, // if we have to start a new pass, do that now if (!_pass_preprocessed) { - auto const num_passes = _input_pass_row_group_offsets.size() - 1; + auto const num_passes = _file_itm_data.input_pass_row_group_offsets.size() - 1; // always create the pass struct, even if we end up with no passes. // this will also cause the previous pass information to be deleted @@ -373,7 +373,7 @@ void reader::impl::prepare_data(int64_t skip_rows, if (_file_itm_data.global_num_rows > 0 && not _file_itm_data.row_groups.empty() && not _input_columns.empty() && _current_input_pass < num_passes) { // setup the pass_intermediate_info for this pass. - setup_pass(); + setup_next_pass(); load_and_decompress_data(); preprocess_pages(uses_custom_row_bounds, _output_chunk_read_limit); @@ -541,8 +541,8 @@ bool reader::impl::has_next() {} /*row_group_indices, empty means read all row groups*/, std::nullopt /*filter*/); - auto const num_input_passes = - _input_pass_row_group_offsets.size() == 0 ? 
0 : _input_pass_row_group_offsets.size() - 1;
+ size_t const num_input_passes = std::max(
+ int64_t{0}, static_cast(_file_itm_data.input_pass_row_group_offsets.size()) - 1);
 return (_pass_itm_data->current_output_chunk < _pass_itm_data->output_chunk_read_info.size()) ||
 (_current_input_pass < num_input_passes);
 }
diff --git a/cpp/src/io/parquet/reader_impl.hpp b/cpp/src/io/parquet/reader_impl.hpp
index 6003b931b04..cea4ba35606 100644
--- a/cpp/src/io/parquet/reader_impl.hpp
+++ b/cpp/src/io/parquet/reader_impl.hpp
@@ -22,6 +22,7 @@
 #pragma once

 #include "parquet_gpu.hpp"
+#include "reader_impl_chunking.hpp"
 #include "reader_impl_helpers.hpp"

 #include

@@ -136,10 +137,6 @@ class reader::impl {
 host_span const> row_group_indices,
 std::optional> filter);

- void load_global_chunk_info();
- void compute_input_pass_row_group_info();
- void setup_pass();
-
 /**
 * @brief Create chunk information and start file reads
 *
@@ -250,6 +247,31 @@ class reader::impl {
 */
 void decode_page_data(size_t skip_rows, size_t num_rows);

+ /**
+ * @brief Creates file-wide parquet chunk information.
+ *
+ * Creates information about all chunks in the file, storing it in
+ * the file-wide _file_itm_data structure.
+ */
+ void create_global_chunk_info();
+
+ /**
+ * @brief Computes all of the passes we will perform over the file.
+ */
+ void compute_input_passes();
+
+ /**
+ * @brief Close out the existing pass (if any) and prepare for the next pass.
+ */
+ void setup_next_pass();
+
+ /**
+ * @brief Given a set of pages that have had their sizes computed by nesting level and
+ * a limit on total read size, generate a set of {skip_rows, num_rows} pairs representing
+ * a set of reads that will generate output columns of total size <= `chunk_read_limit` bytes.
+ */
+ void compute_splits_for_pass();
+
 private:
 rmm::cuda_stream_view _stream;
 rmm::mr::device_memory_resource* _mr = nullptr;

@@ -278,7 +300,7 @@ class reader::impl {
 // chunked reading happens in 2 parts:
 //
- // At the top level, the entire file is divided up into "passes" on which we try and limit the
- std::vector _input_pass_row_group_offsets{}; - std::vector _input_pass_row_count{}; - std::size_t _current_input_pass{0}; - std::size_t _chunk_count{0}; + std::size_t _output_chunk_read_limit{0}; // output chunk size limit in bytes + std::size_t _input_pass_read_limit{0}; // input pass memory usage limit in bytes - std::size_t _output_chunk_read_limit{0}; - std::size_t _input_pass_read_limit{0}; - bool _pass_preprocessed{false}; - bool _file_preprocessed{false}; + std::size_t _current_input_pass{0}; // current input pass index + std::size_t _chunk_count{0}; // how many output chunks we have produced }; } // namespace cudf::io::parquet::detail diff --git a/cpp/src/io/parquet/reader_impl_chunking.cu b/cpp/src/io/parquet/reader_impl_chunking.cu new file mode 100644 index 00000000000..ad52a7dfcc1 --- /dev/null +++ b/cpp/src/io/parquet/reader_impl_chunking.cu @@ -0,0 +1,598 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "reader_impl.hpp" +#include "reader_impl_chunking.hpp" + +#include +#include + +#include + +#include + +#include +#include +#include +#include + +namespace cudf::io::parquet::detail { + +namespace { + +struct cumulative_row_info { + size_t row_count; // cumulative row count + size_t size_bytes; // cumulative size in bytes + int key; // schema index +}; + +#if defined(CHUNKING_DEBUG) +void print_cumulative_page_info(cudf::detail::hostdevice_vector& pages, + rmm::device_uvector const& page_index, + rmm::device_uvector const& c_info, + rmm::cuda_stream_view stream) +{ + pages.device_to_host_sync(stream); + + printf("------------\nCumulative sizes by page\n"); + + std::vector schemas(pages.size()); + std::vector h_page_index(pages.size()); + CUDF_CUDA_TRY(cudaMemcpy( + h_page_index.data(), page_index.data(), sizeof(int) * pages.size(), cudaMemcpyDefault)); + std::vector h_cinfo(pages.size()); + CUDF_CUDA_TRY(cudaMemcpy( + h_cinfo.data(), c_info.data(), sizeof(cumulative_row_info) * pages.size(), cudaMemcpyDefault)); + auto schema_iter = cudf::detail::make_counting_transform_iterator( + 0, [&](size_type i) { return pages[h_page_index[i]].src_col_schema; }); + thrust::copy(thrust::seq, schema_iter, schema_iter + pages.size(), schemas.begin()); + auto last = thrust::unique(thrust::seq, schemas.begin(), schemas.end()); + schemas.resize(last - schemas.begin()); + printf("Num schemas: %lu\n", schemas.size()); + + for (size_t idx = 0; idx < schemas.size(); idx++) { + printf("Schema %d\n", schemas[idx]); + for (size_t pidx = 0; pidx < pages.size(); pidx++) { + auto const& page = pages[h_page_index[pidx]]; + if (page.flags & PAGEINFO_FLAGS_DICTIONARY || page.src_col_schema != schemas[idx]) { + continue; + } + printf("\tP: {%lu, %lu}\n", h_cinfo[pidx].row_count, h_cinfo[pidx].size_bytes); + } + } +} + +void print_cumulative_row_info(host_span sizes, + std::string const& label, + std::optional> splits = std::nullopt) +{ + if (splits.has_value()) { + printf("------------\nSplits\n"); + for (size_t idx = 0; idx < 
splits->size(); idx++) { + printf("{%lu, %lu}\n", splits.value()[idx].skip_rows, splits.value()[idx].num_rows); + } + } + + printf("------------\nCumulative sizes %s\n", label.c_str()); + for (size_t idx = 0; idx < sizes.size(); idx++) { + printf("{%lu, %lu, %d}", sizes[idx].row_count, sizes[idx].size_bytes, sizes[idx].key); + if (splits.has_value()) { + // if we have a split at this row count and this is the last instance of this row count + auto start = thrust::make_transform_iterator( + splits->begin(), [](chunk_read_info const& i) { return i.skip_rows; }); + auto end = start + splits->size(); + auto split = std::find(start, end, sizes[idx].row_count); + auto const split_index = [&]() -> int { + if (split != end && + ((idx == sizes.size() - 1) || (sizes[idx + 1].row_count > sizes[idx].row_count))) { + return static_cast(std::distance(start, split)); + } + return idx == 0 ? 0 : -1; + }(); + if (split_index >= 0) { + printf(" <-- split {%lu, %lu}", + splits.value()[split_index].skip_rows, + splits.value()[split_index].num_rows); + } + } + printf("\n"); + } +} +#endif // CHUNKING_DEBUG + +/** + * @brief Functor which reduces two cumulative_row_info structs of the same key. + */ +struct cumulative_row_sum { + cumulative_row_info operator() + __device__(cumulative_row_info const& a, cumulative_row_info const& b) const + { + return cumulative_row_info{a.row_count + b.row_count, a.size_bytes + b.size_bytes, a.key}; + } +}; + +/** + * @brief Functor which computes the total data size for a given type of cudf column. + * + * In the case of strings, the return size does not include the chars themselves. That + * information is tracked separately (see PageInfo::str_bytes). + */ +struct row_size_functor { + __device__ size_t validity_size(size_t num_rows, bool nullable) + { + return nullable ? (cudf::util::div_rounding_up_safe(num_rows, size_t{32}) * 4) : 0; + } + + template + __device__ size_t operator()(size_t num_rows, bool nullable) + { + auto const element_size = sizeof(device_storage_type_t); + return (element_size * num_rows) + validity_size(num_rows, nullable); + } +}; + +template <> +__device__ size_t row_size_functor::operator()(size_t num_rows, bool nullable) +{ + auto const offset_size = sizeof(size_type); + // NOTE: Adding the + 1 offset here isn't strictly correct. There will only be 1 extra offset + // for the entire column, whereas this is adding an extra offset per page. So we will get a + // small over-estimate of the real size of the order : # of pages * 4 bytes. It seems better + // to overestimate size somewhat than to underestimate it and potentially generate chunks + // that are too large. + return (offset_size * (num_rows + 1)) + validity_size(num_rows, nullable); +} + +template <> +__device__ size_t row_size_functor::operator()(size_t num_rows, bool nullable) +{ + return validity_size(num_rows, nullable); +} + +template <> +__device__ size_t row_size_functor::operator()(size_t num_rows, bool nullable) +{ + // only returns the size of offsets and validity. the size of the actual string chars + // is tracked separately. + auto const offset_size = sizeof(size_type); + // see note about offsets in the list_view template. + return (offset_size * (num_rows + 1)) + validity_size(num_rows, nullable); +} + +/** + * @brief Functor which computes the total output cudf data size for all of + * the data in this page. + * + * Sums across all nesting levels. 
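+ *
+ * As an illustration (numbers invented for this note): a page holding 100
+ * nullable INT32 rows at a single nesting level contributes 100 * 4 = 400
+ * data bytes plus ceil(100 / 32) * 4 = 16 validity-mask bytes, with string
+ * bytes tracked separately via PageInfo::str_bytes.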
+ */ +struct get_cumulative_row_info { + PageInfo const* const pages; + + __device__ cumulative_row_info operator()(size_type index) + { + auto const& page = pages[index]; + if (page.flags & PAGEINFO_FLAGS_DICTIONARY) { + return cumulative_row_info{0, 0, page.src_col_schema}; + } + + // total nested size, not counting string data + auto iter = + cudf::detail::make_counting_transform_iterator(0, [page, index] __device__(size_type i) { + auto const& pni = page.nesting[i]; + return cudf::type_dispatcher( + data_type{pni.type}, row_size_functor{}, pni.size, pni.nullable); + }); + + size_t const row_count = static_cast(page.nesting[0].size); + return { + row_count, + thrust::reduce(thrust::seq, iter, iter + page.num_output_nesting_levels) + page.str_bytes, + page.src_col_schema}; + } +}; + +/** + * @brief Functor which computes the effective size of all input columns by page. + * + * For a given row, we want to find the cost of all pages for all columns involved + * in loading up to that row. The complication here is that not all pages are the + * same size between columns. Example: + * + * page row counts + * Column A: 0 <----> 100 <----> 200 + * Column B: 0 <---------------> 200 <--------> 400 + | + * if we decide to split at row 100, we don't really know the actual amount of bytes in column B + * at that point. So we have to proceed as if we are taking the bytes from all 200 rows of that + * page. Essentially, a conservative over-estimate of the real size. + */ +struct row_total_size { + cumulative_row_info const* c_info; + size_type const* key_offsets; + size_t num_keys; + + __device__ cumulative_row_info operator()(cumulative_row_info const& i) + { + // sum sizes for each input column at this row + size_t sum = 0; + for (int idx = 0; idx < num_keys; idx++) { + auto const start = key_offsets[idx]; + auto const end = key_offsets[idx + 1]; + auto iter = cudf::detail::make_counting_transform_iterator( + 0, [&] __device__(size_type i) { return c_info[i].row_count; }); + auto const page_index = + thrust::lower_bound(thrust::seq, iter + start, iter + end, i.row_count) - iter; + sum += c_info[page_index].size_bytes; + } + return {i.row_count, sum, i.key}; + } +}; + +/** + * @brief Given a vector of cumulative {row_count, byte_size} pairs and a chunk read + * limit, determine the set of splits. + * + * @param sizes Vector of cumulative {row_count, byte_size} pairs + * @param num_rows Total number of rows to read + * @param chunk_read_limit Limit on total number of bytes to be returned per read, for all columns + */ +std::vector find_splits(std::vector const& sizes, + size_t num_rows, + size_t chunk_read_limit) +{ + // now we have an array of {row_count, real output bytes}. just walk through it and generate + // splits. + // TODO: come up with a clever way to do this entirely in parallel. For now, as long as batch + // sizes are reasonably large, this shouldn't iterate too many times + std::vector splits; + { + size_t cur_pos = 0; + size_t cur_cumulative_size = 0; + size_t cur_row_count = 0; + auto start = thrust::make_transform_iterator(sizes.begin(), [&](cumulative_row_info const& i) { + return i.size_bytes - cur_cumulative_size; + }); + auto end = start + sizes.size(); + while (cur_row_count < num_rows) { + int64_t split_pos = + thrust::lower_bound(thrust::seq, start + cur_pos, end, chunk_read_limit) - start; + + // if we're past the end, or if the returned bucket is > than the chunk_read_limit, move back + // one. 
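+ // (illustration with invented numbers: if the adjusted cumulative sizes
+ // are {5000, 12000, 26000} bytes and chunk_read_limit is 20000, lower_bound
+ // lands on 26000, which overshoots the limit, so we back up to 12000)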
+ if (static_cast(split_pos) >= sizes.size() || + (sizes[split_pos].size_bytes - cur_cumulative_size > chunk_read_limit)) { + split_pos--; + } + + // best-try. if we can't find something that'll fit, we have to go bigger. we're doing this in + // a loop because all of the cumulative sizes for all the pages are sorted into one big list. + // so if we had two columns, both of which had an entry {1000, 10000}, that entry would be in + // the list twice. so we have to iterate until we skip past all of them. The idea is that we + // either do this, or we have to call unique() on the input first. + while (split_pos < (static_cast(sizes.size()) - 1) && + (split_pos < 0 || sizes[split_pos].row_count == cur_row_count)) { + split_pos++; + } + + auto const start_row = cur_row_count; + cur_row_count = sizes[split_pos].row_count; + splits.push_back(chunk_read_info{start_row, cur_row_count - start_row}); + cur_pos = split_pos; + cur_cumulative_size = sizes[split_pos].size_bytes; + } + } + // print_cumulative_row_info(sizes, "adjusted", splits); + + return splits; +} + +/** + * @brief Converts cuDF units to Parquet units. + * + * @return A tuple of Parquet type width, Parquet clock rate and Parquet decimal type. + */ +[[nodiscard]] std::tuple conversion_info(type_id column_type_id, + type_id timestamp_type_id, + Type physical, + int8_t converted, + int32_t length) +{ + int32_t type_width = (physical == FIXED_LEN_BYTE_ARRAY) ? length : 0; + int32_t clock_rate = 0; + if (column_type_id == type_id::INT8 or column_type_id == type_id::UINT8) { + type_width = 1; // I32 -> I8 + } else if (column_type_id == type_id::INT16 or column_type_id == type_id::UINT16) { + type_width = 2; // I32 -> I16 + } else if (column_type_id == type_id::INT32) { + type_width = 4; // str -> hash32 + } else if (is_chrono(data_type{column_type_id})) { + clock_rate = to_clockrate(timestamp_type_id); + } + + int8_t converted_type = converted; + if (converted_type == DECIMAL && column_type_id != type_id::FLOAT64 && + not cudf::is_fixed_point(data_type{column_type_id})) { + converted_type = UNKNOWN; // Not converting to float64 or decimal + } + return std::make_tuple(type_width, clock_rate, converted_type); +} + +/** + * @brief Return the required number of bits to store a value. 
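+ *
+ * For example, required_bits(3) is 2: definition/repetition levels 0..3 fit
+ * in two bits.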
+ */ +template +[[nodiscard]] T required_bits(uint32_t max_level) +{ + return static_cast(CompactProtocolReader::NumRequiredBits(max_level)); +} + +struct row_count_compare { + __device__ bool operator()(cumulative_row_info const& a, cumulative_row_info const& b) + { + return a.row_count < b.row_count; + } +}; + +} // anonymous namespace + +void reader::impl::create_global_chunk_info() +{ + auto const num_rows = _file_itm_data.global_num_rows; + auto const& row_groups_info = _file_itm_data.row_groups; + auto& chunks = _file_itm_data.chunks; + + // Descriptors for all the chunks that make up the selected columns + auto const num_input_columns = _input_columns.size(); + auto const num_chunks = row_groups_info.size() * num_input_columns; + + // Initialize column chunk information + auto remaining_rows = num_rows; + for (auto const& rg : row_groups_info) { + auto const& row_group = _metadata->get_row_group(rg.index, rg.source_index); + auto const row_group_start = rg.start_row; + auto const row_group_rows = std::min(remaining_rows, row_group.num_rows); + + // generate ColumnChunkDesc objects for everything to be decoded (all input columns) + for (size_t i = 0; i < num_input_columns; ++i) { + auto col = _input_columns[i]; + // look up metadata + auto& col_meta = _metadata->get_column_metadata(rg.index, rg.source_index, col.schema_idx); + auto& schema = _metadata->get_schema(col.schema_idx); + + auto [type_width, clock_rate, converted_type] = + conversion_info(to_type_id(schema, _strings_to_categorical, _timestamp_type.id()), + _timestamp_type.id(), + schema.type, + schema.converted_type, + schema.type_length); + + chunks.push_back(ColumnChunkDesc(col_meta.total_compressed_size, + nullptr, + col_meta.num_values, + schema.type, + type_width, + row_group_start, + row_group_rows, + schema.max_definition_level, + schema.max_repetition_level, + _metadata->get_output_nesting_depth(col.schema_idx), + required_bits(schema.max_definition_level), + required_bits(schema.max_repetition_level), + col_meta.codec, + converted_type, + schema.logical_type, + schema.decimal_precision, + clock_rate, + i, + col.schema_idx)); + } + + remaining_rows -= row_group_rows; + } +} + +void reader::impl::compute_input_passes() +{ + // at this point, row_groups has already been filtered down to just the row groups we need to + // handle optional skip_rows/num_rows parameters. + auto const& row_groups_info = _file_itm_data.row_groups; + + // if the user hasn't specified an input size limit, read everything in a single pass. + if (_input_pass_read_limit == 0) { + _file_itm_data.input_pass_row_group_offsets.push_back(0); + _file_itm_data.input_pass_row_group_offsets.push_back(row_groups_info.size()); + return; + } + + // generate passes. make sure to account for the case where a single row group doesn't fit within + // + std::size_t const read_limit = + _input_pass_read_limit > 0 ? 
_input_pass_read_limit : std::numeric_limits::max(); + std::size_t cur_pass_byte_size = 0; + std::size_t cur_rg_start = 0; + std::size_t cur_row_count = 0; + _file_itm_data.input_pass_row_group_offsets.push_back(0); + _file_itm_data.input_pass_row_count.push_back(0); + + for (size_t cur_rg_index = 0; cur_rg_index < row_groups_info.size(); cur_rg_index++) { + auto const& rgi = row_groups_info[cur_rg_index]; + auto const& row_group = _metadata->get_row_group(rgi.index, rgi.source_index); + + // can we add this row group + if (cur_pass_byte_size + row_group.total_byte_size >= read_limit) { + // A single row group (the current one) is larger than the read limit: + // We always need to include at least one row group, so end the pass at the end of the current + // row group + if (cur_rg_start == cur_rg_index) { + _file_itm_data.input_pass_row_group_offsets.push_back(cur_rg_index + 1); + _file_itm_data.input_pass_row_count.push_back(cur_row_count + row_group.num_rows); + cur_rg_start = cur_rg_index + 1; + cur_pass_byte_size = 0; + } + // End the pass at the end of the previous row group + else { + _file_itm_data.input_pass_row_group_offsets.push_back(cur_rg_index); + _file_itm_data.input_pass_row_count.push_back(cur_row_count); + cur_rg_start = cur_rg_index; + cur_pass_byte_size = row_group.total_byte_size; + } + } else { + cur_pass_byte_size += row_group.total_byte_size; + } + cur_row_count += row_group.num_rows; + } + // add the last pass if necessary + if (_file_itm_data.input_pass_row_group_offsets.back() != row_groups_info.size()) { + _file_itm_data.input_pass_row_group_offsets.push_back(row_groups_info.size()); + _file_itm_data.input_pass_row_count.push_back(cur_row_count); + } +} + +void reader::impl::setup_next_pass() +{ + // this will also cause the previous pass information to be deleted + _pass_itm_data = std::make_unique(); + + // setup row groups to be loaded for this pass + auto const row_group_start = _file_itm_data.input_pass_row_group_offsets[_current_input_pass]; + auto const row_group_end = _file_itm_data.input_pass_row_group_offsets[_current_input_pass + 1]; + auto const num_row_groups = row_group_end - row_group_start; + _pass_itm_data->row_groups.resize(num_row_groups); + std::copy(_file_itm_data.row_groups.begin() + row_group_start, + _file_itm_data.row_groups.begin() + row_group_end, + _pass_itm_data->row_groups.begin()); + + auto const num_passes = _file_itm_data.input_pass_row_group_offsets.size() - 1; + CUDF_EXPECTS(_current_input_pass < num_passes, "Encountered an invalid read pass index"); + + auto const chunks_per_rowgroup = _input_columns.size(); + auto const num_chunks = chunks_per_rowgroup * num_row_groups; + + auto chunk_start = _file_itm_data.chunks.begin() + (row_group_start * chunks_per_rowgroup); + auto chunk_end = _file_itm_data.chunks.begin() + (row_group_end * chunks_per_rowgroup); + + _pass_itm_data->chunks = cudf::detail::hostdevice_vector(num_chunks, _stream); + std::copy(chunk_start, chunk_end, _pass_itm_data->chunks.begin()); + + // adjust skip_rows and num_rows by what's available in the row groups we are processing + if (num_passes == 1) { + _pass_itm_data->skip_rows = _file_itm_data.global_skip_rows; + _pass_itm_data->num_rows = _file_itm_data.global_num_rows; + } else { + auto const global_start_row = _file_itm_data.global_skip_rows; + auto const global_end_row = global_start_row + _file_itm_data.global_num_rows; + auto const start_row = + std::max(_file_itm_data.input_pass_row_count[_current_input_pass], global_start_row); + auto const end_row = 
+ std::min(_file_itm_data.input_pass_row_count[_current_input_pass + 1], global_end_row); + + // skip_rows is always global in the sense that it is relative to the first row of + // everything we will be reading, regardless of what pass we are on. + // num_rows is how many rows we are reading this pass. + _pass_itm_data->skip_rows = + global_start_row + _file_itm_data.input_pass_row_count[_current_input_pass]; + _pass_itm_data->num_rows = end_row - start_row; + } +} + +void reader::impl::compute_splits_for_pass() +{ + auto const skip_rows = _pass_itm_data->skip_rows; + auto const num_rows = _pass_itm_data->num_rows; + + // simple case : no chunk size, no splits + if (_output_chunk_read_limit <= 0) { + _pass_itm_data->output_chunk_read_info = std::vector{{skip_rows, num_rows}}; + return; + } + + auto& pages = _pass_itm_data->pages_info; + + auto const& page_keys = _pass_itm_data->page_keys; + auto const& page_index = _pass_itm_data->page_index; + + // generate cumulative row counts and sizes + rmm::device_uvector c_info(page_keys.size(), _stream); + // convert PageInfo to cumulative_row_info + auto page_input = thrust::make_transform_iterator(page_index.begin(), + get_cumulative_row_info{pages.device_ptr()}); + thrust::inclusive_scan_by_key(rmm::exec_policy(_stream), + page_keys.begin(), + page_keys.end(), + page_input, + c_info.begin(), + thrust::equal_to{}, + cumulative_row_sum{}); + // print_cumulative_page_info(pages, page_index, c_info, stream); + + // sort by row count + rmm::device_uvector c_info_sorted{c_info, _stream}; + thrust::sort( + rmm::exec_policy(_stream), c_info_sorted.begin(), c_info_sorted.end(), row_count_compare{}); + + // std::vector h_c_info_sorted(c_info_sorted.size()); + // CUDF_CUDA_TRY(cudaMemcpy(h_c_info_sorted.data(), + // c_info_sorted.data(), + // sizeof(cumulative_row_info) * c_info_sorted.size(), + // cudaMemcpyDefault)); + // print_cumulative_row_info(h_c_info_sorted, "raw"); + + // generate key offsets (offsets to the start of each partition of keys). worst case is 1 page per + // key + rmm::device_uvector key_offsets(page_keys.size() + 1, _stream); + auto const key_offsets_end = thrust::reduce_by_key(rmm::exec_policy(_stream), + page_keys.begin(), + page_keys.end(), + thrust::make_constant_iterator(1), + thrust::make_discard_iterator(), + key_offsets.begin()) + .second; + size_t const num_unique_keys = key_offsets_end - key_offsets.begin(); + thrust::exclusive_scan( + rmm::exec_policy(_stream), key_offsets.begin(), key_offsets.end(), key_offsets.begin()); + + // adjust the cumulative info such that for each row count, the size includes any pages that span + // that row count. this is so that if we have this case: + // page row counts + // Column A: 0 <----> 100 <----> 200 + // Column B: 0 <---------------> 200 <--------> 400 + // | + // if we decide to split at row 100, we don't really know the actual amount of bytes in column B + // at that point. So we have to proceed as if we are taking the bytes from all 200 rows of that + // page. 
+ // + rmm::device_uvector aggregated_info(c_info.size(), _stream); + thrust::transform(rmm::exec_policy(_stream), + c_info_sorted.begin(), + c_info_sorted.end(), + aggregated_info.begin(), + row_total_size{c_info.data(), key_offsets.data(), num_unique_keys}); + + // bring back to the cpu + std::vector h_aggregated_info(aggregated_info.size()); + CUDF_CUDA_TRY(cudaMemcpyAsync(h_aggregated_info.data(), + aggregated_info.data(), + sizeof(cumulative_row_info) * c_info.size(), + cudaMemcpyDefault, + _stream.value())); + _stream.synchronize(); + + // generate the actual splits + _pass_itm_data->output_chunk_read_info = + find_splits(h_aggregated_info, num_rows, _output_chunk_read_limit); +} + +} // namespace cudf::io::parquet::detail diff --git a/cpp/src/io/parquet/reader_impl_chunking.hpp b/cpp/src/io/parquet/reader_impl_chunking.hpp new file mode 100644 index 00000000000..dfc239d8451 --- /dev/null +++ b/cpp/src/io/parquet/reader_impl_chunking.hpp @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include "reader_impl_helpers.hpp" + +#include + +namespace cudf::io::parquet::detail { + +/** + * @brief Struct to store file-level data that remains constant for + * all passes/chunks in the file. + */ +struct file_intermediate_data { + // all row groups to read + std::vector row_groups{}; + + // all chunks from the selected row groups. We may end up reading these chunks progressively + // instead of all at once + std::vector chunks{}; + + // an array of offsets into _file_itm_data::global_chunks. Each pair of offsets represents + // the start/end of the chunks to be loaded for a given pass. + std::vector input_pass_row_group_offsets{}; + // row counts per input-pass + std::vector input_pass_row_count{}; + + // skip_rows/num_rows values for the entire file. these need to be adjusted per-pass because we + // may not be visiting every row group that contains these bounds + size_t global_skip_rows; + size_t global_num_rows; +}; + +/** + * @brief Struct to identify the range for each chunk of rows during a chunked reading pass. + */ +struct chunk_read_info { + size_t skip_rows; + size_t num_rows; +}; + +/** + * @brief Struct to store pass-level data that remains constant for a single pass. + */ +struct pass_intermediate_data { + std::vector> raw_page_data; + rmm::device_buffer decomp_page_data; + + // rowgroup, chunk and page information for the current pass. 
+ std::vector row_groups{}; + cudf::detail::hostdevice_vector chunks{}; + cudf::detail::hostdevice_vector pages_info{}; + cudf::detail::hostdevice_vector page_nesting_info{}; + cudf::detail::hostdevice_vector page_nesting_decode_info{}; + + rmm::device_uvector page_keys{0, rmm::cuda_stream_default}; + rmm::device_uvector page_index{0, rmm::cuda_stream_default}; + rmm::device_uvector str_dict_index{0, rmm::cuda_stream_default}; + + std::vector output_chunk_read_info; + std::size_t current_output_chunk{0}; + + rmm::device_buffer level_decode_data{}; + int level_type_size{0}; + + // skip_rows and num_rows values for this particular pass. these may be adjusted values from the + // global values stored in file_intermediate_data. + size_t skip_rows; + size_t num_rows; +}; + +} // namespace cudf::io::parquet::detail diff --git a/cpp/src/io/parquet/reader_impl_helpers.hpp b/cpp/src/io/parquet/reader_impl_helpers.hpp index 1a73e2f55ac..8d8ab8707be 100644 --- a/cpp/src/io/parquet/reader_impl_helpers.hpp +++ b/cpp/src/io/parquet/reader_impl_helpers.hpp @@ -34,6 +34,23 @@ namespace cudf::io::parquet::detail { +/** + * @brief The row_group_info class + */ +struct row_group_info { + size_type index; // row group index within a file. aggregate_reader_metadata::get_row_group() is + // called with index and source_index + size_t start_row; + size_type source_index; // file index. + + row_group_info() = default; + + row_group_info(size_type index, size_t start_row, size_type source_index) + : index{index}, start_row{start_row}, source_index{source_index} + { + } +}; + /** * @brief Function that translates Parquet datatype to cuDF type enum */ diff --git a/cpp/src/io/parquet/reader_impl_preprocess.cu b/cpp/src/io/parquet/reader_impl_preprocess.cu index 4bc6bb6f43b..ce45f709ee1 100644 --- a/cpp/src/io/parquet/reader_impl_preprocess.cu +++ b/cpp/src/io/parquet/reader_impl_preprocess.cu @@ -18,7 +18,6 @@ #include #include -#include #include #include @@ -44,7 +43,6 @@ #include namespace cudf::io::parquet::detail { - namespace { /** @@ -170,46 +168,6 @@ void generate_depth_remappings(std::map, std::ve } } -/** - * @brief Return the required number of bits to store a value. - */ -template -[[nodiscard]] T required_bits(uint32_t max_level) -{ - return static_cast(CompactProtocolReader::NumRequiredBits(max_level)); -} - -/** - * @brief Converts cuDF units to Parquet units. - * - * @return A tuple of Parquet type width, Parquet clock rate and Parquet decimal type. - */ -[[nodiscard]] std::tuple conversion_info(type_id column_type_id, - type_id timestamp_type_id, - Type physical, - int8_t converted, - int32_t length) -{ - int32_t type_width = (physical == FIXED_LEN_BYTE_ARRAY) ? length : 0; - int32_t clock_rate = 0; - if (column_type_id == type_id::INT8 or column_type_id == type_id::UINT8) { - type_width = 1; // I32 -> I8 - } else if (column_type_id == type_id::INT16 or column_type_id == type_id::UINT16) { - type_width = 2; // I32 -> I16 - } else if (column_type_id == type_id::INT32) { - type_width = 4; // str -> hash32 - } else if (is_chrono(data_type{column_type_id})) { - clock_rate = to_clockrate(timestamp_type_id); - } - - int8_t converted_type = converted; - if (converted_type == DECIMAL && column_type_id != type_id::FLOAT64 && - not cudf::is_fixed_point(data_type{column_type_id})) { - converted_type = UNKNOWN; // Not converting to float64 or decimal - } - return std::make_tuple(type_width, clock_rate, converted_type); -} - /** * @brief Reads compressed page data to device memory. 
* @@ -790,163 +748,6 @@ std::pair>> reader::impl::read_and_decompres return {total_decompressed_size > 0, std::move(read_chunk_tasks)}; } -void reader::impl::load_global_chunk_info() -{ - auto const num_rows = _file_itm_data.global_num_rows; - auto const& row_groups_info = _file_itm_data.row_groups; - auto& chunks = _file_itm_data.chunks; - - // Descriptors for all the chunks that make up the selected columns - auto const num_input_columns = _input_columns.size(); - auto const num_chunks = row_groups_info.size() * num_input_columns; - - // Initialize column chunk information - auto remaining_rows = num_rows; - for (auto const& rg : row_groups_info) { - auto const& row_group = _metadata->get_row_group(rg.index, rg.source_index); - auto const row_group_start = rg.start_row; - auto const row_group_rows = std::min(remaining_rows, row_group.num_rows); - - // generate ColumnChunkDesc objects for everything to be decoded (all input columns) - for (size_t i = 0; i < num_input_columns; ++i) { - auto col = _input_columns[i]; - // look up metadata - auto& col_meta = _metadata->get_column_metadata(rg.index, rg.source_index, col.schema_idx); - auto& schema = _metadata->get_schema(col.schema_idx); - - auto [type_width, clock_rate, converted_type] = - conversion_info(to_type_id(schema, _strings_to_categorical, _timestamp_type.id()), - _timestamp_type.id(), - schema.type, - schema.converted_type, - schema.type_length); - - chunks.push_back(ColumnChunkDesc(col_meta.total_compressed_size, - nullptr, - col_meta.num_values, - schema.type, - type_width, - row_group_start, - row_group_rows, - schema.max_definition_level, - schema.max_repetition_level, - _metadata->get_output_nesting_depth(col.schema_idx), - required_bits(schema.max_definition_level), - required_bits(schema.max_repetition_level), - col_meta.codec, - converted_type, - schema.logical_type, - schema.decimal_precision, - clock_rate, - i, - col.schema_idx)); - } - - remaining_rows -= row_group_rows; - } -} - -void reader::impl::compute_input_pass_row_group_info() -{ - // at this point, row_groups has already been filtered down to just the row groups we need to - // handle optional skip_rows/num_rows parameters. - auto const& row_groups_info = _file_itm_data.row_groups; - - // if the user hasn't specified an input size limit, read everything in a single pass. - if (_input_pass_read_limit == 0) { - _input_pass_row_group_offsets.push_back(0); - _input_pass_row_group_offsets.push_back(row_groups_info.size()); - return; - } - - // generate passes. make sure to account for the case where a single row group doesn't fit within - // - std::size_t const read_limit = - _input_pass_read_limit > 0 ? 
_input_pass_read_limit : std::numeric_limits::max(); - std::size_t cur_pass_byte_size = 0; - std::size_t cur_rg_start = 0; - std::size_t cur_row_count = 0; - _input_pass_row_group_offsets.push_back(0); - _input_pass_row_count.push_back(0); - - for (size_t cur_rg_index = 0; cur_rg_index < row_groups_info.size(); cur_rg_index++) { - auto const& rgi = row_groups_info[cur_rg_index]; - auto const& row_group = _metadata->get_row_group(rgi.index, rgi.source_index); - - // can we add this row group - if (cur_pass_byte_size + row_group.total_byte_size >= read_limit) { - // A single row group (the current one) is larger than the read limit: - // We always need to include at least one row group, so end the pass at the end of the current - // row group - if (cur_rg_start == cur_rg_index) { - _input_pass_row_group_offsets.push_back(cur_rg_index + 1); - _input_pass_row_count.push_back(cur_row_count + row_group.num_rows); - cur_rg_start = cur_rg_index + 1; - cur_pass_byte_size = 0; - } - // End the pass at the end of the previous row group - else { - _input_pass_row_group_offsets.push_back(cur_rg_index); - _input_pass_row_count.push_back(cur_row_count); - cur_rg_start = cur_rg_index; - cur_pass_byte_size = row_group.total_byte_size; - } - } else { - cur_pass_byte_size += row_group.total_byte_size; - } - cur_row_count += row_group.num_rows; - } - // add the last pass if necessary - if (_input_pass_row_group_offsets.back() != row_groups_info.size()) { - _input_pass_row_group_offsets.push_back(row_groups_info.size()); - _input_pass_row_count.push_back(cur_row_count); - } -} - -void reader::impl::setup_pass() -{ - // this will also cause the previous pass information to be deleted - _pass_itm_data = std::make_unique(); - - // setup row groups to be loaded for this pass - auto const row_group_start = _input_pass_row_group_offsets[_current_input_pass]; - auto const row_group_end = _input_pass_row_group_offsets[_current_input_pass + 1]; - auto const num_row_groups = row_group_end - row_group_start; - _pass_itm_data->row_groups.resize(num_row_groups); - std::copy(_file_itm_data.row_groups.begin() + row_group_start, - _file_itm_data.row_groups.begin() + row_group_end, - _pass_itm_data->row_groups.begin()); - - auto const num_passes = _input_pass_row_group_offsets.size() - 1; - CUDF_EXPECTS(_current_input_pass < num_passes, "Encountered an invalid read pass index"); - - auto const chunks_per_rowgroup = _input_columns.size(); - auto const num_chunks = chunks_per_rowgroup * num_row_groups; - - auto chunk_start = _file_itm_data.chunks.begin() + (row_group_start * chunks_per_rowgroup); - auto chunk_end = _file_itm_data.chunks.begin() + (row_group_end * chunks_per_rowgroup); - - _pass_itm_data->chunks = cudf::detail::hostdevice_vector(num_chunks, _stream); - std::copy(chunk_start, chunk_end, _pass_itm_data->chunks.begin()); - - // adjust skip_rows and num_rows by what's available in the row groups we are processing - if (num_passes == 1) { - _pass_itm_data->skip_rows = _file_itm_data.global_skip_rows; - _pass_itm_data->num_rows = _file_itm_data.global_num_rows; - } else { - auto const global_start_row = _file_itm_data.global_skip_rows; - auto const global_end_row = global_start_row + _file_itm_data.global_num_rows; - auto const start_row = std::max(_input_pass_row_count[_current_input_pass], global_start_row); - auto const end_row = std::min(_input_pass_row_count[_current_input_pass + 1], global_end_row); - - // skip_rows is always global in the sense that it is relative to the first row of - // everything we will be 
reading, regardless of what pass we are on. - // num_rows is how many rows we are reading this pass. - _pass_itm_data->skip_rows = global_start_row + _input_pass_row_count[_current_input_pass]; - _pass_itm_data->num_rows = end_row - start_row; - } -} - void reader::impl::load_and_decompress_data() { // This function should never be called if `num_rows == 0`. @@ -1034,359 +835,8 @@ void print_pages(cudf::detail::hostdevice_vector& pages, rmm::cuda_str p.str_bytes); } } - -void print_cumulative_page_info(cudf::detail::hostdevice_vector& pages, - rmm::device_uvector const& page_index, - rmm::device_uvector const& c_info, - rmm::cuda_stream_view stream) -{ - pages.device_to_host_sync(stream); - - printf("------------\nCumulative sizes by page\n"); - - std::vector schemas(pages.size()); - std::vector h_page_index(pages.size()); - CUDF_CUDA_TRY(cudaMemcpy( - h_page_index.data(), page_index.data(), sizeof(int) * pages.size(), cudaMemcpyDefault)); - std::vector h_cinfo(pages.size()); - CUDF_CUDA_TRY(cudaMemcpy( - h_cinfo.data(), c_info.data(), sizeof(cumulative_row_info) * pages.size(), cudaMemcpyDefault)); - auto schema_iter = cudf::detail::make_counting_transform_iterator( - 0, [&](size_type i) { return pages[h_page_index[i]].src_col_schema; }); - thrust::copy(thrust::seq, schema_iter, schema_iter + pages.size(), schemas.begin()); - auto last = thrust::unique(thrust::seq, schemas.begin(), schemas.end()); - schemas.resize(last - schemas.begin()); - printf("Num schemas: %lu\n", schemas.size()); - - for (size_t idx = 0; idx < schemas.size(); idx++) { - printf("Schema %d\n", schemas[idx]); - for (size_t pidx = 0; pidx < pages.size(); pidx++) { - auto const& page = pages[h_page_index[pidx]]; - if (page.flags & PAGEINFO_FLAGS_DICTIONARY || page.src_col_schema != schemas[idx]) { - continue; - } - printf("\tP: {%lu, %lu}\n", h_cinfo[pidx].row_count, h_cinfo[pidx].size_bytes); - } - } -} - -void print_cumulative_row_info(host_span sizes, - std::string const& label, - std::optional> splits = std::nullopt) -{ - if (splits.has_value()) { - printf("------------\nSplits\n"); - for (size_t idx = 0; idx < splits->size(); idx++) { - printf("{%lu, %lu}\n", splits.value()[idx].skip_rows, splits.value()[idx].num_rows); - } - } - - printf("------------\nCumulative sizes %s\n", label.c_str()); - for (size_t idx = 0; idx < sizes.size(); idx++) { - printf("{%lu, %lu, %d}", sizes[idx].row_count, sizes[idx].size_bytes, sizes[idx].key); - if (splits.has_value()) { - // if we have a split at this row count and this is the last instance of this row count - auto start = thrust::make_transform_iterator( - splits->begin(), [](chunk_read_info const& i) { return i.skip_rows; }); - auto end = start + splits->size(); - auto split = std::find(start, end, sizes[idx].row_count); - auto const split_index = [&]() -> int { - if (split != end && - ((idx == sizes.size() - 1) || (sizes[idx + 1].row_count > sizes[idx].row_count))) { - return static_cast(std::distance(start, split)); - } - return idx == 0 ? 0 : -1; - }(); - if (split_index >= 0) { - printf(" <-- split {%lu, %lu}", - splits.value()[split_index].skip_rows, - splits.value()[split_index].num_rows); - } - } - printf("\n"); - } -} #endif // PREPROCESS_DEBUG -/** - * @brief Functor which reduces two cumulative_row_info structs of the same key. 
- */ -struct cumulative_row_sum { - cumulative_row_info operator() - __device__(cumulative_row_info const& a, cumulative_row_info const& b) const - { - return cumulative_row_info{a.row_count + b.row_count, a.size_bytes + b.size_bytes, a.key}; - } -}; - -/** - * @brief Functor which computes the total data size for a given type of cudf column. - * - * In the case of strings, the return size does not include the chars themselves. That - * information is tracked separately (see PageInfo::str_bytes). - */ -struct row_size_functor { - __device__ size_t validity_size(size_t num_rows, bool nullable) - { - return nullable ? (cudf::util::div_rounding_up_safe(num_rows, size_t{32}) * 4) : 0; - } - - template - __device__ size_t operator()(size_t num_rows, bool nullable) - { - auto const element_size = sizeof(device_storage_type_t); - return (element_size * num_rows) + validity_size(num_rows, nullable); - } -}; - -template <> -__device__ size_t row_size_functor::operator()(size_t num_rows, bool nullable) -{ - auto const offset_size = sizeof(size_type); - // NOTE: Adding the + 1 offset here isn't strictly correct. There will only be 1 extra offset - // for the entire column, whereas this is adding an extra offset per page. So we will get a - // small over-estimate of the real size of the order : # of pages * 4 bytes. It seems better - // to overestimate size somewhat than to underestimate it and potentially generate chunks - // that are too large. - return (offset_size * (num_rows + 1)) + validity_size(num_rows, nullable); -} - -template <> -__device__ size_t row_size_functor::operator()(size_t num_rows, bool nullable) -{ - return validity_size(num_rows, nullable); -} - -template <> -__device__ size_t row_size_functor::operator()(size_t num_rows, bool nullable) -{ - // only returns the size of offsets and validity. the size of the actual string chars - // is tracked separately. - auto const offset_size = sizeof(size_type); - // see note about offsets in the list_view template. - return (offset_size * (num_rows + 1)) + validity_size(num_rows, nullable); -} - -/** - * @brief Functor which computes the total output cudf data size for all of - * the data in this page. - * - * Sums across all nesting levels. - */ -struct get_cumulative_row_info { - PageInfo const* const pages; - - __device__ cumulative_row_info operator()(size_type index) - { - auto const& page = pages[index]; - if (page.flags & PAGEINFO_FLAGS_DICTIONARY) { - return cumulative_row_info{0, 0, page.src_col_schema}; - } - - // total nested size, not counting string data - auto iter = - cudf::detail::make_counting_transform_iterator(0, [page, index] __device__(size_type i) { - auto const& pni = page.nesting[i]; - return cudf::type_dispatcher( - data_type{pni.type}, row_size_functor{}, pni.size, pni.nullable); - }); - - size_t const row_count = static_cast(page.nesting[0].size); - return { - row_count, - thrust::reduce(thrust::seq, iter, iter + page.num_output_nesting_levels) + page.str_bytes, - page.src_col_schema}; - } -}; - -/** - * @brief Functor which computes the effective size of all input columns by page. - * - * For a given row, we want to find the cost of all pages for all columns involved - * in loading up to that row. The complication here is that not all pages are the - * same size between columns. 
Example: - * - * page row counts - * Column A: 0 <----> 100 <----> 200 - * Column B: 0 <---------------> 200 <--------> 400 - | - * if we decide to split at row 100, we don't really know the actual amount of bytes in column B - * at that point. So we have to proceed as if we are taking the bytes from all 200 rows of that - * page. Essentially, a conservative over-estimate of the real size. - */ -struct row_total_size { - cumulative_row_info const* c_info; - size_type const* key_offsets; - size_t num_keys; - - __device__ cumulative_row_info operator()(cumulative_row_info const& i) - { - // sum sizes for each input column at this row - size_t sum = 0; - for (int idx = 0; idx < num_keys; idx++) { - auto const start = key_offsets[idx]; - auto const end = key_offsets[idx + 1]; - auto iter = cudf::detail::make_counting_transform_iterator( - 0, [&] __device__(size_type i) { return c_info[i].row_count; }); - auto const page_index = - thrust::lower_bound(thrust::seq, iter + start, iter + end, i.row_count) - iter; - sum += c_info[page_index].size_bytes; - } - return {i.row_count, sum, i.key}; - } -}; - -/** - * @brief Given a vector of cumulative {row_count, byte_size} pairs and a chunk read - * limit, determine the set of splits. - * - * @param sizes Vector of cumulative {row_count, byte_size} pairs - * @param num_rows Total number of rows to read - * @param chunk_read_limit Limit on total number of bytes to be returned per read, for all columns - */ -std::vector find_splits(std::vector const& sizes, - size_t num_rows, - size_t chunk_read_limit) -{ - // now we have an array of {row_count, real output bytes}. just walk through it and generate - // splits. - // TODO: come up with a clever way to do this entirely in parallel. For now, as long as batch - // sizes are reasonably large, this shouldn't iterate too many times - std::vector splits; - { - size_t cur_pos = 0; - size_t cur_cumulative_size = 0; - size_t cur_row_count = 0; - auto start = thrust::make_transform_iterator(sizes.begin(), [&](cumulative_row_info const& i) { - return i.size_bytes - cur_cumulative_size; - }); - auto end = start + sizes.size(); - while (cur_row_count < num_rows) { - int64_t split_pos = - thrust::lower_bound(thrust::seq, start + cur_pos, end, chunk_read_limit) - start; - - // if we're past the end, or if the returned bucket is > than the chunk_read_limit, move back - // one. - if (static_cast(split_pos) >= sizes.size() || - (sizes[split_pos].size_bytes - cur_cumulative_size > chunk_read_limit)) { - split_pos--; - } - - // best-try. if we can't find something that'll fit, we have to go bigger. we're doing this in - // a loop because all of the cumulative sizes for all the pages are sorted into one big list. - // so if we had two columns, both of which had an entry {1000, 10000}, that entry would be in - // the list twice. so we have to iterate until we skip past all of them. The idea is that we - // either do this, or we have to call unique() on the input first. 
- while (split_pos < (static_cast(sizes.size()) - 1) && - (split_pos < 0 || sizes[split_pos].row_count == cur_row_count)) { - split_pos++; - } - - auto const start_row = cur_row_count; - cur_row_count = sizes[split_pos].row_count; - splits.push_back(chunk_read_info{start_row, cur_row_count - start_row}); - cur_pos = split_pos; - cur_cumulative_size = sizes[split_pos].size_bytes; - } - } - // print_cumulative_row_info(sizes, "adjusted", splits); - - return splits; -} - -/** - * @brief Given a set of pages that have had their sizes computed by nesting level and - * a limit on total read size, generate a set of {skip_rows, num_rows} pairs representing - * a set of reads that will generate output columns of total size <= `chunk_read_limit` bytes. - * - * @param pages All pages in the file - * @param id Additional intermediate information required to process the pages - * @param num_rows Total number of rows to read - * @param chunk_read_limit Limit on total number of bytes to be returned per read, for all columns - * @param stream CUDA stream to use - */ -std::vector compute_splits(cudf::detail::hostdevice_vector& pages, - pass_intermediate_data const& id, - size_t num_rows, - size_t chunk_read_limit, - rmm::cuda_stream_view stream) -{ - auto const& page_keys = id.page_keys; - auto const& page_index = id.page_index; - - // generate cumulative row counts and sizes - rmm::device_uvector c_info(page_keys.size(), stream); - // convert PageInfo to cumulative_row_info - auto page_input = thrust::make_transform_iterator(page_index.begin(), - get_cumulative_row_info{pages.device_ptr()}); - thrust::inclusive_scan_by_key(rmm::exec_policy(stream), - page_keys.begin(), - page_keys.end(), - page_input, - c_info.begin(), - thrust::equal_to{}, - cumulative_row_sum{}); - // print_cumulative_page_info(pages, page_index, c_info, stream); - - // sort by row count - rmm::device_uvector c_info_sorted{c_info, stream}; - thrust::sort(rmm::exec_policy(stream), - c_info_sorted.begin(), - c_info_sorted.end(), - [] __device__(cumulative_row_info const& a, cumulative_row_info const& b) { - return a.row_count < b.row_count; - }); - - // std::vector h_c_info_sorted(c_info_sorted.size()); - // CUDF_CUDA_TRY(cudaMemcpy(h_c_info_sorted.data(), - // c_info_sorted.data(), - // sizeof(cumulative_row_info) * c_info_sorted.size(), - // cudaMemcpyDefault)); - // print_cumulative_row_info(h_c_info_sorted, "raw"); - - // generate key offsets (offsets to the start of each partition of keys). worst case is 1 page per - // key - rmm::device_uvector key_offsets(page_keys.size() + 1, stream); - auto const key_offsets_end = thrust::reduce_by_key(rmm::exec_policy(stream), - page_keys.begin(), - page_keys.end(), - thrust::make_constant_iterator(1), - thrust::make_discard_iterator(), - key_offsets.begin()) - .second; - size_t const num_unique_keys = key_offsets_end - key_offsets.begin(); - thrust::exclusive_scan( - rmm::exec_policy(stream), key_offsets.begin(), key_offsets.end(), key_offsets.begin()); - - // adjust the cumulative info such that for each row count, the size includes any pages that span - // that row count. this is so that if we have this case: - // page row counts - // Column A: 0 <----> 100 <----> 200 - // Column B: 0 <---------------> 200 <--------> 400 - // | - // if we decide to split at row 100, we don't really know the actual amount of bytes in column B - // at that point. So we have to proceed as if we are taking the bytes from all 200 rows of that - // page. 
- // - rmm::device_uvector aggregated_info(c_info.size(), stream); - thrust::transform(rmm::exec_policy(stream), - c_info_sorted.begin(), - c_info_sorted.end(), - aggregated_info.begin(), - row_total_size{c_info.data(), key_offsets.data(), num_unique_keys}); - - // bring back to the cpu - std::vector h_aggregated_info(aggregated_info.size()); - CUDF_CUDA_TRY(cudaMemcpyAsync(h_aggregated_info.data(), - aggregated_info.data(), - sizeof(cumulative_row_info) * c_info.size(), - cudaMemcpyDefault, - stream.value())); - stream.synchronize(); - - return find_splits(h_aggregated_info, num_rows, chunk_read_limit); -} - struct get_page_chunk_idx { __device__ size_type operator()(PageInfo const& page) { return page.chunk_idx; } }; @@ -1822,12 +1272,8 @@ void reader::impl::preprocess_pages(bool uses_custom_row_bounds, size_t chunk_re _pass_itm_data->page_keys = std::move(page_keys); _pass_itm_data->page_index = std::move(page_index); - // compute splits if necessary. otherwise return a single split representing - // the whole file. - _pass_itm_data->output_chunk_read_info = - _output_chunk_read_limit > 0 - ? compute_splits(pages, *_pass_itm_data, num_rows, chunk_read_limit, _stream) - : std::vector{{skip_rows, num_rows}}; + // compute splits for the pass + compute_splits_for_pass(); } void reader::impl::allocate_columns(size_t skip_rows, size_t num_rows, bool uses_custom_row_bounds) diff --git a/cpp/src/io/utilities/column_buffer.cpp b/cpp/src/io/utilities/column_buffer.cpp index f3a43cbc63c..dd049d401cf 100644 --- a/cpp/src/io/utilities/column_buffer.cpp +++ b/cpp/src/io/utilities/column_buffer.cpp @@ -51,19 +51,21 @@ std::unique_ptr gather_column_buffer::make_string_column_impl(rmm::cuda_ return make_strings_column(*_strings, stream, _mr); } -void inline_column_buffer::allocate_strings_data(rmm::cuda_stream_view stream) +void cudf::io::detail::inline_column_buffer::allocate_strings_data(rmm::cuda_stream_view stream) { CUDF_EXPECTS(type.id() == type_id::STRING, "allocate_strings_data called for non-string column"); // size + 1 for final offset. _string_data will be initialized later. _data = create_data(data_type{type_id::INT32}, size + 1, stream, _mr); } -void inline_column_buffer::create_string_data(size_t num_bytes, rmm::cuda_stream_view stream) +void cudf::io::detail::inline_column_buffer::create_string_data(size_t num_bytes, + rmm::cuda_stream_view stream) { _string_data = rmm::device_buffer(num_bytes, stream, _mr); } -std::unique_ptr inline_column_buffer::make_string_column_impl(rmm::cuda_stream_view stream) +std::unique_ptr cudf::io::detail::inline_column_buffer::make_string_column_impl( + rmm::cuda_stream_view stream) { // no need for copies, just transfer ownership of the data_buffers to the columns auto const state = mask_state::UNALLOCATED; @@ -324,7 +326,7 @@ std::unique_ptr empty_like(column_buffer_base& buffer, } using pointer_type = gather_column_buffer; -using string_type = inline_column_buffer; +using string_type = cudf::io::detail::inline_column_buffer; using pointer_column_buffer = column_buffer_base; using string_column_buffer = column_buffer_base; From 053da82810ad78286602cfd09e37f8a22cb0a15b Mon Sep 17 00:00:00 2001 From: Mike Wilson Date: Tue, 10 Oct 2023 13:28:01 -0400 Subject: [PATCH 137/150] Make parquet schema index type consistent (#14256) While working on parquet schema issue I noticed that the parent and child index didn't match. Discussion ensued and `size_type` was decided. 
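For background, a minimal sketch of why one shared index type matters when a flattened schema stores both parent and child indices. `SchemaNode` and `depth` below are hypothetical stand-ins for illustration, not the real `SchemaElement` code:

```cpp
#include <cstdint>
#include <vector>

using size_type = int32_t;  // same width as cudf::size_type

// Hypothetical flattened schema: every node indexes into one shared vector.
struct SchemaNode {
  size_type parent_idx = 0;             // index of this node's parent
  std::vector<size_type> children_idx;  // indices of this node's children
};

// Because parent_idx and children_idx share one type, walking the tree in
// either direction involves no casts or narrowing checks.
size_type depth(std::vector<SchemaNode> const& schema, size_type idx)
{
  size_type d = 0;
  while (idx != 0) {  // node 0 is the root in this sketch
    idx = schema[idx].parent_idx;
    ++d;
  }
  return d;
}
```

The one cast that remains is where a `std::size_t` value such as `schema.size()` is assigned to a `size_type` index, hence the explicit `static_cast` added to `sanitize_schema()` below.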
Authors:
  - Mike Wilson (https://github.com/hyperbolic2346)

Approvers:
  - Vukasin Milovanovic (https://github.com/vuule)
  - MithunR (https://github.com/mythrocks)
  - Nghia Truong (https://github.com/ttnghia)

URL: https://github.com/rapidsai/cudf/pull/14256
---
 cpp/src/io/parquet/parquet.hpp             | 6 ++++--
 cpp/src/io/parquet/reader_impl_helpers.cpp | 2 +-
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/cpp/src/io/parquet/parquet.hpp b/cpp/src/io/parquet/parquet.hpp
index c5993d73dec..dbec59670c7 100644
--- a/cpp/src/io/parquet/parquet.hpp
+++ b/cpp/src/io/parquet/parquet.hpp
@@ -18,6 +18,8 @@
 
 #include "parquet_common.hpp"
 
+#include <cudf/types.hpp>
+
 #include 
 #include 
 
@@ -152,8 +154,8 @@ struct SchemaElement {
   // The following fields are filled in later during schema initialization
   int max_definition_level = 0;
   int max_repetition_level = 0;
-  int parent_idx = 0;
-  std::vector<size_t> children_idx;
+  size_type parent_idx = 0;
+  std::vector<size_type> children_idx;
 
   bool operator==(SchemaElement const& other) const
   {
diff --git a/cpp/src/io/parquet/reader_impl_helpers.cpp b/cpp/src/io/parquet/reader_impl_helpers.cpp
index 171cf07da3e..040c6403f57 100644
--- a/cpp/src/io/parquet/reader_impl_helpers.cpp
+++ b/cpp/src/io/parquet/reader_impl_helpers.cpp
@@ -213,7 +213,7 @@ void metadata::sanitize_schema()
         // add a struct child and move this element's children to the struct
         schema_elem.converted_type  = LIST;
         schema_elem.repetition_type = OPTIONAL;
-        auto const struct_node_idx = schema.size();
+        auto const struct_node_idx = static_cast<size_type>(schema.size());
 
         SchemaElement struct_elem;
         struct_elem.name           = "struct_node";

From 5039d043a08e7ea7e5656bab60a6fced4dfa2f1d Mon Sep 17 00:00:00 2001
From: David Wendt <45795991+davidwendt@users.noreply.github.com>
Date: Tue, 10 Oct 2023 15:06:24 -0400
Subject: [PATCH 138/150] Expose stream parameter in public strings APIs
 (#14260)

Add stream parameter to public APIs:
- `cudf::strings::strip()`
- `cudf::strings::slice_strings()`
- `cudf::strings::pad()`
- `cudf::strings::zfill()`
- `cudf::strings::wrap()`

Also cleaned up some of the doxygen comments and added stream tests.

Reference #13744

Authors:
  - David Wendt (https://github.com/davidwendt)

Approvers:
  - Vyas Ramasubramani (https://github.com/vyasr)
  - Nghia Truong (https://github.com/ttnghia)

URL: https://github.com/rapidsai/cudf/pull/14260
---
 cpp/include/cudf/strings/padding.hpp        | 14 ++--
 cpp/include/cudf/strings/slice.hpp          | 30 +++++----
 cpp/include/cudf/strings/strip.hpp          |  4 +-
 cpp/include/cudf/strings/wrap.hpp           | 14 ++--
 cpp/src/strings/padding.cu                  |  6 +-
 cpp/src/strings/slice.cu                    |  7 +-
 cpp/src/strings/strip.cu                    |  5 +-
 cpp/src/strings/wrap.cu                     |  8 +--
 cpp/tests/CMakeLists.txt                    |  4 +-
 cpp/tests/streams/strings/strings_tests.cpp | 71 +++++++++++++++++++++
 10 files changed, 125 insertions(+), 38 deletions(-)
 create mode 100644 cpp/tests/streams/strings/strings_tests.cpp

diff --git a/cpp/include/cudf/strings/padding.hpp b/cpp/include/cudf/strings/padding.hpp
index 7699159fbea..f0cb351eeda 100644
--- a/cpp/include/cudf/strings/padding.hpp
+++ b/cpp/include/cudf/strings/padding.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2022, NVIDIA CORPORATION.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -51,6 +51,7 @@ namespace strings { * Default is pad right (left justify) * @param fill_char Single UTF-8 character to use for padding; * Default is the space character + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return New column with padded strings */ @@ -59,6 +60,7 @@ std::unique_ptr pad( size_type width, side_type side = side_type::RIGHT, std::string_view fill_char = " ", + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -79,14 +81,16 @@ std::unique_ptr pad( * r is now ['001234','-09876','+00.34','-342567', '0002+2'] * @endcode * - * @param input Strings instance for this operation. - * @param width The minimum number of characters for each string. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New column of strings. + * @param input Strings instance for this operation + * @param width The minimum number of characters for each string + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New column of strings */ std::unique_ptr zfill( strings_column_view const& input, size_type width, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of doxygen group diff --git a/cpp/include/cudf/strings/slice.hpp b/cpp/include/cudf/strings/slice.hpp index 5f2c71725eb..f106663be9b 100644 --- a/cpp/include/cudf/strings/slice.hpp +++ b/cpp/include/cudf/strings/slice.hpp @@ -50,18 +50,20 @@ namespace strings { * r2 is now ["lo","ob"] * @endcode * - * @param strings Strings column for this operation. - * @param start First character position to begin the substring. - * @param stop Last character position (exclusive) to end the substring. - * @param step Distance between input characters retrieved. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New strings column with sorted elements of this instance. + * @param input Strings column for this operation + * @param start First character position to begin the substring + * @param stop Last character position (exclusive) to end the substring + * @param step Distance between input characters retrieved + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New strings column with sorted elements of this instance */ std::unique_ptr slice_strings( - strings_column_view const& strings, + strings_column_view const& input, numeric_scalar const& start = numeric_scalar(0, false), numeric_scalar const& stop = numeric_scalar(0, false), numeric_scalar const& step = numeric_scalar(1), + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -95,16 +97,18 @@ std::unique_ptr slice_strings( * @throw cudf::logic_error if starts and stops are not same integer type. * @throw cudf::logic_error if starts or stops contains nulls. * - * @param strings Strings column for this operation. - * @param starts First character positions to begin the substring. 
- * @param stops Last character (exclusive) positions to end the substring. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New strings column with sorted elements of this instance. + * @param input Strings column for this operation + * @param starts First character positions to begin the substring + * @param stops Last character (exclusive) positions to end the substring + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New strings column with sorted elements of this instance */ std::unique_ptr slice_strings( - strings_column_view const& strings, + strings_column_view const& input, column_view const& starts, column_view const& stops, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of doxygen group diff --git a/cpp/include/cudf/strings/strip.hpp b/cpp/include/cudf/strings/strip.hpp index adf3b291144..556d6805ac3 100644 --- a/cpp/include/cudf/strings/strip.hpp +++ b/cpp/include/cudf/strings/strip.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022, NVIDIA CORPORATION. + * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -57,6 +57,7 @@ namespace strings { * string; Default is both * @param to_strip UTF-8 encoded characters to strip from each string; * Default is empty string which indicates strip whitespace characters + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory. * @return New strings column. */ @@ -64,6 +65,7 @@ std::unique_ptr strip( strings_column_view const& input, side_type side = side_type::BOTH, string_scalar const& to_strip = string_scalar(""), + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of doxygen group diff --git a/cpp/include/cudf/strings/wrap.hpp b/cpp/include/cudf/strings/wrap.hpp index 8d2d43c7f0f..efdc3e62aff 100644 --- a/cpp/include/cudf/strings/wrap.hpp +++ b/cpp/include/cudf/strings/wrap.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -57,14 +57,16 @@ namespace strings { * wrapped_string_tbl = ["the quick\nbrown fox\njumped over\nthe lazy\nbrown dog", "hello, world"] * ``` * - * @param[in] strings String column. - * @param[in] width Maximum character width of a line within each string. - * @param[in] mr Device memory resource used to allocate the returned column's device memory - * @return Column of wrapped strings. 
+ * @param input String column + * @param width Maximum character width of a line within each string + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return Column of wrapped strings */ std::unique_ptr wrap( - strings_column_view const& strings, + strings_column_view const& input, size_type width, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of doxygen group diff --git a/cpp/src/strings/padding.cu b/cpp/src/strings/padding.cu index c501a8bf7b4..850ccaa4535 100644 --- a/cpp/src/strings/padding.cu +++ b/cpp/src/strings/padding.cu @@ -168,18 +168,20 @@ std::unique_ptr pad(strings_column_view const& input, size_type width, side_type side, std::string_view fill_char, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::pad(input, width, side, fill_char, cudf::get_default_stream(), mr); + return detail::pad(input, width, side, fill_char, stream, mr); } std::unique_ptr zfill(strings_column_view const& input, size_type width, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::zfill(input, width, cudf::get_default_stream(), mr); + return detail::zfill(input, width, stream, mr); } } // namespace strings diff --git a/cpp/src/strings/slice.cu b/cpp/src/strings/slice.cu index cce6a19a5a6..5a1fee92c7d 100644 --- a/cpp/src/strings/slice.cu +++ b/cpp/src/strings/slice.cu @@ -248,20 +248,21 @@ std::unique_ptr slice_strings(strings_column_view const& strings, numeric_scalar const& start, numeric_scalar const& stop, numeric_scalar const& step, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::slice_strings(strings, start, stop, step, cudf::get_default_stream(), mr); + return detail::slice_strings(strings, start, stop, step, stream, mr); } std::unique_ptr slice_strings(strings_column_view const& strings, column_view const& starts_column, column_view const& stops_column, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::slice_strings( - strings, starts_column, stops_column, cudf::get_default_stream(), mr); + return detail::slice_strings(strings, starts_column, stops_column, stream, mr); } } // namespace strings diff --git a/cpp/src/strings/strip.cu b/cpp/src/strings/strip.cu index 6fb7c671a87..26df76850f7 100644 --- a/cpp/src/strings/strip.cu +++ b/cpp/src/strings/strip.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022, NVIDIA CORPORATION. + * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -86,10 +86,11 @@ std::unique_ptr strip(strings_column_view const& input, std::unique_ptr strip(strings_column_view const& input, side_type side, string_scalar const& to_strip, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::strip(input, side, to_strip, cudf::get_default_stream(), mr); + return detail::strip(input, side, to_strip, stream, mr); } } // namespace strings diff --git a/cpp/src/strings/wrap.cu b/cpp/src/strings/wrap.cu index 335908d65d1..aa87a663964 100644 --- a/cpp/src/strings/wrap.cu +++ b/cpp/src/strings/wrap.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,10 +19,9 @@ #include #include #include -#include -#include #include #include +#include #include #include @@ -133,10 +132,11 @@ std::unique_ptr wrap(strings_column_view const& strings, std::unique_ptr wrap(strings_column_view const& strings, size_type width, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::wrap(strings, width, cudf::get_default_stream(), mr); + return detail::wrap(strings, width, stream, mr); } } // namespace strings diff --git a/cpp/tests/CMakeLists.txt b/cpp/tests/CMakeLists.txt index ffaba7d6fa7..b15a6c41d39 100644 --- a/cpp/tests/CMakeLists.txt +++ b/cpp/tests/CMakeLists.txt @@ -633,8 +633,8 @@ ConfigureTest(STREAM_REPLACE_TEST streams/replace_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_SEARCH_TEST streams/search_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_DICTIONARY_TEST streams/dictionary_test.cpp STREAM_MODE testing) ConfigureTest( - STREAM_STRINGS_TEST streams/strings/case_test.cpp streams/strings/find_test.cpp STREAM_MODE - testing + STREAM_STRINGS_TEST streams/strings/case_test.cpp streams/strings/find_test.cpp + streams/strings/strings_tests.cpp STREAM_MODE testing ) ConfigureTest(STREAM_SORTING_TEST streams/sorting_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_TEXT_TEST streams/text/ngrams_test.cpp STREAM_MODE testing) diff --git a/cpp/tests/streams/strings/strings_tests.cpp b/cpp/tests/streams/strings/strings_tests.cpp new file mode 100644 index 00000000000..0db467a6895 --- /dev/null +++ b/cpp/tests/streams/strings/strings_tests.cpp @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include + +#include +#include +#include + +#include + +class StringsTest : public cudf::test::BaseFixture {}; + +TEST_F(StringsTest, Strip) +{ + auto input = cudf::test::strings_column_wrapper({" aBc ", " ", "aaaa ", "\tb"}); + auto view = cudf::strings_column_view(input); + + auto const strip = cudf::string_scalar(" ", true, cudf::test::get_default_stream()); + auto const side = cudf::strings::side_type::BOTH; + cudf::strings::strip(view, side, strip, cudf::test::get_default_stream()); +} + +TEST_F(StringsTest, Pad) +{ + auto input = cudf::test::strings_column_wrapper({"333", "", "4444", "1"}); + auto view = cudf::strings_column_view(input); + + auto const side = cudf::strings::side_type::BOTH; + cudf::strings::pad(view, 6, side, " ", cudf::test::get_default_stream()); + cudf::strings::zfill(view, 6, cudf::test::get_default_stream()); +} + +TEST_F(StringsTest, Wrap) +{ + auto input = cudf::test::strings_column_wrapper({"the quick brown fox jumped"}); + auto view = cudf::strings_column_view(input); + + cudf::strings::wrap(view, 6, cudf::test::get_default_stream()); +} + +TEST_F(StringsTest, Slice) +{ + auto input = cudf::test::strings_column_wrapper({"hello", "these", "are test strings"}); + auto view = cudf::strings_column_view(input); + + auto start = cudf::numeric_scalar(2, true, cudf::test::get_default_stream()); + auto stop = cudf::numeric_scalar(5, true, cudf::test::get_default_stream()); + auto step = cudf::numeric_scalar(1, true, cudf::test::get_default_stream()); + cudf::strings::slice_strings(view, start, stop, step, cudf::test::get_default_stream()); + + auto starts = cudf::test::fixed_width_column_wrapper({1, 2, 3}); + auto stops = cudf::test::fixed_width_column_wrapper({4, 5, 6}); + cudf::strings::slice_strings(view, starts, stops, cudf::test::get_default_stream()); +} From c0c7ed8405c679752439081ee1b42b22658264c9 Mon Sep 17 00:00:00 2001 From: Martin Marenz Date: Wed, 11 Oct 2023 00:04:58 +0200 Subject: [PATCH 139/150] Add `bytes_per_second` to transpose benchmark (#14170) This patch relates to #13735. 
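For context, `bytes_per_second` is derived by Google Benchmark from `State::SetBytesProcessed`: the reported figure is the total byte count divided by the total elapsed time. A minimal, self-contained sketch of the mechanism; the `memcpy` workload is illustrative only, not the cudf benchmark:

```cpp
#include <benchmark/benchmark.h>

#include <cstdint>
#include <cstring>
#include <vector>

static void BM_memcpy(benchmark::State& state)
{
  std::vector<char> src(state.range(0)), dst(state.range(0));
  for (auto _ : state) {
    std::memcpy(dst.data(), src.data(), src.size());
    benchmark::DoNotOptimize(dst.data());
  }
  // Bytes read plus bytes written per iteration, summed over all iterations;
  // Google Benchmark divides by elapsed time and reports bytes_per_second.
  state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) *
                          static_cast<int64_t>(2 * src.size()));
}
BENCHMARK(BM_memcpy)->Arg(1 << 20);
BENCHMARK_MAIN();
```

A transpose likewise touches every element twice (one read, one write), which is why the patch below sums `bytes_read`, `bytes_written`, and the null-mask bytes before calling `SetBytesProcessed`.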
Benchmark: [transpose_benchmark.txt](https://github.com/rapidsai/cudf/files/12699834/transpose_benchmark.txt) Authors: - Martin Marenz (https://github.com/Blonck) - Mark Harris (https://github.com/harrism) Approvers: - Mark Harris (https://github.com/harrism) - Yunsong Wang (https://github.com/PointKernel) URL: https://github.com/rapidsai/cudf/pull/14170 --- cpp/benchmarks/transpose/transpose.cpp | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/cpp/benchmarks/transpose/transpose.cpp b/cpp/benchmarks/transpose/transpose.cpp index 2f41bda4b88..c2737325462 100644 --- a/cpp/benchmarks/transpose/transpose.cpp +++ b/cpp/benchmarks/transpose/transpose.cpp @@ -20,17 +20,19 @@ #include #include #include +#include #include #include static void BM_transpose(benchmark::State& state) { - auto count = state.range(0); + auto count = state.range(0); + constexpr auto column_type_id = cudf::type_id::INT32; auto int_column_generator = thrust::make_transform_iterator(thrust::counting_iterator(0), [count](int i) { return cudf::make_numeric_column( - cudf::data_type{cudf::type_id::INT32}, count, cudf::mask_state::ALL_VALID); + cudf::data_type{column_type_id}, count, cudf::mask_state::ALL_VALID); }); auto input_table = cudf::table(std::vector(int_column_generator, int_column_generator + count)); @@ -40,6 +42,17 @@ static void BM_transpose(benchmark::State& state) cuda_event_timer raii(state, true); auto output = cudf::transpose(input); } + + // Collect memory statistics. + auto const bytes_read = static_cast(input.num_columns()) * input.num_rows() * + sizeof(cudf::id_to_type); + auto const bytes_written = bytes_read; + // Account for nullability in input and output. + auto const null_bytes = 2 * static_cast(input.num_columns()) * + cudf::bitmask_allocation_size_bytes(input.num_rows()); + + state.SetBytesProcessed(static_cast(state.iterations()) * + (bytes_read + bytes_written + null_bytes)); } class Transpose : public cudf::benchmark {}; From 0ed7725416879a824ee5b96292eda2d1048a9ada Mon Sep 17 00:00:00 2001 From: Martin Marenz Date: Wed, 11 Oct 2023 00:06:23 +0200 Subject: [PATCH 140/150] Add `bytes_per_second` to shift benchmark (#13950) Adds `bytes_per_second` to `SHIFT_BENCH`. This patch relates to #13735. Authors: - Martin Marenz (https://github.com/Blonck) Approvers: - Karthikeyan (https://github.com/karthikeyann) - Nghia Truong (https://github.com/ttnghia) - Mark Harris (https://github.com/harrism) URL: https://github.com/rapidsai/cudf/pull/13950 --- cpp/benchmarks/copying/shift.cu | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/cpp/benchmarks/copying/shift.cu b/cpp/benchmarks/copying/shift.cu index 460100a8fe9..e1169e3bcd6 100644 --- a/cpp/benchmarks/copying/shift.cu +++ b/cpp/benchmarks/copying/shift.cu @@ -56,18 +56,32 @@ static void BM_shift(benchmark::State& state) cudf::size_type size = state.range(0); cudf::size_type offset = size * (static_cast(shift_factor) / 100.0); - auto const input_table = - create_sequence_table({cudf::type_to_id()}, - row_count{size}, - use_validity ? std::optional{1.0} : std::nullopt); + auto constexpr column_type_id = cudf::type_id::INT32; + using column_type = cudf::id_to_type; + + auto const input_table = create_sequence_table( + {column_type_id}, row_count{size}, use_validity ? std::optional{1.0} : std::nullopt); cudf::column_view input{input_table->get_column(0)}; - auto fill = use_validity ? 
make_scalar() : make_scalar(777); + auto fill = use_validity ? make_scalar() : make_scalar(777); for (auto _ : state) { cuda_event_timer raii(state, true); auto output = cudf::shift(input, offset, *fill); } + + auto const elems_read = (size - offset); + auto const bytes_read = elems_read * sizeof(column_type); + + // If 'use_validity' is false, the fill value is a number, and the entire column + // (excluding the null bitmask) needs to be written. On the other hand, if 'use_validity' + // is true, only the elements that can be shifted are written, along with the full null bitmask. + auto const elems_written = use_validity ? (size - offset) : size; + auto const bytes_written = elems_written * sizeof(column_type); + auto const null_bytes = use_validity ? 2 * cudf::bitmask_allocation_size_bytes(size) : 0; + + state.SetBytesProcessed(static_cast(state.iterations()) * + (bytes_written + bytes_read + null_bytes)); } class Shift : public cudf::benchmark {}; From aa8b0f8e4e71a8e2b076656e0a8bf00bfc15ecb8 Mon Sep 17 00:00:00 2001 From: Ed Seidl Date: Tue, 10 Oct 2023 16:14:51 -0700 Subject: [PATCH 141/150] Handle empty string correctly in Parquet statistics (#14257) An empty string should be a valid minimum value for a string column, but the current parquet writer considers an empty string to have no value when writing the column chunk statistics. This PR changes all fields in the Statistics struct to be `thrust::optional` to help distinguish between a valid empty string and no value. Authors: - Ed Seidl (https://github.com/etseidl) Approvers: - Vukasin Milovanovic (https://github.com/vuule) - Nghia Truong (https://github.com/ttnghia) URL: https://github.com/rapidsai/cudf/pull/14257 --- .../io/parquet/compact_protocol_reader.cpp | 15 ++-- .../io/parquet/compact_protocol_writer.cpp | 12 +-- cpp/src/io/parquet/parquet.hpp | 18 ++-- cpp/src/io/parquet/predicate_pushdown.cpp | 14 +-- cpp/tests/io/parquet_test.cpp | 85 +++++++++++++++---- 5 files changed, 104 insertions(+), 40 deletions(-) diff --git a/cpp/src/io/parquet/compact_protocol_reader.cpp b/cpp/src/io/parquet/compact_protocol_reader.cpp index 81d1be64a45..1a345ee0750 100644 --- a/cpp/src/io/parquet/compact_protocol_reader.cpp +++ b/cpp/src/io/parquet/compact_protocol_reader.cpp @@ -767,12 +767,15 @@ bool CompactProtocolReader::read(ColumnIndex* c) bool CompactProtocolReader::read(Statistics* s) { - auto op = std::make_tuple(parquet_field_binary(1, s->max), - parquet_field_binary(2, s->min), - parquet_field_int64(3, s->null_count), - parquet_field_int64(4, s->distinct_count), - parquet_field_binary(5, s->max_value), - parquet_field_binary(6, s->min_value)); + using optional_binary = parquet_field_optional, parquet_field_binary>; + using optional_int64 = parquet_field_optional; + + auto op = std::make_tuple(optional_binary(1, s->max), + optional_binary(2, s->min), + optional_int64(3, s->null_count), + optional_int64(4, s->distinct_count), + optional_binary(5, s->max_value), + optional_binary(6, s->min_value)); return function_builder(this, op); } diff --git a/cpp/src/io/parquet/compact_protocol_writer.cpp b/cpp/src/io/parquet/compact_protocol_writer.cpp index 9adc8767880..00810269d3c 100644 --- a/cpp/src/io/parquet/compact_protocol_writer.cpp +++ b/cpp/src/io/parquet/compact_protocol_writer.cpp @@ -195,12 +195,12 @@ size_t CompactProtocolWriter::write(ColumnChunkMetaData const& s) size_t CompactProtocolWriter::write(Statistics const& s) { CompactProtocolFieldWriter c(*this); - if (not s.max.empty()) { 
c.field_binary(1, s.max); } - if (not s.min.empty()) { c.field_binary(2, s.min); } - if (s.null_count != -1) { c.field_int(3, s.null_count); } - if (s.distinct_count != -1) { c.field_int(4, s.distinct_count); } - if (not s.max_value.empty()) { c.field_binary(5, s.max_value); } - if (not s.min_value.empty()) { c.field_binary(6, s.min_value); } + if (s.max.has_value()) { c.field_binary(1, s.max.value()); } + if (s.min.has_value()) { c.field_binary(2, s.min.value()); } + if (s.null_count.has_value()) { c.field_int(3, s.null_count.value()); } + if (s.distinct_count.has_value()) { c.field_int(4, s.distinct_count.value()); } + if (s.max_value.has_value()) { c.field_binary(5, s.max_value.value()); } + if (s.min_value.has_value()) { c.field_binary(6, s.min_value.value()); } return c.value(); } diff --git a/cpp/src/io/parquet/parquet.hpp b/cpp/src/io/parquet/parquet.hpp index dbec59670c7..1cd16ac6102 100644 --- a/cpp/src/io/parquet/parquet.hpp +++ b/cpp/src/io/parquet/parquet.hpp @@ -215,12 +215,18 @@ struct SchemaElement { * @brief Thrift-derived struct describing column chunk statistics */ struct Statistics { - std::vector max; // deprecated max value in signed comparison order - std::vector min; // deprecated min value in signed comparison order - int64_t null_count = -1; // count of null values in the column - int64_t distinct_count = -1; // count of distinct values occurring - std::vector max_value; // max value for column determined by ColumnOrder - std::vector min_value; // min value for column determined by ColumnOrder + // deprecated max value in signed comparison order + thrust::optional> max; + // deprecated min value in signed comparison order + thrust::optional> min; + // count of null values in the column + thrust::optional null_count; + // count of distinct values occurring + thrust::optional distinct_count; + // max value for column determined by ColumnOrder + thrust::optional> max_value; + // min value for column determined by ColumnOrder + thrust::optional> min_value; }; /** diff --git a/cpp/src/io/parquet/predicate_pushdown.cpp b/cpp/src/io/parquet/predicate_pushdown.cpp index 9083be1c2dd..a5851de3c20 100644 --- a/cpp/src/io/parquet/predicate_pushdown.cpp +++ b/cpp/src/io/parquet/predicate_pushdown.cpp @@ -150,12 +150,14 @@ struct stats_caster { { } - void set_index(size_type index, std::vector const& binary_value, Type const type) + void set_index(size_type index, + thrust::optional> const& binary_value, + Type const type) { - if (!binary_value.empty()) { - val[index] = convert(binary_value.data(), binary_value.size(), type); + if (binary_value.has_value()) { + val[index] = convert(binary_value.value().data(), binary_value.value().size(), type); } - if (binary_value.empty()) { + if (not binary_value.has_value()) { clear_bit_unsafe(null_mask.data(), index); null_count++; } @@ -210,10 +212,10 @@ struct stats_caster { auto const& row_group = per_file_metadata[src_idx].row_groups[rg_idx]; auto const& colchunk = row_group.columns[col_idx]; // To support deprecated min, max fields. - auto const& min_value = colchunk.meta_data.statistics.min_value.size() > 0 + auto const& min_value = colchunk.meta_data.statistics.min_value.has_value() ? colchunk.meta_data.statistics.min_value : colchunk.meta_data.statistics.min; - auto const& max_value = colchunk.meta_data.statistics.max_value.size() > 0 + auto const& max_value = colchunk.meta_data.statistics.max_value.has_value() ? 
colchunk.meta_data.statistics.max_value : colchunk.meta_data.statistics.max; // translate binary data to Type then to diff --git a/cpp/tests/io/parquet_test.cpp b/cpp/tests/io/parquet_test.cpp index 3e5d7033e60..fa85e3a4a1d 100644 --- a/cpp/tests/io/parquet_test.cpp +++ b/cpp/tests/io/parquet_test.cpp @@ -4161,8 +4161,10 @@ TEST_P(ParquetV2Test, LargeColumnIndex) // check trunc(page.min) <= stats.min && trunc(page.max) >= stats.max auto const ptype = fmd.schema[c + 1].type; auto const ctype = fmd.schema[c + 1].converted_type; - EXPECT_TRUE(compare_binary(ci.min_values[0], stats.min_value, ptype, ctype) <= 0); - EXPECT_TRUE(compare_binary(ci.max_values[0], stats.max_value, ptype, ctype) >= 0); + ASSERT_TRUE(stats.min_value.has_value()); + ASSERT_TRUE(stats.max_value.has_value()); + EXPECT_TRUE(compare_binary(ci.min_values[0], stats.min_value.value(), ptype, ctype) <= 0); + EXPECT_TRUE(compare_binary(ci.max_values[0], stats.max_value.value(), ptype, ctype) >= 0); } } } @@ -4242,6 +4244,9 @@ TEST_P(ParquetV2Test, CheckColumnOffsetIndex) auto const ci = read_column_index(source, chunk); auto const stats = get_statistics(chunk); + ASSERT_TRUE(stats.min_value.has_value()); + ASSERT_TRUE(stats.max_value.has_value()); + // schema indexing starts at 1 auto const ptype = fmd.schema[c + 1].type; auto const ctype = fmd.schema[c + 1].converted_type; @@ -4250,10 +4255,10 @@ TEST_P(ParquetV2Test, CheckColumnOffsetIndex) EXPECT_FALSE(ci.null_pages[p]); // null_counts should always be 0 EXPECT_EQ(ci.null_counts[p], 0); - EXPECT_TRUE(compare_binary(stats.min_value, ci.min_values[p], ptype, ctype) <= 0); + EXPECT_TRUE(compare_binary(stats.min_value.value(), ci.min_values[p], ptype, ctype) <= 0); } for (size_t p = 0; p < ci.max_values.size(); p++) - EXPECT_TRUE(compare_binary(stats.max_value, ci.max_values[p], ptype, ctype) >= 0); + EXPECT_TRUE(compare_binary(stats.max_value.value(), ci.max_values[p], ptype, ctype) >= 0); } } } @@ -4344,7 +4349,10 @@ TEST_P(ParquetV2Test, CheckColumnOffsetIndexNulls) auto const stats = get_statistics(chunk); // should be half nulls, except no nulls in column 0 - EXPECT_EQ(stats.null_count, c == 0 ? 0 : num_rows / 2); + ASSERT_TRUE(stats.min_value.has_value()); + ASSERT_TRUE(stats.max_value.has_value()); + ASSERT_TRUE(stats.null_count.has_value()); + EXPECT_EQ(stats.null_count.value(), c == 0 ? 0 : num_rows / 2); // schema indexing starts at 1 auto const ptype = fmd.schema[c + 1].type; @@ -4356,10 +4364,10 @@ TEST_P(ParquetV2Test, CheckColumnOffsetIndexNulls) } else { EXPECT_EQ(ci.null_counts[p], 0); } - EXPECT_TRUE(compare_binary(stats.min_value, ci.min_values[p], ptype, ctype) <= 0); + EXPECT_TRUE(compare_binary(stats.min_value.value(), ci.min_values[p], ptype, ctype) <= 0); } for (size_t p = 0; p < ci.max_values.size(); p++) { - EXPECT_TRUE(compare_binary(stats.max_value, ci.max_values[p], ptype, ctype) >= 0); + EXPECT_TRUE(compare_binary(stats.max_value.value(), ci.max_values[p], ptype, ctype) >= 0); } } } @@ -4436,7 +4444,12 @@ TEST_P(ParquetV2Test, CheckColumnOffsetIndexNullColumn) auto const stats = get_statistics(chunk); // there should be no nulls except column 1 which is all nulls - EXPECT_EQ(stats.null_count, c == 1 ? num_rows : 0); + if (c != 1) { + ASSERT_TRUE(stats.min_value.has_value()); + ASSERT_TRUE(stats.max_value.has_value()); + } + ASSERT_TRUE(stats.null_count.has_value()); + EXPECT_EQ(stats.null_count.value(), c == 1 ?
num_rows : 0); // schema indexing starts at 1 auto const ptype = fmd.schema[c + 1].type; @@ -4449,12 +4462,12 @@ TEST_P(ParquetV2Test, CheckColumnOffsetIndexNullColumn) } if (not ci.null_pages[p]) { EXPECT_EQ(ci.null_counts[p], 0); - EXPECT_TRUE(compare_binary(stats.min_value, ci.min_values[p], ptype, ctype) <= 0); + EXPECT_TRUE(compare_binary(stats.min_value.value(), ci.min_values[p], ptype, ctype) <= 0); } } for (size_t p = 0; p < ci.max_values.size(); p++) { if (not ci.null_pages[p]) { - EXPECT_TRUE(compare_binary(stats.max_value, ci.max_values[p], ptype, ctype) >= 0); + EXPECT_TRUE(compare_binary(stats.max_value.value(), ci.max_values[p], ptype, ctype) >= 0); } } } @@ -4533,13 +4546,16 @@ TEST_P(ParquetV2Test, CheckColumnOffsetIndexStruct) auto const ci = read_column_index(source, chunk); auto const stats = get_statistics(chunk); + ASSERT_TRUE(stats.min_value.has_value()); + ASSERT_TRUE(stats.max_value.has_value()); + auto const ptype = fmd.schema[colidx].type; auto const ctype = fmd.schema[colidx].converted_type; for (size_t p = 0; p < ci.min_values.size(); p++) { - EXPECT_TRUE(compare_binary(stats.min_value, ci.min_values[p], ptype, ctype) <= 0); + EXPECT_TRUE(compare_binary(stats.min_value.value(), ci.min_values[p], ptype, ctype) <= 0); } for (size_t p = 0; p < ci.max_values.size(); p++) { - EXPECT_TRUE(compare_binary(stats.max_value, ci.max_values[p], ptype, ctype) >= 0); + EXPECT_TRUE(compare_binary(stats.max_value.value(), ci.max_values[p], ptype, ctype) >= 0); } } } @@ -4829,11 +4845,14 @@ TEST_F(ParquetWriterTest, CheckColumnIndexTruncation) auto const ci = read_column_index(source, chunk); auto const stats = get_statistics(chunk); + ASSERT_TRUE(stats.min_value.has_value()); + ASSERT_TRUE(stats.max_value.has_value()); + // check trunc(page.min) <= stats.min && trunc(page.max) >= stats.max auto const ptype = fmd.schema[c + 1].type; auto const ctype = fmd.schema[c + 1].converted_type; - EXPECT_TRUE(compare_binary(ci.min_values[0], stats.min_value, ptype, ctype) <= 0); - EXPECT_TRUE(compare_binary(ci.max_values[0], stats.max_value, ptype, ctype) >= 0); + EXPECT_TRUE(compare_binary(ci.min_values[0], stats.min_value.value(), ptype, ctype) <= 0); + EXPECT_TRUE(compare_binary(ci.max_values[0], stats.max_value.value(), ptype, ctype) >= 0); // check that truncated values == expected EXPECT_EQ(memcmp(ci.min_values[0].data(), truncated_min[c], ci.min_values[0].size()), 0); @@ -4890,8 +4909,10 @@ TEST_F(ParquetWriterTest, BinaryColumnIndexTruncation) // check trunc(page.min) <= stats.min && trunc(page.max) >= stats.max auto const ptype = fmd.schema[c + 1].type; auto const ctype = fmd.schema[c + 1].converted_type; - EXPECT_TRUE(compare_binary(ci.min_values[0], stats.min_value, ptype, ctype) <= 0); - EXPECT_TRUE(compare_binary(ci.max_values[0], stats.max_value, ptype, ctype) >= 0); + ASSERT_TRUE(stats.min_value.has_value()); + ASSERT_TRUE(stats.max_value.has_value()); + EXPECT_TRUE(compare_binary(ci.min_values[0], stats.min_value.value(), ptype, ctype) <= 0); + EXPECT_TRUE(compare_binary(ci.max_values[0], stats.max_value.value(), ptype, ctype) >= 0); // check that truncated values == expected EXPECT_EQ(ci.min_values[0], truncated_min[c]); @@ -6737,6 +6758,38 @@ TEST_P(ParquetV2Test, CheckEncodings) } } +TEST_F(ParquetWriterTest, EmptyMinStringStatistics) +{ + char const* const min_val = ""; + char const* const max_val = "zzz"; + std::vector<char const*> strings{min_val, max_val, "pining", "for", "the", "fjords"}; + + column_wrapper<cudf::string_view> string_col{strings.begin(), strings.end()}; + auto const output =
table_view{{string_col}}; + auto const filepath = temp_env->get_temp_filepath("EmptyMinStringStatistics.parquet"); + cudf::io::parquet_writer_options out_opts = + cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, output); + cudf::io::write_parquet(out_opts); + + auto const source = cudf::io::datasource::create(filepath); + cudf::io::parquet::detail::FileMetaData fmd; + read_footer(source, &fmd); + + ASSERT_TRUE(fmd.row_groups.size() > 0); + ASSERT_TRUE(fmd.row_groups[0].columns.size() > 0); + auto const& chunk = fmd.row_groups[0].columns[0]; + auto const stats = get_statistics(chunk); + + ASSERT_TRUE(stats.min_value.has_value()); + ASSERT_TRUE(stats.max_value.has_value()); + auto const min_value = std::string{reinterpret_cast<char const *>(stats.min_value.value().data()), + stats.min_value.value().size()}; + auto const max_value = std::string{reinterpret_cast<char const *>(stats.max_value.value().data()), + stats.max_value.value().size()}; + EXPECT_EQ(min_value, std::string(min_val)); + EXPECT_EQ(max_value, std::string(max_val)); +} + TEST_F(ParquetReaderTest, RepeatedNoAnnotations) { constexpr unsigned char repeated_bytes[] = { From b17904dbaa4de1a162fcb4a0f64862f9f83b976f Mon Sep 17 00:00:00 2001 From: "Robert (Bobby) Evans" Date: Tue, 10 Oct 2023 19:51:02 -0500 Subject: [PATCH 142/150] Add in java bindings for DataSource (#14254) This PR adds DataSource Java bindings. It also fixes a small bug in CUDF that made it so the bindings would not work for anything but CSV. Authors: - Robert (Bobby) Evans (https://github.com/revans2) Approvers: - Jason Lowe (https://github.com/jlowe) - Vukasin Milovanovic (https://github.com/vuule) - David Wendt (https://github.com/davidwendt) URL: https://github.com/rapidsai/cudf/pull/14254 --- cpp/src/io/utilities/datasource.cpp | 8 + java/src/main/java/ai/rapids/cudf/Cuda.java | 24 +- .../main/java/ai/rapids/cudf/DataSource.java | 189 ++++++++++++++ .../java/ai/rapids/cudf/DataSourceHelper.java | 44 ++++ .../ai/rapids/cudf/DeviceMemoryBuffer.java | 6 +- .../ai/rapids/cudf/MultiBufferDataSource.java | 230 +++++++++++++++++ .../ai/rapids/cudf/ParquetChunkedReader.java | 45 +++- java/src/main/java/ai/rapids/cudf/Table.java | 99 +++++++- java/src/main/native/CMakeLists.txt | 1 + java/src/main/native/src/ChunkedReaderJni.cpp | 36 ++- java/src/main/native/src/CudfJni.cpp | 8 + .../main/native/src/DataSourceHelperJni.cpp | 237 ++++++++++++++++++ java/src/main/native/src/TableJni.cpp | 212 +++++++++++++++- java/src/main/native/src/cudf_jni_apis.hpp | 8 + .../test/java/ai/rapids/cudf/TableTest.java | 225 +++++++++++++++++ 15 files changed, 1358 insertions(+), 14 deletions(-) create mode 100644 java/src/main/java/ai/rapids/cudf/DataSource.java create mode 100644 java/src/main/java/ai/rapids/cudf/DataSourceHelper.java create mode 100644 java/src/main/java/ai/rapids/cudf/MultiBufferDataSource.java create mode 100644 java/src/main/native/src/DataSourceHelperJni.cpp diff --git a/cpp/src/io/utilities/datasource.cpp b/cpp/src/io/utilities/datasource.cpp index 7a7121aa91d..5cdd92ce3b7 100644 --- a/cpp/src/io/utilities/datasource.cpp +++ b/cpp/src/io/utilities/datasource.cpp @@ -375,6 +375,14 @@ class user_datasource_wrapper : public datasource { return source->device_read(offset, size, stream); } + std::future<size_t> device_read_async(size_t offset, + size_t size, + uint8_t* dst, + rmm::cuda_stream_view stream) override + { + return source->device_read_async(offset, size, dst, stream); + } + [[nodiscard]] size_t size() const override { return
source->size(); } private: diff --git a/java/src/main/java/ai/rapids/cudf/Cuda.java b/java/src/main/java/ai/rapids/cudf/Cuda.java index e1298e29925..7cc3d30a9cf 100755 --- a/java/src/main/java/ai/rapids/cudf/Cuda.java +++ b/java/src/main/java/ai/rapids/cudf/Cuda.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022, NVIDIA CORPORATION. + * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,9 +15,6 @@ */ package ai.rapids.cudf; -import ai.rapids.cudf.NvtxColor; -import ai.rapids.cudf.NvtxRange; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -90,6 +87,21 @@ private Stream() { this.id = -1; } + private Stream(long id) { + this.cleaner = null; + this.id = id; + } + + /** + * Wrap a given stream ID to make it accessible. + */ + static Stream wrap(long id) { + if (id == -1) { + return DEFAULT_STREAM; + } + return new Stream(id); + } + /** * Have this stream not execute new work until the work recorded in event completes. * @param event the event to wait on. @@ -122,7 +134,9 @@ public synchronized void close() { cleaner.delRef(); } if (closed) { - cleaner.logRefCountDebug("double free " + this); + if (cleaner != null) { + cleaner.logRefCountDebug("double free " + this); + } throw new IllegalStateException("Close called too many times " + this); } if (cleaner != null) { diff --git a/java/src/main/java/ai/rapids/cudf/DataSource.java b/java/src/main/java/ai/rapids/cudf/DataSource.java new file mode 100644 index 00000000000..1e5893235df --- /dev/null +++ b/java/src/main/java/ai/rapids/cudf/DataSource.java @@ -0,0 +1,189 @@ +/* + * + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package ai.rapids.cudf; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.HashMap; + +/** + * Base class that can be used to provide data dynamically to CUDF. This follows somewhat + * closely with cudf::io::datasource. There are a few main differences. + *
+ * First this does not expose async device reads. It will call the non-async device read API + * instead. This might be added in the future, but there was no direct use case for it in java + * right now to warrant the added complexity. + *
+ * Second there is no implementation of the device read API that returns a buffer instead of + * writing into one. This is not used by CUDF yet so testing an implementation that isn't used + * didn't feel ideal. If it is needed we will add one in the future. + */ +public abstract class DataSource implements AutoCloseable { + private static final Logger log = LoggerFactory.getLogger(DataSource.class); + + /** + * This is used to keep track of the HostMemoryBuffers in java land so the C++ layer + * does not have to do it. + */ + private final HashMap cachedBuffers = new HashMap<>(); + + @Override + public void close() { + if (!cachedBuffers.isEmpty()) { + throw new IllegalStateException("DataSource closed before all returned host buffers were closed"); + } + } + + /** + * Get the size of the source in bytes. + */ + public abstract long size(); + + /** + * Read data from the source at the given offset. Return a HostMemoryBuffer for the data + * that was read. + * @param offset where to start reading from. + * @param amount the maximum number of bytes to read. + * @return a buffer that points to the data. + * @throws IOException on any error. + */ + public abstract HostMemoryBuffer hostRead(long offset, long amount) throws IOException; + + + /** + * Called when the buffer returned from hostRead is done. The default is to close the buffer. + */ + protected void onHostBufferDone(HostMemoryBuffer buffer) { + if (buffer != null) { + buffer.close(); + } + } + + /** + * Read data from the source at the given offset into dest. Note that dest should not be closed, + * and no reference to it can outlive the call to hostRead. The target amount to read is + * dest's length. + * @param offset the offset to start reading from in the source. + * @param dest where to write the data. + * @return the actual number of bytes written to dest. + */ + public abstract long hostRead(long offset, HostMemoryBuffer dest) throws IOException; + + /** + * Return true if this supports reading directly to the device else false. The default is + * no device support. This cannot change dynamically. It is typically read just once. + */ + public boolean supportsDeviceRead() { + return false; + } + + /** + * Get the size cutoff between device reads and host reads when device reads are supported. + * Anything larger than the cutoff will be a device read and anything smaller will be a + * host read. By default, the cutoff is 0 so all reads will be device reads if device reads + * are supported. + */ + public long getDeviceReadCutoff() { + return 0; + } + + /** + * Read data from the source at the given offset into dest. Note that dest should not be closed, + * and no reference to it can outlive the call to hostRead. The target amount to read is + * dest's length. + * @param offset the offset to start reading from + * @param dest where to write the data. + * @param stream the stream to do the copy on. + * @return the actual number of bytes written to dest. 
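+ * <p>
+ * Illustrative override sketch only (not part of the original patch): a subclass whose
+ * bytes already live in a {@code HostMemoryBuffer} named {@code src} could implement
+ * this with the same stream-ordered copy used by MultiBufferDataSource below:
+ * <pre>{@code
+ * @Override
+ * public long deviceRead(long offset, DeviceMemoryBuffer dest, Cuda.Stream stream) {
+ *   long amount = Math.min(dest.getLength(), size() - offset);
+ *   dest.copyFromHostBufferAsync(0, src, offset, amount, stream);
+ *   return amount;
+ * }
+ * }</pre>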
+ */ + public long deviceRead(long offset, DeviceMemoryBuffer dest, + Cuda.Stream stream) throws IOException { + throw new IllegalStateException("Device read is not implemented"); + } + + ///////////////////////////////////////////////// + // Internal methods called from JNI + ///////////////////////////////////////////////// + + private static class NoopCleaner extends MemoryBuffer.MemoryBufferCleaner { + @Override + protected boolean cleanImpl(boolean logErrorIfNotClean) { + return true; + } + + @Override + public boolean isClean() { + return true; + } + } + private static final NoopCleaner cleaner = new NoopCleaner(); + + // Called from JNI + private void onHostBufferDone(long bufferId) { + HostMemoryBuffer hmb = cachedBuffers.remove(bufferId); + if (hmb != null) { + onHostBufferDone(hmb); + } else { + // Called from C++ destructor so avoid throwing... + log.warn("Got a close callback for a buffer we could not find " + bufferId); + } + } + + // Called from JNI + private long hostRead(long offset, long amount, long dst) throws IOException { + if (amount < 0) { + throw new IllegalArgumentException("Cannot allocate more than " + Long.MAX_VALUE + " bytes"); + } + try (HostMemoryBuffer dstBuffer = new HostMemoryBuffer(dst, amount, cleaner)) { + return hostRead(offset, dstBuffer); + } + } + + // Called from JNI + private long[] hostReadBuff(long offset, long amount) throws IOException { + if (amount < 0) { + throw new IllegalArgumentException("Cannot read more than " + Long.MAX_VALUE + " bytes"); + } + HostMemoryBuffer buff = hostRead(offset, amount); + long[] ret = new long[3]; + if (buff != null) { + long id = buff.id; + if (cachedBuffers.put(id, buff) != null) { + throw new IllegalStateException("Already had a buffer cached for " + buff); + } + ret[0] = buff.address; + ret[1] = buff.length; + ret[2] = id; + } // else they are all 0 because java does that already + return ret; + } + + // Called from JNI + private long deviceRead(long offset, long amount, long dst, long stream) throws IOException { + if (amount < 0) { + throw new IllegalArgumentException("Cannot read more than " + Long.MAX_VALUE + " bytes"); + } + Cuda.Stream strm = Cuda.Stream.wrap(stream); + try (DeviceMemoryBuffer dstBuffer = new DeviceMemoryBuffer(dst, amount, cleaner)) { + return deviceRead(offset, dstBuffer, strm); + } + } +} diff --git a/java/src/main/java/ai/rapids/cudf/DataSourceHelper.java b/java/src/main/java/ai/rapids/cudf/DataSourceHelper.java new file mode 100644 index 00000000000..5d4dcb8e4e7 --- /dev/null +++ b/java/src/main/java/ai/rapids/cudf/DataSourceHelper.java @@ -0,0 +1,44 @@ +/* + * + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package ai.rapids.cudf; + +/** + * This is here because we need some JNI methods to work with a DataSource, but + * we also want to cache callback methods at startup for performance reasons. If + * we put both in the same class we will get a deadlock because of how we load + * the JNI. 
We have a static block that blocks loading the class until the JNI + * library is loaded and the JNI library cannot load until the class is loaded + * and cached. This breaks the loop. + */ +class DataSourceHelper { + static { + NativeDepsLoader.loadNativeDeps(); + } + + static long createWrapperDataSource(DataSource ds) { + return createWrapperDataSource(ds, ds.size(), ds.supportsDeviceRead(), + ds.getDeviceReadCutoff()); + } + + private static native long createWrapperDataSource(DataSource ds, long size, + boolean deviceReadSupport, + long deviceReadCutoff); + + static native void destroyWrapperDataSource(long handle); +} diff --git a/java/src/main/java/ai/rapids/cudf/DeviceMemoryBuffer.java b/java/src/main/java/ai/rapids/cudf/DeviceMemoryBuffer.java index c4d9bdb8f91..9eab607ed0b 100644 --- a/java/src/main/java/ai/rapids/cudf/DeviceMemoryBuffer.java +++ b/java/src/main/java/ai/rapids/cudf/DeviceMemoryBuffer.java @@ -1,6 +1,6 @@ /* * - * Copyright (c) 2019-2021, NVIDIA CORPORATION. + * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -112,6 +112,10 @@ public static DeviceMemoryBuffer fromRmm(long address, long lengthInBytes, long return new DeviceMemoryBuffer(address, lengthInBytes, rmmBufferAddress); } + DeviceMemoryBuffer(long address, long lengthInBytes, MemoryBufferCleaner cleaner) { + super(address, lengthInBytes, cleaner); + } + DeviceMemoryBuffer(long address, long lengthInBytes, long rmmBufferAddress) { super(address, lengthInBytes, new RmmDeviceBufferCleaner(rmmBufferAddress)); } diff --git a/java/src/main/java/ai/rapids/cudf/MultiBufferDataSource.java b/java/src/main/java/ai/rapids/cudf/MultiBufferDataSource.java new file mode 100644 index 00000000000..6986b6a7fec --- /dev/null +++ b/java/src/main/java/ai/rapids/cudf/MultiBufferDataSource.java @@ -0,0 +1,230 @@ +/* + * + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package ai.rapids.cudf; + +/** + * This is a DataSource that can take multiple HostMemoryBuffers. They + * are treated as if they are all part of a single file connected end to end. + */ +public class MultiBufferDataSource extends DataSource { + private final long sizeInBytes; + private final HostMemoryBuffer[] hostBuffers; + private final long[] startOffsets; + private final HostMemoryAllocator allocator; + + // Metrics + private long hostReads = 0; + private long hostReadBytes = 0; + private long devReads = 0; + private long devReadBytes = 0; + + /** + * Create a new data source backed by multiple buffers. + * @param buffers the buffers that will back the data source. + */ + public MultiBufferDataSource(HostMemoryBuffer ... buffers) { + this(DefaultHostMemoryAllocator.get(), buffers); + } + + /** + * Create a new data source backed by multiple buffers. + * @param allocator the allocator to use for host buffers, if needed. 
+ * @param buffers the buffers that will back the data source. + */ + public MultiBufferDataSource(HostMemoryAllocator allocator, HostMemoryBuffer ... buffers) { + int numBuffers = buffers.length; + hostBuffers = new HostMemoryBuffer[numBuffers]; + startOffsets = new long[numBuffers]; + + long currentOffset = 0; + for (int i = 0; i < numBuffers; i++) { + HostMemoryBuffer hmb = buffers[i]; + hmb.incRefCount(); + hostBuffers[i] = hmb; + startOffsets[i] = currentOffset; + currentOffset += hmb.getLength(); + } + sizeInBytes = currentOffset; + this.allocator = allocator; + } + + @Override + public long size() { + return sizeInBytes; + } + + private int getStartBufferIndexForOffset(long offset) { + assert (offset >= 0); + + // It is super common to read from the start or end of a file (the header or footer) + // so special case them + if (offset == 0) { + return 0; + } + int startIndex = 0; + int endIndex = startOffsets.length - 1; + if (offset >= startOffsets[endIndex]) { + return endIndex; + } + while (startIndex != endIndex) { + int midIndex = (int)(((long)startIndex + endIndex) / 2); + long midStartOffset = startOffsets[midIndex]; + if (offset >= midStartOffset) { + // It is either in mid or after mid. + if (midIndex == endIndex || offset <= startOffsets[midIndex + 1]) { + // We found it in mid + return midIndex; + } else { + // It is after mid + startIndex = midIndex + 1; + } + } else { + // It is before mid + endIndex = midIndex - 1; + } + } + return startIndex; + } + + + interface DoCopy { + void copyFromHostBuffer(T dest, long destOffset, HostMemoryBuffer src, + long srcOffset, long srcAmount); + } + + private long read(long offset, T dest, DoCopy doCopy) { + assert (offset >= 0); + long realOffset = Math.min(offset, sizeInBytes); + long realAmount = Math.min(sizeInBytes - realOffset, dest.getLength()); + + int index = getStartBufferIndexForOffset(realOffset); + + HostMemoryBuffer buffer = hostBuffers[index]; + long bufferOffset = realOffset - startOffsets[index]; + long bufferAmount = Math.min(buffer.length - bufferOffset, realAmount); + long remainingAmount = realAmount; + long currentOffset = realOffset; + long outputOffset = 0; + + while (remainingAmount > 0) { + doCopy.copyFromHostBuffer(dest, outputOffset, buffer, + bufferOffset, bufferAmount); + remainingAmount -= bufferAmount; + outputOffset += bufferAmount; + currentOffset += bufferAmount; + index++; + if (index < hostBuffers.length) { + buffer = hostBuffers[index]; + bufferOffset = currentOffset - startOffsets[index]; + bufferAmount = Math.min(buffer.length - bufferOffset, remainingAmount); + } + } + + return realAmount; + } + + @Override + public HostMemoryBuffer hostRead(long offset, long amount) { + assert (offset >= 0); + assert (amount >= 0); + long realOffset = Math.min(offset, sizeInBytes); + long realAmount = Math.min(sizeInBytes - realOffset, amount); + + int index = getStartBufferIndexForOffset(realOffset); + + HostMemoryBuffer buffer = hostBuffers[index]; + long bufferOffset = realOffset - startOffsets[index]; + long bufferAmount = Math.min(buffer.length - bufferOffset, realAmount); + if (bufferAmount == realAmount) { + hostReads += 1; + hostReadBytes += realAmount; + // It all fits in a single buffer, so do a zero copy operation + return buffer.slice(bufferOffset, bufferAmount); + } else { + // We will have to allocate a new buffer and copy data into it. 
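+ // The request straddles at least two backing buffers, so a single zero-copy slice is
+ // impossible: allocate one buffer (pinned if the allocator can provide it) for the
+ // full amount, let read() stitch the bytes together from each backing buffer in turn,
+ // and close the allocation on any failure so it is never leaked to the caller.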
+ boolean success = false; + HostMemoryBuffer ret = allocator.allocate(realAmount, true); + try { + long amountRead = read(offset, ret, HostMemoryBuffer::copyFromHostBuffer); + assert(amountRead == realAmount); + hostReads += 1; + hostReadBytes += amountRead; + success = true; + return ret; + } finally { + if (!success) { + ret.close(); + } + } + } + } + + @Override + public long hostRead(long offset, HostMemoryBuffer dest) { + long ret = read(offset, dest, HostMemoryBuffer::copyFromHostBuffer); + hostReads += 1; + hostReadBytes += ret; + return ret; + } + + @Override + public boolean supportsDeviceRead() { + return true; + } + + @Override + public long deviceRead(long offset, DeviceMemoryBuffer dest, + Cuda.Stream stream) { + long ret = read(offset, dest, (destParam, destOffset, src, srcOffset, srcAmount) -> + destParam.copyFromHostBufferAsync(destOffset, src, srcOffset, srcAmount, stream)); + devReads += 1; + devReadBytes += ret; + return ret; + } + + + @Override + public void close() { + try { + super.close(); + } finally { + for (HostMemoryBuffer hmb: hostBuffers) { + if (hmb != null) { + hmb.close(); + } + } + } + } + + public long getHostReads() { + return hostReads; + } + + public long getHostReadBytes() { + return hostReadBytes; + } + + public long getDevReads() { + return devReads; + } + + public long getDevReadBytes() { + return devReadBytes; + } +} diff --git a/java/src/main/java/ai/rapids/cudf/ParquetChunkedReader.java b/java/src/main/java/ai/rapids/cudf/ParquetChunkedReader.java index c34336ac73f..17d59b757c3 100644 --- a/java/src/main/java/ai/rapids/cudf/ParquetChunkedReader.java +++ b/java/src/main/java/ai/rapids/cudf/ParquetChunkedReader.java @@ -1,6 +1,6 @@ /* * - * Copyright (c) 2022, NVIDIA CORPORATION. + * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -51,7 +51,7 @@ public ParquetChunkedReader(long chunkSizeByteLimit, ParquetOptions opts, File f handle = create(chunkSizeByteLimit, opts.getIncludeColumnNames(), opts.getReadBinaryAsString(), filePath.getAbsolutePath(), 0, 0, opts.timeUnit().typeId.getNativeId()); - if(handle == 0) { + if (handle == 0) { throw new IllegalStateException("Cannot create native chunked Parquet reader object."); } } @@ -71,18 +71,45 @@ public ParquetChunkedReader(long chunkSizeByteLimit, ParquetOptions opts, HostMe handle = create(chunkSizeByteLimit, opts.getIncludeColumnNames(), opts.getReadBinaryAsString(), null, buffer.getAddress() + offset, len, opts.timeUnit().typeId.getNativeId()); - if(handle == 0) { + if (handle == 0) { throw new IllegalStateException("Cannot create native chunked Parquet reader object."); } } + /** + * Construct a reader instance from a DataSource + * @param chunkSizeByteLimit Limit on total number of bytes to be returned per read, + * or 0 if there is no limit. + * @param opts The options for Parquet reading. 
+ * @param ds the data source to read from + */ + public ParquetChunkedReader(long chunkSizeByteLimit, ParquetOptions opts, DataSource ds) { + dataSourceHandle = DataSourceHelper.createWrapperDataSource(ds); + if (dataSourceHandle == 0) { + throw new IllegalStateException("Cannot create native datasource object"); + } + + boolean passed = false; + try { + handle = createWithDataSource(chunkSizeByteLimit, opts.getIncludeColumnNames(), + opts.getReadBinaryAsString(), opts.timeUnit().typeId.getNativeId(), + dataSourceHandle); + passed = true; + } finally { + if (!passed) { + DataSourceHelper.destroyWrapperDataSource(dataSourceHandle); + dataSourceHandle = 0; + } + } + } + /** * Check if the given file has anything left to read. * * @return A boolean value indicating if there is more data to read from file. */ public boolean hasNext() { - if(handle == 0) { + if (handle == 0) { throw new IllegalStateException("Native chunked Parquet reader object may have been closed."); } @@ -104,7 +131,7 @@ public boolean hasNext() { * @return A table of new rows reading from the given file. */ public Table readChunk() { - if(handle == 0) { + if (handle == 0) { throw new IllegalStateException("Native chunked Parquet reader object may have been closed."); } @@ -118,6 +145,10 @@ public void close() { close(handle); handle = 0; } + if (dataSourceHandle != 0) { + DataSourceHelper.destroyWrapperDataSource(dataSourceHandle); + dataSourceHandle = 0; + } } @@ -131,6 +162,7 @@ public void close() { */ private long handle; + private long dataSourceHandle = 0; /** * Create a native chunked Parquet reader object on heap and return its memory address. @@ -147,6 +179,9 @@ public void close() { private static native long create(long chunkSizeByteLimit, String[] filterColumnNames, boolean[] binaryToString, String filePath, long bufferAddrs, long length, int timeUnit); + private static native long createWithDataSource(long chunkedSizeByteLimit, + String[] filterColumnNames, boolean[] binaryToString, int timeUnit, long dataSourceHandle); + private static native boolean hasNext(long handle); private static native long[] readChunk(long handle); diff --git a/java/src/main/java/ai/rapids/cudf/Table.java b/java/src/main/java/ai/rapids/cudf/Table.java index 51a33ebb72f..3bd1e3f25a7 100644 --- a/java/src/main/java/ai/rapids/cudf/Table.java +++ b/java/src/main/java/ai/rapids/cudf/Table.java @@ -235,6 +235,14 @@ private static native long[] readCSV(String[] columnNames, byte comment, String[] nullValues, String[] trueValues, String[] falseValues) throws CudfException; + private static native long[] readCSVFromDataSource(String[] columnNames, + int[] dTypeIds, int[] dTypeScales, + String[] filterColumnNames, + int headerRow, byte delim, int quoteStyle, byte quote, + byte comment, String[] nullValues, + String[] trueValues, String[] falseValues, + long dataSourceHandle) throws CudfException; + /** * read JSON data and return a pointer to a TableWithMeta object. 
*/ @@ -244,6 +252,12 @@ private static native long readJSON(String[] columnNames, boolean dayFirst, boolean lines, boolean recoverWithNulls) throws CudfException; + private static native long readJSONFromDataSource(String[] columnNames, + int[] dTypeIds, int[] dTypeScales, + boolean dayFirst, boolean lines, + boolean recoverWithNulls, + long dsHandle) throws CudfException; + private static native long readAndInferJSON(long address, long length, boolean dayFirst, boolean lines, boolean recoverWithNulls) throws CudfException; @@ -260,6 +274,10 @@ private static native long readAndInferJSON(long address, long length, private static native long[] readParquet(String[] filterColumnNames, boolean[] binaryToString, String filePath, long address, long length, int timeUnit) throws CudfException; + private static native long[] readParquetFromDataSource(String[] filterColumnNames, + boolean[] binaryToString, int timeUnit, + long dataSourceHandle) throws CudfException; + /** * Read in Avro formatted data. * @param filterColumnNames name of the columns to read, or an empty array if we want to read @@ -271,6 +289,9 @@ private static native long[] readParquet(String[] filterColumnNames, boolean[] b private static native long[] readAvro(String[] filterColumnNames, String filePath, long address, long length) throws CudfException; + private static native long[] readAvroFromDataSource(String[] filterColumnNames, + long dataSourceHandle) throws CudfException; + /** * Setup everything to write parquet formatted data to a file. * @param columnNames names that correspond to the table columns @@ -372,6 +393,11 @@ private static native long[] readORC(String[] filterColumnNames, boolean usingNumPyTypes, int timeUnit, String[] decimal128Columns) throws CudfException; + private static native long[] readORCFromDataSource(String[] filterColumnNames, + boolean usingNumPyTypes, int timeUnit, + String[] decimal128Columns, + long dataSourceHandle) throws CudfException; + /** * Setup everything to write ORC formatted data to a file. * @param columnNames names that correspond to the table columns @@ -881,6 +907,27 @@ public static Table readCSV(Schema schema, CSVOptions opts, HostMemoryBuffer buf opts.getFalseValues())); } + public static Table readCSV(Schema schema, CSVOptions opts, DataSource ds) { + long dsHandle = DataSourceHelper.createWrapperDataSource(ds); + try { + return new Table(readCSVFromDataSource(schema.getColumnNames(), + schema.getTypeIds(), + schema.getTypeScales(), + opts.getIncludeColumnNames(), + opts.getHeaderRow(), + opts.getDelim(), + opts.getQuoteStyle().nativeId, + opts.getQuote(), + opts.getComment(), + opts.getNullValues(), + opts.getTrueValues(), + opts.getFalseValues(), + dsHandle)); + } finally { + DataSourceHelper.destroyWrapperDataSource(dsHandle); + } + } + private static native void writeCSVToFile(long table, String[] columnNames, boolean includeHeader, @@ -1128,6 +1175,24 @@ public static Table readJSON(Schema schema, JSONOptions opts, HostMemoryBuffer b } } + /** + * Read JSON formatted data. + * @param schema the schema of the data. You may use Schema.INFERRED to infer the schema. + * @param opts various JSON parsing options. + * @param ds the DataSource to read from. + * @return the data parsed as a table on the GPU. 
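+ * <p>
+ * Illustrative usage sketch (not part of the original patch; assumes {@code jsonBuf} is
+ * a HostMemoryBuffer holding JSON-lines data):
+ * <pre>{@code
+ * JSONOptions opts = JSONOptions.builder().withLines(true).build();
+ * try (MultiBufferDataSource ds = new MultiBufferDataSource(jsonBuf);
+ *      Table t = Table.readJSON(Schema.INFERRED, opts, ds)) {
+ *   // use the table
+ * }
+ * }</pre>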
+ */ + public static Table readJSON(Schema schema, JSONOptions opts, DataSource ds) { + long dsHandle = DataSourceHelper.createWrapperDataSource(ds); + try (TableWithMeta twm = new TableWithMeta(readJSONFromDataSource(schema.getColumnNames(), + schema.getTypeIds(), schema.getTypeScales(), opts.isDayFirst(), opts.isLines(), + opts.isRecoverWithNull(), dsHandle))) { + return gatherJSONColumns(schema, twm); + } finally { + DataSourceHelper.destroyWrapperDataSource(dsHandle); + } + } + /** * Read a Parquet file using the default ParquetOptions. * @param path the local file to read. @@ -1214,6 +1279,17 @@ public static Table readParquet(ParquetOptions opts, HostMemoryBuffer buffer, null, buffer.getAddress() + offset, len, opts.timeUnit().typeId.getNativeId())); } + public static Table readParquet(ParquetOptions opts, DataSource ds) { + long dataSourceHandle = DataSourceHelper.createWrapperDataSource(ds); + try { + return new Table(readParquetFromDataSource(opts.getIncludeColumnNames(), + opts.getReadBinaryAsString(), opts.timeUnit().typeId.getNativeId(), + dataSourceHandle)); + } finally { + DataSourceHelper.destroyWrapperDataSource(dataSourceHandle); + } + } + /** * Read an Avro file using the default AvroOptions. * @param path the local file to read. @@ -1297,6 +1373,16 @@ public static Table readAvro(AvroOptions opts, HostMemoryBuffer buffer, null, buffer.getAddress() + offset, len)); } + public static Table readAvro(AvroOptions opts, DataSource ds) { + long dataSourceHandle = DataSourceHelper.createWrapperDataSource(ds); + try { + return new Table(readAvroFromDataSource(opts.getIncludeColumnNames(), + dataSourceHandle)); + } finally { + DataSourceHelper.destroyWrapperDataSource(dataSourceHandle); + } + } + /** * Read a ORC file using the default ORCOptions. * @param path the local file to read. @@ -1388,6 +1474,17 @@ public static Table readORC(ORCOptions opts, HostMemoryBuffer buffer, opts.getDecimal128Columns())); } + public static Table readORC(ORCOptions opts, DataSource ds) { + long dataSourceHandle = DataSourceHelper.createWrapperDataSource(ds); + try { + return new Table(readORCFromDataSource(opts.getIncludeColumnNames(), + opts.usingNumPyTypes(), opts.timeUnit().typeId.getNativeId(), + opts.getDecimal128Columns(), dataSourceHandle)); + } finally { + DataSourceHelper.destroyWrapperDataSource(dataSourceHandle); + } + } + private static class ParquetTableWriter extends TableWriter { HostBufferConsumer consumer; @@ -2262,7 +2359,7 @@ public Table dropDuplicates(int[] keyColumns, DuplicateKeepOption keep, boolean /** * Count how many rows in the table are distinct from one another. - * @param nullEqual if nulls should be considered equal to each other or not. + * @param nullsEqual if nulls should be considered equal to each other or not. 
*/ public int distinctCount(NullEquality nullsEqual) { return distinctCount(nativeHandle, nullsEqual.nullsEqual); } diff --git a/java/src/main/native/CMakeLists.txt b/java/src/main/native/CMakeLists.txt index 0dcfee2cffe..01161a03dd4 100644 --- a/java/src/main/native/CMakeLists.txt +++ b/java/src/main/native/CMakeLists.txt @@ -135,6 +135,7 @@ add_library( src/ColumnViewJni.cu src/CompiledExpression.cpp src/ContiguousTableJni.cpp + src/DataSourceHelperJni.cpp src/HashJoinJni.cpp src/HostMemoryBufferNativeUtilsJni.cpp src/NvcompJni.cpp diff --git a/java/src/main/native/src/ChunkedReaderJni.cpp b/java/src/main/native/src/ChunkedReaderJni.cpp index 8d0a8bdbfe7..0044385f267 100644 --- a/java/src/main/native/src/ChunkedReaderJni.cpp +++ b/java/src/main/native/src/ChunkedReaderJni.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, NVIDIA CORPORATION. + * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -85,6 +85,40 @@ JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ParquetChunkedReader_create( CATCH_STD(env, 0); } +JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ParquetChunkedReader_createWithDataSource( + JNIEnv *env, jclass, jlong chunk_read_limit, jobjectArray filter_col_names, + jbooleanArray j_col_binary_read, jint unit, jlong ds_handle) { + JNI_NULL_CHECK(env, j_col_binary_read, "Null col_binary_read", 0); + JNI_NULL_CHECK(env, ds_handle, "Null DataSource", 0); + + try { + cudf::jni::auto_set_device(env); + + cudf::jni::native_jstringArray n_filter_col_names(env, filter_col_names); + + // TODO: This variable is unused now, but we still don't know what to do with it yet. + // As such, it needs to stay here for a little more time before we decide to use it again, + // or remove it completely. + cudf::jni::native_jbooleanArray n_col_binary_read(env, j_col_binary_read); + (void)n_col_binary_read; + + auto ds = reinterpret_cast<cudf::io::datasource *>(ds_handle); + cudf::io::source_info source{ds}; + + auto opts_builder = cudf::io::parquet_reader_options::builder(source); + if (n_filter_col_names.size() > 0) { + opts_builder = opts_builder.columns(n_filter_col_names.as_cpp_vector()); + } + auto const read_opts = opts_builder.convert_strings_to_categories(false) + .timestamp_type(cudf::data_type(static_cast<cudf::type_id>(unit))) + .build(); + + return reinterpret_cast<jlong>(new cudf::io::chunked_parquet_reader( + static_cast<std::size_t>(chunk_read_limit), read_opts)); + } + CATCH_STD(env, 0); +} + JNIEXPORT jboolean JNICALL Java_ai_rapids_cudf_ParquetChunkedReader_hasNext(JNIEnv *env, jclass, jlong handle) { JNI_NULL_CHECK(env, handle, "handle is null", false); diff --git a/java/src/main/native/src/CudfJni.cpp b/java/src/main/native/src/CudfJni.cpp index 0f143086451..d0a25d449a6 100644 --- a/java/src/main/native/src/CudfJni.cpp +++ b/java/src/main/native/src/CudfJni.cpp @@ -175,6 +175,14 @@ JNIEXPORT jint JNI_OnLoad(JavaVM *vm, void *) { return JNI_ERR; } + if (!cudf::jni::cache_data_source_jni(env)) { + if (!env->ExceptionCheck()) { + env->ThrowNew(env->FindClass("java/lang/RuntimeException"), + "Unable to locate data source helper methods needed by JNI"); + } + return JNI_ERR; + } + return cudf::jni::MINIMUM_JNI_VERSION; } diff --git a/java/src/main/native/src/DataSourceHelperJni.cpp b/java/src/main/native/src/DataSourceHelperJni.cpp new file mode 100644 index 00000000000..8d0e4d36413 --- /dev/null +++ b/java/src/main/native/src/DataSourceHelperJni.cpp @@ -0,0 +1,237 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include "cudf_jni_apis.hpp" +#include "jni_utils.hpp" + +namespace { + +#define DATA_SOURCE_CLASS "ai/rapids/cudf/DataSource" + +jclass DataSource_jclass; +jmethodID hostRead_method; +jmethodID hostReadBuff_method; +jmethodID onHostBufferDone_method; +jmethodID deviceRead_method; + +} // anonymous namespace + +namespace cudf { +namespace jni { +bool cache_data_source_jni(JNIEnv *env) { + jclass cls = env->FindClass(DATA_SOURCE_CLASS); + if (cls == nullptr) { + return false; + } + + hostRead_method = env->GetMethodID(cls, "hostRead", "(JJJ)J"); + if (hostRead_method == nullptr) { + return false; + } + + hostReadBuff_method = env->GetMethodID(cls, "hostReadBuff", "(JJ)[J"); + if (hostReadBuff_method == nullptr) { + return false; + } + + onHostBufferDone_method = env->GetMethodID(cls, "onHostBufferDone", "(J)V"); + if (onHostBufferDone_method == nullptr) { + return false; + } + + deviceRead_method = env->GetMethodID(cls, "deviceRead", "(JJJJ)J"); + if (deviceRead_method == nullptr) { + return false; + } + + // Convert local reference to global so it cannot be garbage collected. + DataSource_jclass = static_cast(env->NewGlobalRef(cls)); + if (DataSource_jclass == nullptr) { + return false; + } + return true; +} + +void release_data_source_jni(JNIEnv *env) { + DataSource_jclass = cudf::jni::del_global_ref(env, DataSource_jclass); +} + +class host_buffer_done_callback { +public: + explicit host_buffer_done_callback(JavaVM *jvm, jobject ds, long id) : jvm(jvm), ds(ds), id(id) {} + + host_buffer_done_callback(host_buffer_done_callback const &other) = delete; + host_buffer_done_callback(host_buffer_done_callback &&other) + : jvm(other.jvm), ds(other.ds), id(other.id) { + other.jvm = nullptr; + other.ds = nullptr; + other.id = -1; + } + + host_buffer_done_callback &operator=(host_buffer_done_callback &&other) = delete; + host_buffer_done_callback &operator=(host_buffer_done_callback const &other) = delete; + + ~host_buffer_done_callback() { + // because we are in a destructor we cannot throw an exception, so for now we are + // just going to keep the java exceptions around and have them be thrown when this + // thread returns to the JVM. It might be kind of confusing, but we will not lose + // them. 
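+ // Note: GetEnv succeeds only if this destructor runs on a thread that is already
+ // attached to the JVM; on a detached thread it returns JNI_EDETACHED and the
+ // callback is skipped, which is why this cleanup is best effort.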
+ if (jvm != nullptr) { + // We cannot throw an exception in the destructor, so this is really best effort + JNIEnv *env = nullptr; + if (jvm->GetEnv(reinterpret_cast<void **>(&env), cudf::jni::MINIMUM_JNI_VERSION) == JNI_OK) { + env->CallVoidMethod(this->ds, onHostBufferDone_method, id); + } + } + } + +private: + JavaVM *jvm; + jobject ds; + long id; +}; + +class jni_datasource : public cudf::io::datasource { +public: + explicit jni_datasource(JNIEnv *env, jobject ds, size_t ds_size, bool device_read_supported, + size_t device_read_cutoff) + : ds_size(ds_size), device_read_supported(device_read_supported), + device_read_cutoff(device_read_cutoff) { + if (env->GetJavaVM(&jvm) < 0) { + throw std::runtime_error("GetJavaVM failed"); + } + this->ds = add_global_ref(env, ds); + } + + virtual ~jni_datasource() { + JNIEnv *env = nullptr; + if (jvm->GetEnv(reinterpret_cast<void **>(&env), cudf::jni::MINIMUM_JNI_VERSION) == JNI_OK) { + ds = del_global_ref(env, ds); + } + ds = nullptr; + } + + std::unique_ptr<cudf::io::datasource::buffer> host_read(size_t offset, size_t size) override { + JNIEnv *env = nullptr; + if (jvm->GetEnv(reinterpret_cast<void **>(&env), cudf::jni::MINIMUM_JNI_VERSION) != JNI_OK) { + throw cudf::jni::jni_exception("Could not load JNIEnv"); + } + + jlongArray jbuffer_info = + static_cast<jlongArray>(env->CallObjectMethod(this->ds, hostReadBuff_method, offset, size)); + if (env->ExceptionOccurred()) { + throw cudf::jni::jni_exception("Java exception in hostRead"); + } + + cudf::jni::native_jlongArray buffer_info(env, jbuffer_info); + auto ptr = reinterpret_cast<uint8_t *>(buffer_info[0]); + size_t length = buffer_info[1]; + long id = buffer_info[2]; + + cudf::jni::host_buffer_done_callback cb(this->jvm, this->ds, id); + return std::make_unique<owning_buffer<cudf::jni::host_buffer_done_callback>>(std::move(cb), ptr, + length); + } + + size_t host_read(size_t offset, size_t size, uint8_t *dst) override { + JNIEnv *env = nullptr; + if (jvm->GetEnv(reinterpret_cast<void **>(&env), cudf::jni::MINIMUM_JNI_VERSION) != JNI_OK) { + throw cudf::jni::jni_exception("Could not load JNIEnv"); + } + + jlong amount_read = + env->CallLongMethod(this->ds, hostRead_method, offset, size, reinterpret_cast<jlong>(dst)); + if (env->ExceptionOccurred()) { + throw cudf::jni::jni_exception("Java exception in hostRead"); + } + return amount_read; + } + + size_t size() const override { return ds_size; } + + bool supports_device_read() const override { return device_read_supported; } + + bool is_device_read_preferred(size_t size) const override { + return device_read_supported && size >= device_read_cutoff; + } + + size_t device_read(size_t offset, size_t size, uint8_t *dst, + rmm::cuda_stream_view stream) override { + JNIEnv *env = nullptr; + if (jvm->GetEnv(reinterpret_cast<void **>(&env), cudf::jni::MINIMUM_JNI_VERSION) != JNI_OK) { + throw cudf::jni::jni_exception("Could not load JNIEnv"); + } + + jlong amount_read = + env->CallLongMethod(this->ds, deviceRead_method, offset, size, reinterpret_cast<jlong>(dst), + reinterpret_cast<jlong>(stream.value())); + if (env->ExceptionOccurred()) { + throw cudf::jni::jni_exception("Java exception in deviceRead"); + } + return amount_read; + } + + std::future<size_t> device_read_async(size_t offset, size_t size, uint8_t *dst, + rmm::cuda_stream_view stream) override { + auto amount_read = device_read(offset, size, dst, stream); + // This is a bit ugly, but we don't have a good way or a need to return + // a future for the read + std::promise<size_t> ret; + ret.set_value(amount_read); + return ret.get_future(); + } + +private: + size_t ds_size; + bool device_read_supported; + size_t device_read_cutoff; + JavaVM *jvm; + jobject ds; +}; +} //
namespace jni +} // namespace cudf + +extern "C" { + +JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_DataSourceHelper_createWrapperDataSource( + JNIEnv *env, jclass, jobject ds, jlong ds_size, jboolean device_read_supported, + jlong device_read_cutoff) { + JNI_NULL_CHECK(env, ds, "Null data source", 0); + try { + cudf::jni::auto_set_device(env); + auto source = + new cudf::jni::jni_datasource(env, ds, ds_size, device_read_supported, device_read_cutoff); + return reinterpret_cast(source); + } + CATCH_STD(env, 0); +} + +JNIEXPORT void JNICALL Java_ai_rapids_cudf_DataSourceHelper_destroyWrapperDataSource(JNIEnv *env, + jclass, + jlong handle) { + try { + cudf::jni::auto_set_device(env); + if (handle != 0) { + auto source = reinterpret_cast(handle); + delete (source); + } + } + CATCH_STD(env, ); +} + +} // extern "C" diff --git a/java/src/main/native/src/TableJni.cpp b/java/src/main/native/src/TableJni.cpp index b208ef8f381..fad19bdf895 100644 --- a/java/src/main/native/src/TableJni.cpp +++ b/java/src/main/native/src/TableJni.cpp @@ -1135,6 +1135,67 @@ JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_merge(JNIEnv *env, jclass CATCH_STD(env, NULL); } +JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_readCSVFromDataSource( + JNIEnv *env, jclass, jobjectArray col_names, jintArray j_types, jintArray j_scales, + jobjectArray filter_col_names, jint header_row, jbyte delim, jint j_quote_style, jbyte quote, + jbyte comment, jobjectArray null_values, jobjectArray true_values, jobjectArray false_values, + jlong ds_handle) { + JNI_NULL_CHECK(env, null_values, "null_values must be supplied, even if it is empty", NULL); + JNI_NULL_CHECK(env, ds_handle, "no data source handle given", NULL); + + try { + cudf::jni::auto_set_device(env); + cudf::jni::native_jstringArray n_col_names(env, col_names); + cudf::jni::native_jintArray n_types(env, j_types); + cudf::jni::native_jintArray n_scales(env, j_scales); + if (n_types.is_null() != n_scales.is_null()) { + JNI_THROW_NEW(env, "java/lang/IllegalArgumentException", "types and scales must match null", + NULL); + } + std::vector data_types; + if (!n_types.is_null()) { + if (n_types.size() != n_scales.size()) { + JNI_THROW_NEW(env, "java/lang/IllegalArgumentException", "types and scales must match size", + NULL); + } + data_types.reserve(n_types.size()); + std::transform(n_types.begin(), n_types.end(), n_scales.begin(), + std::back_inserter(data_types), [](auto type, auto scale) { + return cudf::data_type{static_cast(type), scale}; + }); + } + + cudf::jni::native_jstringArray n_null_values(env, null_values); + cudf::jni::native_jstringArray n_true_values(env, true_values); + cudf::jni::native_jstringArray n_false_values(env, false_values); + cudf::jni::native_jstringArray n_filter_col_names(env, filter_col_names); + + auto ds = reinterpret_cast(ds_handle); + cudf::io::source_info source{ds}; + + auto const quote_style = static_cast(j_quote_style); + + cudf::io::csv_reader_options opts = cudf::io::csv_reader_options::builder(source) + .delimiter(delim) + .header(header_row) + .names(n_col_names.as_cpp_vector()) + .dtypes(data_types) + .use_cols_names(n_filter_col_names.as_cpp_vector()) + .true_values(n_true_values.as_cpp_vector()) + .false_values(n_false_values.as_cpp_vector()) + .na_values(n_null_values.as_cpp_vector()) + .keep_default_na(false) + .na_filter(n_null_values.size() > 0) + .quoting(quote_style) + .quotechar(quote) + .comment(comment) + .build(); + + return convert_table_for_return(env, cudf::io::read_csv(opts).tbl); + } + CATCH_STD(env, NULL); +} 
+ JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_readCSV( JNIEnv *env, jclass, jobjectArray col_names, jintArray j_types, jintArray j_scales, jobjectArray filter_col_names, jstring inputfilepath, jlong buffer, jlong buffer_length, @@ -1407,6 +1468,72 @@ JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_TableWithMeta_releaseTable(JNIE CATCH_STD(env, nullptr); } +JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_readJSONFromDataSource( + JNIEnv *env, jclass, jobjectArray col_names, jintArray j_types, jintArray j_scales, + jboolean day_first, jboolean lines, jboolean recover_with_null, jlong ds_handle) { + + JNI_NULL_CHECK(env, ds_handle, "no data source handle given", 0); + + try { + cudf::jni::auto_set_device(env); + cudf::jni::native_jstringArray n_col_names(env, col_names); + cudf::jni::native_jintArray n_types(env, j_types); + cudf::jni::native_jintArray n_scales(env, j_scales); + if (n_types.is_null() != n_scales.is_null()) { + JNI_THROW_NEW(env, "java/lang/IllegalArgumentException", "types and scales must match null", + 0); + } + std::vector data_types; + if (!n_types.is_null()) { + if (n_types.size() != n_scales.size()) { + JNI_THROW_NEW(env, "java/lang/IllegalArgumentException", "types and scales must match size", + 0); + } + data_types.reserve(n_types.size()); + std::transform(n_types.begin(), n_types.end(), n_scales.begin(), + std::back_inserter(data_types), [](auto const &type, auto const &scale) { + return cudf::data_type{static_cast(type), scale}; + }); + } + + auto ds = reinterpret_cast(ds_handle); + cudf::io::source_info source{ds}; + + cudf::io::json_recovery_mode_t recovery_mode = + recover_with_null ? cudf::io::json_recovery_mode_t::RECOVER_WITH_NULL : + cudf::io::json_recovery_mode_t::FAIL; + cudf::io::json_reader_options_builder opts = cudf::io::json_reader_options::builder(source) + .dayfirst(static_cast(day_first)) + .lines(static_cast(lines)) + .recovery_mode(recovery_mode); + + if (!n_col_names.is_null() && data_types.size() > 0) { + if (n_col_names.size() != n_types.size()) { + JNI_THROW_NEW(env, "java/lang/IllegalArgumentException", + "types and column names must match size", 0); + } + + std::map map; + + auto col_names_vec = n_col_names.as_cpp_vector(); + std::transform(col_names_vec.begin(), col_names_vec.end(), data_types.begin(), + std::inserter(map, map.end()), + [](std::string a, cudf::data_type b) { return std::make_pair(a, b); }); + opts.dtypes(map); + } else if (data_types.size() > 0) { + opts.dtypes(data_types); + } else { + // should infer the types + } + + auto result = + std::make_unique(cudf::io::read_json(opts.build())); + + return reinterpret_cast(result.release()); + } + CATCH_STD(env, 0); +} + JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_readJSON( JNIEnv *env, jclass, jobjectArray col_names, jintArray j_types, jintArray j_scales, jstring inputfilepath, jlong buffer, jlong buffer_length, jboolean day_first, jboolean lines, @@ -1489,6 +1616,36 @@ JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_readJSON( CATCH_STD(env, 0); } +JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_readParquetFromDataSource( + JNIEnv *env, jclass, jobjectArray filter_col_names, jbooleanArray j_col_binary_read, jint unit, + jlong ds_handle) { + + JNI_NULL_CHECK(env, ds_handle, "no data source handle given", 0); + JNI_NULL_CHECK(env, j_col_binary_read, "null col_binary_read", 0); + + try { + cudf::jni::auto_set_device(env); + + cudf::jni::native_jstringArray n_filter_col_names(env, filter_col_names); + cudf::jni::native_jbooleanArray n_col_binary_read(env, 
j_col_binary_read); + + auto ds = reinterpret_cast(ds_handle); + cudf::io::source_info source{ds}; + + auto builder = cudf::io::parquet_reader_options::builder(source); + if (n_filter_col_names.size() > 0) { + builder = builder.columns(n_filter_col_names.as_cpp_vector()); + } + + cudf::io::parquet_reader_options opts = + builder.convert_strings_to_categories(false) + .timestamp_type(cudf::data_type(static_cast(unit))) + .build(); + return convert_table_for_return(env, cudf::io::read_parquet(opts).tbl); + } + CATCH_STD(env, NULL); +} + JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_readParquet( JNIEnv *env, jclass, jobjectArray filter_col_names, jbooleanArray j_col_binary_read, jstring inputfilepath, jlong buffer, jlong buffer_length, jint unit) { @@ -1535,10 +1692,31 @@ JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_readParquet( CATCH_STD(env, NULL); } +JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_readAvroFromDataSource( + JNIEnv *env, jclass, jobjectArray filter_col_names, jlong ds_handle) { + + JNI_NULL_CHECK(env, ds_handle, "no data source handle given", 0); + + try { + cudf::jni::auto_set_device(env); + + cudf::jni::native_jstringArray n_filter_col_names(env, filter_col_names); + + auto ds = reinterpret_cast(ds_handle); + cudf::io::source_info source{ds}; + + cudf::io::avro_reader_options opts = cudf::io::avro_reader_options::builder(source) + .columns(n_filter_col_names.as_cpp_vector()) + .build(); + return convert_table_for_return(env, cudf::io::read_avro(opts).tbl); + } + CATCH_STD(env, NULL); +} + JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_readAvro(JNIEnv *env, jclass, jobjectArray filter_col_names, jstring inputfilepath, jlong buffer, - jlong buffer_length, jint unit) { + jlong buffer_length) { const bool read_buffer = (buffer != 0); if (!read_buffer) { @@ -1715,6 +1893,38 @@ JNIEXPORT void JNICALL Java_ai_rapids_cudf_Table_writeParquetEnd(JNIEnv *env, jc CATCH_STD(env, ) } +JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_readORCFromDataSource( + JNIEnv *env, jclass, jobjectArray filter_col_names, jboolean usingNumPyTypes, jint unit, + jobjectArray dec128_col_names, jlong ds_handle) { + + JNI_NULL_CHECK(env, ds_handle, "no data source handle given", 0); + + try { + cudf::jni::auto_set_device(env); + + cudf::jni::native_jstringArray n_filter_col_names(env, filter_col_names); + + cudf::jni::native_jstringArray n_dec128_col_names(env, dec128_col_names); + + auto ds = reinterpret_cast(ds_handle); + cudf::io::source_info source{ds}; + + auto builder = cudf::io::orc_reader_options::builder(source); + if (n_filter_col_names.size() > 0) { + builder = builder.columns(n_filter_col_names.as_cpp_vector()); + } + + cudf::io::orc_reader_options opts = + builder.use_index(false) + .use_np_dtypes(static_cast(usingNumPyTypes)) + .timestamp_type(cudf::data_type(static_cast(unit))) + .decimal128_columns(n_dec128_col_names.as_cpp_vector()) + .build(); + return convert_table_for_return(env, cudf::io::read_orc(opts).tbl); + } + CATCH_STD(env, NULL); +} + JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_readORC( JNIEnv *env, jclass, jobjectArray filter_col_names, jstring inputfilepath, jlong buffer, jlong buffer_length, jboolean usingNumPyTypes, jint unit, jobjectArray dec128_col_names) { diff --git a/java/src/main/native/src/cudf_jni_apis.hpp b/java/src/main/native/src/cudf_jni_apis.hpp index 867df80b722..bd82bbd2899 100644 --- a/java/src/main/native/src/cudf_jni_apis.hpp +++ b/java/src/main/native/src/cudf_jni_apis.hpp @@ -134,5 +134,13 @@ void 
auto_set_device(JNIEnv *env); */ void device_memset_async(JNIEnv *env, rmm::device_buffer &buf, char value); +// +// DataSource APIs +// + +bool cache_data_source_jni(JNIEnv *env); + +void release_data_source_jni(JNIEnv *env); + } // namespace jni } // namespace cudf diff --git a/java/src/test/java/ai/rapids/cudf/TableTest.java b/java/src/test/java/ai/rapids/cudf/TableTest.java index faa73ac4322..b0dd4122b0e 100644 --- a/java/src/test/java/ai/rapids/cudf/TableTest.java +++ b/java/src/test/java/ai/rapids/cudf/TableTest.java @@ -327,6 +327,25 @@ void testReadJSONFile() { } } + @Test + void testReadJSONFromDataSource() throws IOException { + Schema schema = Schema.builder() + .column(DType.STRING, "name") + .column(DType.INT32, "age") + .build(); + JSONOptions opts = JSONOptions.builder() + .withLines(true) + .build(); + try (Table expected = new Table.TestBuilder() + .column("Michael", "Andy", "Justin") + .column(null, 30, 19) + .build(); + MultiBufferDataSource source = sourceFrom(TEST_SIMPLE_JSON_FILE); + Table table = Table.readJSON(schema, opts, source)) { + assertTablesAreEqual(expected, table); + } + } + @Test void testReadJSONFileWithInvalidLines() { Schema schema = Schema.builder() @@ -560,6 +579,126 @@ void testReadCSVBuffer() { } } + byte[][] sliceBytes(byte[] data, int slices) { + slices = Math.min(data.length, slices); + // We are not going to worry about making it super even here. + // The last one gets the extras. + int bytesPerSlice = data.length / slices; + byte[][] ret = new byte[slices][]; + int startingAt = 0; + for (int i = 0; i < (slices - 1); i++) { + ret[i] = new byte[bytesPerSlice]; + System.arraycopy(data, startingAt, ret[i], 0, bytesPerSlice); + startingAt += bytesPerSlice; + } + // Now for the last one + ret[slices - 1] = new byte[data.length - startingAt]; + System.arraycopy(data, startingAt, ret[slices - 1], 0, data.length - startingAt); + return ret; + } + + @Test + void testReadCSVBufferMultiBuffer() { + CSVOptions opts = CSVOptions.builder() + .includeColumn("A") + .includeColumn("B") + .hasHeader() + .withDelim('|') + .withQuote('\'') + .withNullValue("NULL") + .build(); + byte[][] data = sliceBytes(CSV_DATA_BUFFER, 10); + try (Table expected = new Table.TestBuilder() + .column(0, 1, 2, 3, 4, 5, 6, 7, 8, 9) + .column(110.0, 111.0, 112.0, 113.0, 114.0, 115.0, 116.0, null, 118.2, 119.8) + .build(); + MultiBufferDataSource source = sourceFrom(data); + Table table = Table.readCSV(TableTest.CSV_DATA_BUFFER_SCHEMA, opts, source)) { + assertTablesAreEqual(expected, table); + } + } + + public static byte[] arrayFrom(File f) throws IOException { + long len = f.length(); + if (len > Integer.MAX_VALUE) { + throw new IllegalArgumentException("Sorry cannot read " + f + + " into an array it does not fit"); + } + int remaining = (int)len; + byte[] ret = new byte[remaining]; + try (java.io.FileInputStream fin = new java.io.FileInputStream(f)) { + int at = 0; + while (remaining > 0) { + int amount = fin.read(ret, at, remaining); + at += amount; + remaining -= amount; + } + } + return ret; + } + + public static MultiBufferDataSource sourceFrom(File f) throws IOException { + long len = f.length(); + byte[] tmp = new byte[(int)Math.min(32 * 1024, len)]; + try (HostMemoryBuffer buffer = HostMemoryBuffer.allocate(len)) { + try (java.io.FileInputStream fin = new java.io.FileInputStream(f)) { + long at = 0; + while (at < len) { + int amount = fin.read(tmp); + buffer.setBytes(at, tmp, 0, amount); + at += amount; + } + } + return new MultiBufferDataSource(buffer); + } + } + + public 
static MultiBufferDataSource sourceFrom(byte[] data) { + long len = data.length; + try (HostMemoryBuffer buffer = HostMemoryBuffer.allocate(len)) { + buffer.setBytes(0, data, 0, len); + return new MultiBufferDataSource(buffer); + } + } + + public static MultiBufferDataSource sourceFrom(byte[][] data) { + HostMemoryBuffer[] buffers = new HostMemoryBuffer[data.length]; + try { + for (int i = 0; i < data.length; i++) { + byte[] subData = data[i]; + buffers[i] = HostMemoryBuffer.allocate(subData.length); + buffers[i].setBytes(0, subData, 0, subData.length); + } + return new MultiBufferDataSource(buffers); + } finally { + for (HostMemoryBuffer buffer: buffers) { + if (buffer != null) { + buffer.close(); + } + } + } + } + + @Test + void testReadCSVDataSource() { + CSVOptions opts = CSVOptions.builder() + .includeColumn("A") + .includeColumn("B") + .hasHeader() + .withDelim('|') + .withQuote('\'') + .withNullValue("NULL") + .build(); + try (Table expected = new Table.TestBuilder() + .column(0, 1, 2, 3, 4, 5, 6, 7, 8, 9) + .column(110.0, 111.0, 112.0, 113.0, 114.0, 115.0, 116.0, null, 118.2, 119.8) + .build(); + MultiBufferDataSource source = sourceFrom(TableTest.CSV_DATA_BUFFER); + Table table = Table.readCSV(TableTest.CSV_DATA_BUFFER_SCHEMA, opts, source)) { + assertTablesAreEqual(expected, table); + } + } + @Test void testReadCSVWithOffset() { CSVOptions opts = CSVOptions.builder() @@ -864,6 +1003,37 @@ void testReadParquet() { } } + @Test + void testReadParquetFromDataSource() throws IOException { + ParquetOptions opts = ParquetOptions.builder() + .includeColumn("loan_id") + .includeColumn("zip") + .includeColumn("num_units") + .build(); + try (MultiBufferDataSource source = sourceFrom(TEST_PARQUET_FILE); + Table table = Table.readParquet(opts, source)) { + long rows = table.getRowCount(); + assertEquals(1000, rows); + assertTableTypes(new DType[]{DType.INT64, DType.INT32, DType.INT32}, table); + } + } + + @Test + void testReadParquetMultiBuffer() throws IOException { + ParquetOptions opts = ParquetOptions.builder() + .includeColumn("loan_id") + .includeColumn("zip") + .includeColumn("num_units") + .build(); + byte [][] data = sliceBytes(arrayFrom(TEST_PARQUET_FILE), 10); + try (MultiBufferDataSource source = sourceFrom(data); + Table table = Table.readParquet(opts, source)) { + long rows = table.getRowCount(); + assertEquals(1000, rows); + assertTableTypes(new DType[]{DType.INT64, DType.INT32, DType.INT32}, table); + } + } + @Test void testReadParquetBinary() { ParquetOptions opts = ParquetOptions.builder() @@ -1018,6 +1188,23 @@ void testChunkedReadParquet() { } } + @Test + void testChunkedReadParquetFromDataSource() throws IOException { + try (MultiBufferDataSource source = sourceFrom(TEST_PARQUET_FILE_CHUNKED_READ); + ParquetChunkedReader reader = new ParquetChunkedReader(240000, ParquetOptions.DEFAULT, source)) { + int numChunks = 0; + long totalRows = 0; + while(reader.hasNext()) { + ++numChunks; + try(Table chunk = reader.readChunk()) { + totalRows += chunk.getRowCount(); + } + } + assertEquals(2, numChunks); + assertEquals(40000, totalRows); + } + } + @Test void testReadAvro() { AvroOptions opts = AvroOptions.builder() @@ -1037,6 +1224,26 @@ void testReadAvro() { } } + @Test + void testReadAvroFromDataSource() throws IOException { + AvroOptions opts = AvroOptions.builder() + .includeColumn("bool_col") + .includeColumn("int_col") + .includeColumn("timestamp_col") + .build(); + + try (Table expected = new Table.TestBuilder() + .column(true, false, true, false, true, false, true, false) + 
.column(0, 1, 0, 1, 0, 1, 0, 1) + .column(1235865600000000L, 1235865660000000L, 1238544000000000L, 1238544060000000L, + 1233446400000000L, 1233446460000000L, 1230768000000000L, 1230768060000000L) + .build(); + MultiBufferDataSource source = sourceFrom(TEST_ALL_TYPES_PLAIN_AVRO_FILE); + Table table = Table.readAvro(opts, source)) { + assertTablesAreEqual(expected, table); + } + } + @Test void testReadAvroBuffer() throws IOException{ AvroOptions opts = AvroOptions.builder() @@ -1094,6 +1301,24 @@ void testReadORC() { } } + @Test + void testReadORCFromDataSource() throws IOException { + ORCOptions opts = ORCOptions.builder() + .includeColumn("string1") + .includeColumn("float1") + .includeColumn("int1") + .build(); + try (Table expected = new Table.TestBuilder() + .column("hi","bye") + .column(1.0f,2.0f) + .column(65536,65536) + .build(); + MultiBufferDataSource source = sourceFrom(TEST_ORC_FILE); + Table table = Table.readORC(opts, source)) { + assertTablesAreEqual(expected, table); + } + } + @Test void testReadORCBuffer() throws IOException { ORCOptions opts = ORCOptions.builder() From 15baa00693ab4aa59f99ccb417c613880789d047 Mon Sep 17 00:00:00 2001 From: Elias Stehle <3958403+elstehle@users.noreply.github.com> Date: Wed, 11 Oct 2023 11:31:39 +0200 Subject: [PATCH 143/150] Fixes behaviour for incomplete lines when `recover_with_nulls` is enabled (#14252) Closes https://github.com/rapidsai/cudf/issues/14227. Adapts the behaviour of the JSON finite-state transducer (FST) when `recover_with_nulls` is `true` to be more strict and reject lines that contain incomplete JSON objects (aka records) or JSON arrays (aka lists). Authors: - Elias Stehle (https://github.com/elstehle) Approvers: - Vukasin Milovanovic (https://github.com/vuule) - Nghia Truong (https://github.com/ttnghia) URL: https://github.com/rapidsai/cudf/pull/14252 --- cpp/src/io/json/nested_json_gpu.cu | 703 +++++++++++++++-------------- cpp/tests/io/json_test.cpp | 45 +- cpp/tests/io/nested_json_test.cpp | 23 +- 3 files changed, 401 insertions(+), 370 deletions(-) diff --git a/cpp/src/io/json/nested_json_gpu.cu b/cpp/src/io/json/nested_json_gpu.cu index 06ac11485cb..c9107357239 100644 --- a/cpp/src/io/json/nested_json_gpu.cu +++ b/cpp/src/io/json/nested_json_gpu.cu @@ -660,13 +660,13 @@ auto get_transition_table(json_format_cfg_t format) PD_ERR, PD_ERR, PD_ERR, PD_PVL, PD_ERR, PD_ERR, PD_BOV, PD_ERR, PD_PVL, PD_BOV, PD_LON, PD_ERR, PD_ERR, PD_PVL, PD_ERR, PD_ERR, PD_ERR, PD_BFN, PD_ERR, PD_PVL, PD_BOV, PD_LON}; pda_tt[static_cast(pda_state_t::PD_STR)] = { - PD_STR, PD_STR, PD_STR, PD_STR, PD_PVL, PD_SCE, PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, - PD_STR, PD_STR, PD_STR, PD_STR, PD_PVL, PD_SCE, PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, - PD_STR, PD_STR, PD_STR, PD_STR, PD_PVL, PD_SCE, PD_STR, PD_STR, PD_STR, PD_STR, PD_STR}; + PD_STR, PD_STR, PD_STR, PD_STR, PD_PVL, PD_SCE, PD_STR, PD_STR, PD_STR, PD_BOV, PD_STR, + PD_STR, PD_STR, PD_STR, PD_STR, PD_PVL, PD_SCE, PD_STR, PD_STR, PD_STR, PD_BOV, PD_STR, + PD_STR, PD_STR, PD_STR, PD_STR, PD_PVL, PD_SCE, PD_STR, PD_STR, PD_STR, PD_BOV, PD_STR}; pda_tt[static_cast(pda_state_t::PD_SCE)] = { - PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, - PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, - PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, PD_STR}; + PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, PD_BOV, PD_STR, + PD_STR, 
PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, PD_BOV, PD_STR,
+    PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, PD_STR, PD_BOV, PD_STR};
   pda_tt[static_cast<StateT>(pda_state_t::PD_PVL)] = {
     PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_PVL, PD_BOV, PD_ERR,
     PD_ERR, PD_ERR, PD_ERR, PD_PVL, PD_ERR, PD_ERR, PD_BOV, PD_ERR, PD_PVL, PD_BOV, PD_ERR,
@@ -680,9 +680,9 @@ auto get_transition_table(json_format_cfg_t format)
     PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_BOV, PD_ERR,
     PD_FLN, PD_FLN, PD_FLN, PD_FLN, PD_PFN, PD_FNE, PD_FLN, PD_FLN, PD_FLN, PD_BOV, PD_FLN};
   pda_tt[static_cast<StateT>(pda_state_t::PD_FNE)] = {
-    PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR,
-    PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR,
-    PD_FLN, PD_FLN, PD_FLN, PD_FLN, PD_FLN, PD_FLN, PD_FLN, PD_FLN, PD_FLN, PD_FLN, PD_FLN};
+    PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_BOV, PD_ERR,
+    PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_BOV, PD_ERR,
+    PD_FLN, PD_FLN, PD_FLN, PD_FLN, PD_FLN, PD_FLN, PD_FLN, PD_FLN, PD_FLN, PD_BOV, PD_FLN};
   pda_tt[static_cast<StateT>(pda_state_t::PD_PFN)] = {
     PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_BOV, PD_ERR,
     PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_ERR, PD_BOV, PD_ERR,
@@ -697,8 +697,11 @@ auto get_transition_table(json_format_cfg_t format)
 
 /**
  * @brief Getting the translation table
+ * @param recover_from_error Whether or not the tokenizer should recover from invalid lines. If
+ * `recover_from_error` is true, invalid JSON lines end with the token sequence (`ErrorBegin`,
+ * `LineEnd`) and incomplete JSON lines (e.g., `{"a":123\n`) are treated as invalid lines.
  */
-auto get_translation_table(bool include_line_delimiter)
+auto get_translation_table(bool recover_from_error)
 {
   constexpr auto StructBegin = token_t::StructBegin;
   constexpr auto StructEnd = token_t::StructEnd;
@@ -715,76 +718,83 @@ auto get_translation_table(bool include_line_delimiter)
   constexpr auto ErrorBegin = token_t::ErrorBegin;
 
   /**
-   * @brief Appends token_t::LineEnd token to the given token sequence, if and only if
-   * `include_line_delimiter` is true.
+   * @brief Instead of specifying the verbose translation tables twice (i.e., once when
+   * `recover_from_error` is true and once when it is false), we use `nl_tokens` to specialize the
+   * translation table where it differs depending on the `recover_from_error` option. If and only if
+   * `recover_from_error` is true, `recovering_tokens` are returned along with a token_t::LineEnd
+   * token, otherwise `regular_tokens` is returned.
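+   *
+   * For example, while a number literal is being parsed the LINE_BREAK entries below read
+   * `nl_tokens({ValueEnd}, {ErrorBegin})`: without recovery the newline simply ends the value,
+   * whereas with recovery enabled it instead emits (`ErrorBegin`, `LineEnd`), so an incomplete
+   * line such as `{"a":123\n` is rejected as invalid.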
*/ - auto nl_tokens = [include_line_delimiter](std::vector tokens) { - if (include_line_delimiter) { tokens.push_back(token_t::LineEnd); } - return tokens; + auto nl_tokens = [recover_from_error](std::vector regular_tokens, + std::vector recovering_tokens) { + if (recover_from_error) { + recovering_tokens.push_back(token_t::LineEnd); + return recovering_tokens; + } + return regular_tokens; }; std::array, NUM_PDA_SGIDS>, PD_NUM_STATES> pda_tlt; - pda_tlt[static_cast(pda_state_t::PD_BOV)] = {{ /*ROOT*/ + pda_tlt[static_cast(pda_state_t::PD_BOV)] = {{ /*ROOT*/ + {StructBegin}, // OPENING_BRACE + {ListBegin}, // OPENING_BRACKET + {ErrorBegin}, // CLOSING_BRACE + {ErrorBegin}, // CLOSING_BRACKET + {StringBegin}, // QUOTE + {ErrorBegin}, // ESCAPE + {ErrorBegin}, // COMMA + {ErrorBegin}, // COLON + {}, // WHITE_SPACE + nl_tokens({}, {}), // LINE_BREAK + {ValueBegin}, // OTHER + /*LIST*/ {StructBegin}, // OPENING_BRACE {ListBegin}, // OPENING_BRACKET {ErrorBegin}, // CLOSING_BRACE - {ErrorBegin}, // CLOSING_BRACKET + {ListEnd}, // CLOSING_BRACKET {StringBegin}, // QUOTE {ErrorBegin}, // ESCAPE {ErrorBegin}, // COMMA {ErrorBegin}, // COLON {}, // WHITE_SPACE - nl_tokens({}), // LINE_BREAK - {ValueBegin}, // OTHER - /*LIST*/ + nl_tokens({}, {ErrorBegin}), // LINE_BREAK + {ValueBegin}, // OTHER + /*STRUCT*/ {StructBegin}, // OPENING_BRACE {ListBegin}, // OPENING_BRACKET {ErrorBegin}, // CLOSING_BRACE - {ListEnd}, // CLOSING_BRACKET + {ErrorBegin}, // CLOSING_BRACKET {StringBegin}, // QUOTE {ErrorBegin}, // ESCAPE {ErrorBegin}, // COMMA {ErrorBegin}, // COLON {}, // WHITE_SPACE - nl_tokens({}), // LINE_BREAK - {ValueBegin}, // OTHER - /*STRUCT*/ - {StructBegin}, // OPENING_BRACE - {ListBegin}, // OPENING_BRACKET - {ErrorBegin}, // CLOSING_BRACE - {ErrorBegin}, // CLOSING_BRACKET - {StringBegin}, // QUOTE - {ErrorBegin}, // ESCAPE - {ErrorBegin}, // COMMA - {ErrorBegin}, // COLON - {}, // WHITE_SPACE - nl_tokens({}), // LINE_BREAK - {ValueBegin}}}; // OTHER + nl_tokens({}, {ErrorBegin}), // LINE_BREAK + {ValueBegin}}}; // OTHER pda_tlt[static_cast(pda_state_t::PD_BOA)] = { - { /*ROOT*/ - {ErrorBegin}, // OPENING_BRACE - {ErrorBegin}, // OPENING_BRACKET - {ErrorBegin}, // CLOSING_BRACE - {ErrorBegin}, // CLOSING_BRACKET - {ErrorBegin}, // QUOTE - {ErrorBegin}, // ESCAPE - {ErrorBegin}, // COMMA - {ErrorBegin}, // COLON - {ErrorBegin}, // WHITE_SPACE - nl_tokens({ErrorBegin}), // LINE_BREAK - {ErrorBegin}, // OTHER + { /*ROOT*/ + {ErrorBegin}, // OPENING_BRACE + {ErrorBegin}, // OPENING_BRACKET + {ErrorBegin}, // CLOSING_BRACE + {ErrorBegin}, // CLOSING_BRACKET + {ErrorBegin}, // QUOTE + {ErrorBegin}, // ESCAPE + {ErrorBegin}, // COMMA + {ErrorBegin}, // COLON + {ErrorBegin}, // WHITE_SPACE + nl_tokens({ErrorBegin}, {ErrorBegin}), // LINE_BREAK + {ErrorBegin}, // OTHER /*LIST*/ - {StructBegin}, // OPENING_BRACE - {ListBegin}, // OPENING_BRACKET - {ErrorBegin}, // CLOSING_BRACE - {ListEnd}, // CLOSING_BRACKET - {StringBegin}, // QUOTE - {ErrorBegin}, // ESCAPE - {ErrorBegin}, // COMMA - {ErrorBegin}, // COLON - {}, // WHITE_SPACE - nl_tokens({}), // LINE_BREAK - {ValueBegin}, // OTHER + {StructBegin}, // OPENING_BRACE + {ListBegin}, // OPENING_BRACKET + {ErrorBegin}, // CLOSING_BRACE + {ListEnd}, // CLOSING_BRACKET + {StringBegin}, // QUOTE + {ErrorBegin}, // ESCAPE + {ErrorBegin}, // COMMA + {ErrorBegin}, // COLON + {}, // WHITE_SPACE + nl_tokens({}, {ErrorBegin}), // LINE_BREAK + {ValueBegin}, // OTHER /*STRUCT*/ {ErrorBegin}, // OPENING_BRACE {ErrorBegin}, // OPENING_BRACKET @@ -795,33 +805,33 @@ auto 
get_translation_table(bool include_line_delimiter) {ErrorBegin}, // COMMA {ErrorBegin}, // COLON {}, // WHITE_SPACE - nl_tokens({}), // LINE_BREAK + nl_tokens({}, {ErrorBegin}), // LINE_BREAK {ErrorBegin}}}; // OTHER pda_tlt[static_cast(pda_state_t::PD_LON)] = { - { /*ROOT*/ - {ErrorBegin}, // OPENING_BRACE - {ErrorBegin}, // OPENING_BRACKET - {ErrorBegin}, // CLOSING_BRACE - {ErrorBegin}, // CLOSING_BRACKET - {ErrorBegin}, // QUOTE - {ErrorBegin}, // ESCAPE - {ErrorBegin}, // COMMA - {ErrorBegin}, // COLON - {ValueEnd}, // WHITE_SPACE - nl_tokens({ValueEnd}), // LINE_BREAK - {}, // OTHER + { /*ROOT*/ + {ErrorBegin}, // OPENING_BRACE + {ErrorBegin}, // OPENING_BRACKET + {ErrorBegin}, // CLOSING_BRACE + {ErrorBegin}, // CLOSING_BRACKET + {ErrorBegin}, // QUOTE + {ErrorBegin}, // ESCAPE + {ErrorBegin}, // COMMA + {ErrorBegin}, // COLON + {ValueEnd}, // WHITE_SPACE + nl_tokens({ValueEnd}, {ErrorBegin}), // LINE_BREAK + {}, // OTHER /*LIST*/ - {ErrorBegin}, // OPENING_BRACE - {ErrorBegin}, // OPENING_BRACKET - {ErrorBegin}, // CLOSING_BRACE - {ValueEnd, ListEnd}, // CLOSING_BRACKET - {ErrorBegin}, // QUOTE - {ErrorBegin}, // ESCAPE - {ValueEnd}, // COMMA - {ErrorBegin}, // COLON - {ValueEnd}, // WHITE_SPACE - nl_tokens({ValueEnd}), // LINE_BREAK - {}, // OTHER + {ErrorBegin}, // OPENING_BRACE + {ErrorBegin}, // OPENING_BRACKET + {ErrorBegin}, // CLOSING_BRACE + {ValueEnd, ListEnd}, // CLOSING_BRACKET + {ErrorBegin}, // QUOTE + {ErrorBegin}, // ESCAPE + {ValueEnd}, // COMMA + {ErrorBegin}, // COLON + {ValueEnd}, // WHITE_SPACE + nl_tokens({ValueEnd}, {ErrorBegin}), // LINE_BREAK + {}, // OTHER /*STRUCT*/ {ErrorBegin}, // OPENING_BRACE {ErrorBegin}, // OPENING_BRACKET @@ -832,108 +842,108 @@ auto get_translation_table(bool include_line_delimiter) {ValueEnd, StructMemberEnd}, // COMMA {ErrorBegin}, // COLON {ValueEnd}, // WHITE_SPACE - nl_tokens({ValueEnd}), // LINE_BREAK + nl_tokens({ValueEnd}, {ErrorBegin}), // LINE_BREAK {}}}; // OTHER - pda_tlt[static_cast(pda_state_t::PD_STR)] = {{ /*ROOT*/ - {}, // OPENING_BRACE - {}, // OPENING_BRACKET - {}, // CLOSING_BRACE - {}, // CLOSING_BRACKET - {StringEnd}, // QUOTE - {}, // ESCAPE - {}, // COMMA - {}, // COLON - {}, // WHITE_SPACE - nl_tokens({}), // LINE_BREAK - {}, // OTHER + pda_tlt[static_cast(pda_state_t::PD_STR)] = {{ /*ROOT*/ + {}, // OPENING_BRACE + {}, // OPENING_BRACKET + {}, // CLOSING_BRACE + {}, // CLOSING_BRACKET + {StringEnd}, // QUOTE + {}, // ESCAPE + {}, // COMMA + {}, // COLON + {}, // WHITE_SPACE + nl_tokens({}, {ErrorBegin}), // LINE_BREAK + {}, // OTHER /*LIST*/ - {}, // OPENING_BRACE - {}, // OPENING_BRACKET - {}, // CLOSING_BRACE - {}, // CLOSING_BRACKET - {StringEnd}, // QUOTE - {}, // ESCAPE - {}, // COMMA - {}, // COLON - {}, // WHITE_SPACE - nl_tokens({}), // LINE_BREAK - {}, // OTHER + {}, // OPENING_BRACE + {}, // OPENING_BRACKET + {}, // CLOSING_BRACE + {}, // CLOSING_BRACKET + {StringEnd}, // QUOTE + {}, // ESCAPE + {}, // COMMA + {}, // COLON + {}, // WHITE_SPACE + nl_tokens({}, {ErrorBegin}), // LINE_BREAK + {}, // OTHER /*STRUCT*/ - {}, // OPENING_BRACE - {}, // OPENING_BRACKET - {}, // CLOSING_BRACE - {}, // CLOSING_BRACKET - {StringEnd}, // QUOTE - {}, // ESCAPE - {}, // COMMA - {}, // COLON - {}, // WHITE_SPACE - nl_tokens({}), // LINE_BREAK - {}}}; // OTHER - - pda_tlt[static_cast(pda_state_t::PD_SCE)] = {{ /*ROOT*/ - {}, // OPENING_BRACE - {}, // OPENING_BRACKET - {}, // CLOSING_BRACE - {}, // CLOSING_BRACKET - {}, // QUOTE - {}, // ESCAPE - {}, // COMMA - {}, // COLON - {}, // WHITE_SPACE - nl_tokens({}), // 
LINE_BREAK - {}, // OTHER + {}, // OPENING_BRACE + {}, // OPENING_BRACKET + {}, // CLOSING_BRACE + {}, // CLOSING_BRACKET + {StringEnd}, // QUOTE + {}, // ESCAPE + {}, // COMMA + {}, // COLON + {}, // WHITE_SPACE + nl_tokens({}, {ErrorBegin}), // LINE_BREAK + {}}}; // OTHER + + pda_tlt[static_cast(pda_state_t::PD_SCE)] = {{ /*ROOT*/ + {}, // OPENING_BRACE + {}, // OPENING_BRACKET + {}, // CLOSING_BRACE + {}, // CLOSING_BRACKET + {}, // QUOTE + {}, // ESCAPE + {}, // COMMA + {}, // COLON + {}, // WHITE_SPACE + nl_tokens({}, {ErrorBegin}), // LINE_BREAK + {}, // OTHER /*LIST*/ - {}, // OPENING_BRACE - {}, // OPENING_BRACKET - {}, // CLOSING_BRACE - {}, // CLOSING_BRACKET - {}, // QUOTE - {}, // ESCAPE - {}, // COMMA - {}, // COLON - {}, // WHITE_SPACE - nl_tokens({}), // LINE_BREAK - {}, // OTHER + {}, // OPENING_BRACE + {}, // OPENING_BRACKET + {}, // CLOSING_BRACE + {}, // CLOSING_BRACKET + {}, // QUOTE + {}, // ESCAPE + {}, // COMMA + {}, // COLON + {}, // WHITE_SPACE + nl_tokens({}, {ErrorBegin}), // LINE_BREAK + {}, // OTHER /*STRUCT*/ - {}, // OPENING_BRACE - {}, // OPENING_BRACKET - {}, // CLOSING_BRACE - {}, // CLOSING_BRACKET - {}, // QUOTE - {}, // ESCAPE - {}, // COMMA - {}, // COLON - {}, // WHITE_SPACE - nl_tokens({}), // LINE_BREAK - {}}}; // OTHER + {}, // OPENING_BRACE + {}, // OPENING_BRACKET + {}, // CLOSING_BRACE + {}, // CLOSING_BRACKET + {}, // QUOTE + {}, // ESCAPE + {}, // COMMA + {}, // COLON + {}, // WHITE_SPACE + nl_tokens({}, {ErrorBegin}), // LINE_BREAK + {}}}; // OTHER pda_tlt[static_cast(pda_state_t::PD_PVL)] = { - { /*ROOT*/ - {ErrorBegin}, // OPENING_BRACE - {ErrorBegin}, // OPENING_BRACKET - {ErrorBegin}, // CLOSING_BRACE - {ErrorBegin}, // CLOSING_BRACKET - {ErrorBegin}, // QUOTE - {ErrorBegin}, // ESCAPE - {ErrorBegin}, // COMMA - {ErrorBegin}, // COLON - {}, // WHITE_SPACE - nl_tokens({}), // LINE_BREAK - {ErrorBegin}, // OTHER + { /*ROOT*/ + {ErrorBegin}, // OPENING_BRACE + {ErrorBegin}, // OPENING_BRACKET + {ErrorBegin}, // CLOSING_BRACE + {ErrorBegin}, // CLOSING_BRACKET + {ErrorBegin}, // QUOTE + {ErrorBegin}, // ESCAPE + {ErrorBegin}, // COMMA + {ErrorBegin}, // COLON + {}, // WHITE_SPACE + nl_tokens({}, {}), // LINE_BREAK + {ErrorBegin}, // OTHER /*LIST*/ - {ErrorBegin}, // OPENING_BRACE - {ErrorBegin}, // OPENING_BRACKET - {ErrorBegin}, // CLOSING_BRACE - {ListEnd}, // CLOSING_BRACKET - {ErrorBegin}, // QUOTE - {ErrorBegin}, // ESCAPE - {}, // COMMA - {ErrorBegin}, // COLON - {}, // WHITE_SPACE - nl_tokens({}), // LINE_BREAK - {ErrorBegin}, // OTHER + {ErrorBegin}, // OPENING_BRACE + {ErrorBegin}, // OPENING_BRACKET + {ErrorBegin}, // CLOSING_BRACE + {ListEnd}, // CLOSING_BRACKET + {ErrorBegin}, // QUOTE + {ErrorBegin}, // ESCAPE + {}, // COMMA + {ErrorBegin}, // COLON + {}, // WHITE_SPACE + nl_tokens({}, {ErrorBegin}), // LINE_BREAK + {ErrorBegin}, // OTHER /*STRUCT*/ {ErrorBegin}, // OPENING_BRACE {ErrorBegin}, // OPENING_BRACKET @@ -944,34 +954,34 @@ auto get_translation_table(bool include_line_delimiter) {StructMemberEnd}, // COMMA {ErrorBegin}, // COLON {}, // WHITE_SPACE - nl_tokens({}), // LINE_BREAK + nl_tokens({}, {ErrorBegin}), // LINE_BREAK {ErrorBegin}}}; // OTHER pda_tlt[static_cast(pda_state_t::PD_BFN)] = { - { /*ROOT*/ - {ErrorBegin}, // OPENING_BRACE - {ErrorBegin}, // OPENING_BRACKET - {ErrorBegin}, // CLOSING_BRACE - {ErrorBegin}, // CLOSING_BRACKET - {ErrorBegin}, // QUOTE - {ErrorBegin}, // ESCAPE - {ErrorBegin}, // COMMA - {ErrorBegin}, // COLON - {ErrorBegin}, // WHITE_SPACE - nl_tokens({ErrorBegin}), // LINE_BREAK - 
{ErrorBegin}, // OTHER + { /*ROOT*/ + {ErrorBegin}, // OPENING_BRACE + {ErrorBegin}, // OPENING_BRACKET + {ErrorBegin}, // CLOSING_BRACE + {ErrorBegin}, // CLOSING_BRACKET + {ErrorBegin}, // QUOTE + {ErrorBegin}, // ESCAPE + {ErrorBegin}, // COMMA + {ErrorBegin}, // COLON + {ErrorBegin}, // WHITE_SPACE + nl_tokens({ErrorBegin}, {ErrorBegin}), // LINE_BREAK + {ErrorBegin}, // OTHER /*LIST*/ - {ErrorBegin}, // OPENING_BRACE - {ErrorBegin}, // OPENING_BRACKET - {ErrorBegin}, // CLOSING_BRACE - {ErrorBegin}, // CLOSING_BRACKET - {ErrorBegin}, // QUOTE - {ErrorBegin}, // ESCAPE - {ErrorBegin}, // COMMA - {ErrorBegin}, // COLON - {ErrorBegin}, // WHITE_SPACE - nl_tokens({ErrorBegin}), // LINE_BREAK - {ErrorBegin}, // OTHER + {ErrorBegin}, // OPENING_BRACE + {ErrorBegin}, // OPENING_BRACKET + {ErrorBegin}, // CLOSING_BRACE + {ErrorBegin}, // CLOSING_BRACKET + {ErrorBegin}, // QUOTE + {ErrorBegin}, // ESCAPE + {ErrorBegin}, // COMMA + {ErrorBegin}, // COLON + {ErrorBegin}, // WHITE_SPACE + nl_tokens({ErrorBegin}, {ErrorBegin}), // LINE_BREAK + {ErrorBegin}, // OTHER /*STRUCT*/ {ErrorBegin}, // OPENING_BRACE {ErrorBegin}, // OPENING_BRACKET @@ -982,156 +992,159 @@ auto get_translation_table(bool include_line_delimiter) {ErrorBegin}, // COMMA {ErrorBegin}, // COLON {}, // WHITE_SPACE - nl_tokens({}), // LINE_BREAK + nl_tokens({}, {ErrorBegin}), // LINE_BREAK {ErrorBegin}}}; // OTHER - pda_tlt[static_cast(pda_state_t::PD_FLN)] = {{ /*ROOT*/ - {ErrorBegin}, // OPENING_BRACE - {ErrorBegin}, // OPENING_BRACKET - {ErrorBegin}, // CLOSING_BRACE - {ErrorBegin}, // CLOSING_BRACKET - {ErrorBegin}, // QUOTE - {ErrorBegin}, // ESCAPE - {ErrorBegin}, // COMMA - {ErrorBegin}, // COLON - {ErrorBegin}, // WHITE_SPACE - nl_tokens({ErrorBegin}), // LINE_BREAK - {ErrorBegin}, // OTHER - /*LIST*/ - {ErrorBegin}, // OPENING_BRACE - {ErrorBegin}, // OPENING_BRACKET - {ErrorBegin}, // CLOSING_BRACE - {ErrorBegin}, // CLOSING_BRACKET - {ErrorBegin}, // QUOTE - {ErrorBegin}, // ESCAPE - {ErrorBegin}, // COMMA - {ErrorBegin}, // COLON - {ErrorBegin}, // WHITE_SPACE - nl_tokens({ErrorBegin}), // LINE_BREAK - {ErrorBegin}, // OTHER - /*STRUCT*/ - {}, // OPENING_BRACE - {}, // OPENING_BRACKET - {}, // CLOSING_BRACE - {}, // CLOSING_BRACKET - {FieldNameEnd}, // QUOTE - {}, // ESCAPE - {}, // COMMA - {}, // COLON - {}, // WHITE_SPACE - nl_tokens({}), // LINE_BREAK - {}}}; // OTHER - - pda_tlt[static_cast(pda_state_t::PD_FNE)] = {{ /*ROOT*/ - {ErrorBegin}, // OPENING_BRACE - {ErrorBegin}, // OPENING_BRACKET - {ErrorBegin}, // CLOSING_BRACE - {ErrorBegin}, // CLOSING_BRACKET - {ErrorBegin}, // QUOTE - {ErrorBegin}, // ESCAPE - {ErrorBegin}, // COMMA - {ErrorBegin}, // COLON - {ErrorBegin}, // WHITE_SPACE - nl_tokens({ErrorBegin}), // LINE_BREAK - {ErrorBegin}, // OTHER - /*LIST*/ - {ErrorBegin}, // OPENING_BRACE - {ErrorBegin}, // OPENING_BRACKET - {ErrorBegin}, // CLOSING_BRACE - {ErrorBegin}, // CLOSING_BRACKET - {ErrorBegin}, // QUOTE - {ErrorBegin}, // ESCAPE - {ErrorBegin}, // COMMA - {ErrorBegin}, // COLON - {ErrorBegin}, // WHITE_SPACE - nl_tokens({ErrorBegin}), // LINE_BREAK - {ErrorBegin}, // OTHER - /*STRUCT*/ - {}, // OPENING_BRACE - {}, // OPENING_BRACKET - {}, // CLOSING_BRACE - {}, // CLOSING_BRACKET - {}, // QUOTE - {}, // ESCAPE - {}, // COMMA - {}, // COLON - {}, // WHITE_SPACE - nl_tokens({}), // LINE_BREAK - {}}}; // OTHER - - pda_tlt[static_cast(pda_state_t::PD_PFN)] = {{ /*ROOT*/ - {ErrorBegin}, // OPENING_BRACE - {ErrorBegin}, // OPENING_BRACKET - {ErrorBegin}, // CLOSING_BRACE - {ErrorBegin}, // 
CLOSING_BRACKET - {ErrorBegin}, // QUOTE - {ErrorBegin}, // ESCAPE - {ErrorBegin}, // COMMA - {ErrorBegin}, // COLON - {ErrorBegin}, // WHITE_SPACE - nl_tokens({ErrorBegin}), // LINE_BREAK - {ErrorBegin}, // OTHER - /*LIST*/ - {ErrorBegin}, // OPENING_BRACE - {ErrorBegin}, // OPENING_BRACKET - {ErrorBegin}, // CLOSING_BRACE - {ErrorBegin}, // CLOSING_BRACKET - {ErrorBegin}, // QUOTE - {ErrorBegin}, // ESCAPE - {ErrorBegin}, // COMMA - {ErrorBegin}, // COLON - {ErrorBegin}, // WHITE_SPACE - nl_tokens({ErrorBegin}), // LINE_BREAK - {ErrorBegin}, // OTHER - /*STRUCT*/ - {ErrorBegin}, // OPENING_BRACE - {ErrorBegin}, // OPENING_BRACKET - {ErrorBegin}, // CLOSING_BRACE - {ErrorBegin}, // CLOSING_BRACKET - {ErrorBegin}, // QUOTE - {ErrorBegin}, // ESCAPE - {ErrorBegin}, // COMMA - {}, // COLON - {}, // WHITE_SPACE - nl_tokens({}), // LINE_BREAK - {ErrorBegin}}}; // OTHER - - pda_tlt[static_cast(pda_state_t::PD_ERR)] = {{ /*ROOT*/ - {}, // OPENING_BRACE - {}, // OPENING_BRACKET - {}, // CLOSING_BRACE - {}, // CLOSING_BRACKET - {}, // QUOTE - {}, // ESCAPE - {}, // COMMA - {}, // COLON - {}, // WHITE_SPACE - nl_tokens({}), // LINE_BREAK - {}, // OTHER + pda_tlt[static_cast(pda_state_t::PD_FLN)] = { + { /*ROOT*/ + {ErrorBegin}, // OPENING_BRACE + {ErrorBegin}, // OPENING_BRACKET + {ErrorBegin}, // CLOSING_BRACE + {ErrorBegin}, // CLOSING_BRACKET + {ErrorBegin}, // QUOTE + {ErrorBegin}, // ESCAPE + {ErrorBegin}, // COMMA + {ErrorBegin}, // COLON + {ErrorBegin}, // WHITE_SPACE + nl_tokens({ErrorBegin}, {ErrorBegin}), // LINE_BREAK + {ErrorBegin}, // OTHER + /*LIST*/ + {ErrorBegin}, // OPENING_BRACE + {ErrorBegin}, // OPENING_BRACKET + {ErrorBegin}, // CLOSING_BRACE + {ErrorBegin}, // CLOSING_BRACKET + {ErrorBegin}, // QUOTE + {ErrorBegin}, // ESCAPE + {ErrorBegin}, // COMMA + {ErrorBegin}, // COLON + {ErrorBegin}, // WHITE_SPACE + nl_tokens({ErrorBegin}, {ErrorBegin}), // LINE_BREAK + {ErrorBegin}, // OTHER + /*STRUCT*/ + {}, // OPENING_BRACE + {}, // OPENING_BRACKET + {}, // CLOSING_BRACE + {}, // CLOSING_BRACKET + {FieldNameEnd}, // QUOTE + {}, // ESCAPE + {}, // COMMA + {}, // COLON + {}, // WHITE_SPACE + nl_tokens({}, {ErrorBegin}), // LINE_BREAK + {}}}; // OTHER + + pda_tlt[static_cast(pda_state_t::PD_FNE)] = { + { /*ROOT*/ + {ErrorBegin}, // OPENING_BRACE + {ErrorBegin}, // OPENING_BRACKET + {ErrorBegin}, // CLOSING_BRACE + {ErrorBegin}, // CLOSING_BRACKET + {ErrorBegin}, // QUOTE + {ErrorBegin}, // ESCAPE + {ErrorBegin}, // COMMA + {ErrorBegin}, // COLON + {ErrorBegin}, // WHITE_SPACE + nl_tokens({ErrorBegin}, {ErrorBegin}), // LINE_BREAK + {ErrorBegin}, // OTHER + /*LIST*/ + {ErrorBegin}, // OPENING_BRACE + {ErrorBegin}, // OPENING_BRACKET + {ErrorBegin}, // CLOSING_BRACE + {ErrorBegin}, // CLOSING_BRACKET + {ErrorBegin}, // QUOTE + {ErrorBegin}, // ESCAPE + {ErrorBegin}, // COMMA + {ErrorBegin}, // COLON + {ErrorBegin}, // WHITE_SPACE + nl_tokens({ErrorBegin}, {ErrorBegin}), // LINE_BREAK + {ErrorBegin}, // OTHER + /*STRUCT*/ + {}, // OPENING_BRACE + {}, // OPENING_BRACKET + {}, // CLOSING_BRACE + {}, // CLOSING_BRACKET + {}, // QUOTE + {}, // ESCAPE + {}, // COMMA + {}, // COLON + {}, // WHITE_SPACE + nl_tokens({}, {ErrorBegin}), // LINE_BREAK + {}}}; // OTHER + + pda_tlt[static_cast(pda_state_t::PD_PFN)] = { + { /*ROOT*/ + {ErrorBegin}, // OPENING_BRACE + {ErrorBegin}, // OPENING_BRACKET + {ErrorBegin}, // CLOSING_BRACE + {ErrorBegin}, // CLOSING_BRACKET + {ErrorBegin}, // QUOTE + {ErrorBegin}, // ESCAPE + {ErrorBegin}, // COMMA + {ErrorBegin}, // COLON + {ErrorBegin}, // WHITE_SPACE + 
nl_tokens({ErrorBegin}, {ErrorBegin}), // LINE_BREAK + {ErrorBegin}, // OTHER + /*LIST*/ + {ErrorBegin}, // OPENING_BRACE + {ErrorBegin}, // OPENING_BRACKET + {ErrorBegin}, // CLOSING_BRACE + {ErrorBegin}, // CLOSING_BRACKET + {ErrorBegin}, // QUOTE + {ErrorBegin}, // ESCAPE + {ErrorBegin}, // COMMA + {ErrorBegin}, // COLON + {ErrorBegin}, // WHITE_SPACE + nl_tokens({ErrorBegin}, {ErrorBegin}), // LINE_BREAK + {ErrorBegin}, // OTHER + /*STRUCT*/ + {ErrorBegin}, // OPENING_BRACE + {ErrorBegin}, // OPENING_BRACKET + {ErrorBegin}, // CLOSING_BRACE + {ErrorBegin}, // CLOSING_BRACKET + {ErrorBegin}, // QUOTE + {ErrorBegin}, // ESCAPE + {ErrorBegin}, // COMMA + {}, // COLON + {}, // WHITE_SPACE + nl_tokens({}, {ErrorBegin}), // LINE_BREAK + {ErrorBegin}}}; // OTHER + + pda_tlt[static_cast(pda_state_t::PD_ERR)] = {{ /*ROOT*/ + {}, // OPENING_BRACE + {}, // OPENING_BRACKET + {}, // CLOSING_BRACE + {}, // CLOSING_BRACKET + {}, // QUOTE + {}, // ESCAPE + {}, // COMMA + {}, // COLON + {}, // WHITE_SPACE + nl_tokens({}, {}), // LINE_BREAK + {}, // OTHER /*LIST*/ - {}, // OPENING_BRACE - {}, // OPENING_BRACKET - {}, // CLOSING_BRACE - {}, // CLOSING_BRACKET - {}, // QUOTE - {}, // ESCAPE - {}, // COMMA - {}, // COLON - {}, // WHITE_SPACE - nl_tokens({}), // LINE_BREAK - {}, // OTHER + {}, // OPENING_BRACE + {}, // OPENING_BRACKET + {}, // CLOSING_BRACE + {}, // CLOSING_BRACKET + {}, // QUOTE + {}, // ESCAPE + {}, // COMMA + {}, // COLON + {}, // WHITE_SPACE + nl_tokens({}, {}), // LINE_BREAK + {}, // OTHER /*STRUCT*/ - {}, // OPENING_BRACE - {}, // OPENING_BRACKET - {}, // CLOSING_BRACE - {}, // CLOSING_BRACKET - {}, // QUOTE - {}, // ESCAPE - {}, // COMMA - {}, // COLON - {}, // WHITE_SPACE - nl_tokens({}), // LINE_BREAK - {}}}; // OTHER + {}, // OPENING_BRACE + {}, // OPENING_BRACKET + {}, // CLOSING_BRACE + {}, // CLOSING_BRACKET + {}, // QUOTE + {}, // ESCAPE + {}, // COMMA + {}, // COLON + {}, // WHITE_SPACE + nl_tokens({}, {}), // LINE_BREAK + {}}}; // OTHER return pda_tlt; } diff --git a/cpp/tests/io/json_test.cpp b/cpp/tests/io/json_test.cpp index 7c911ac2e04..2ddb0b76544 100644 --- a/cpp/tests/io/json_test.cpp +++ b/cpp/tests/io/json_test.cpp @@ -1962,7 +1962,31 @@ TEST_F(JsonReaderTest, JSONLinesRecovering) "\n" "\n" // 4 -> a: 123 (valid) - R"({"a":123})"; + R"({"a":4})" + "\n" + // 5 -> (invalid) + R"({"a":5)" + "\n" + // 6 -> (invalid) + R"({"a":6 )" + "\n" + // 7 -> (invalid) + R"({"b":[7 )" + "\n" + // 8 -> a: 8 (valid) + R"({"a":8})" + "\n" + // 9 -> (invalid) + R"({"d":{"unterminated_field_name)" + "\n" + // 10 -> (invalid) + R"({"d":{)" + "\n" + // 11 -> (invalid) + R"({"d":{"123",)" + "\n" + // 12 -> a: 12 (valid) + R"({"a":12})"; auto filepath = temp_env->get_temp_dir() + "RecoveringLines.json"; { @@ -1978,17 +2002,22 @@ TEST_F(JsonReaderTest, JSONLinesRecovering) cudf::io::table_with_metadata result = cudf::io::read_json(in_options); EXPECT_EQ(result.tbl->num_columns(), 2); - EXPECT_EQ(result.tbl->num_rows(), 5); + EXPECT_EQ(result.tbl->num_rows(), 13); EXPECT_EQ(result.tbl->get_column(0).type().id(), cudf::type_id::INT64); EXPECT_EQ(result.tbl->get_column(1).type().id(), cudf::type_id::FLOAT64); - std::vector a_validity{true, false, false, false, true}; - std::vector c_validity{false, false, false, true, false}; + std::vector a_validity{ + true, false, false, false, true, false, false, false, true, false, false, false, true}; + std::vector c_validity{ + false, false, false, true, false, false, false, false, false, false, false, false, false}; - 
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(0), - int64_wrapper{{-2, 0, 0, 0, 123}, a_validity.cbegin()}); - CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(1), - float64_wrapper{{0.0, 0.0, 0.0, 1.2, 0.0}, c_validity.cbegin()}); + CUDF_TEST_EXPECT_COLUMNS_EQUAL( + result.tbl->get_column(0), + int64_wrapper{{-2, 0, 0, 0, 4, 0, 0, 0, 8, 0, 0, 0, 12}, a_validity.cbegin()}); + CUDF_TEST_EXPECT_COLUMNS_EQUAL( + result.tbl->get_column(1), + float64_wrapper{{0.0, 0.0, 0.0, 1.2, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0}, + c_validity.cbegin()}); } CUDF_TEST_PROGRAM_MAIN() diff --git a/cpp/tests/io/nested_json_test.cpp b/cpp/tests/io/nested_json_test.cpp index 00d657108b8..3cb7e1f287a 100644 --- a/cpp/tests/io/nested_json_test.cpp +++ b/cpp/tests/io/nested_json_test.cpp @@ -569,23 +569,12 @@ TEST_F(JsonTest, RecoveringTokenStream) // Line 0 (invalid) {0, token_t::StructBegin}, {0, token_t::StructEnd}, - // Line 1 (valid) - {10, token_t::StructBegin}, - {11, token_t::StructMemberBegin}, - {11, token_t::FieldNameBegin}, - {13, token_t::FieldNameEnd}, - // Line 2 (valid) - {16, token_t::StructBegin}, - {17, token_t::StructMemberBegin}, - {17, token_t::FieldNameBegin}, - {19, token_t::FieldNameEnd}, - {21, token_t::StructBegin}, - {22, token_t::StructMemberBegin}, - {22, token_t::FieldNameBegin}, - {24, token_t::FieldNameEnd}, - {26, token_t::ListBegin}, - {27, token_t::ValueBegin}, - {30, token_t::ValueEnd}, + // Line 1 (invalid) + {0, token_t::StructBegin}, + {0, token_t::StructEnd}, + // Line 2 (invalid) + {0, token_t::StructBegin}, + {0, token_t::StructEnd}, // Line 3 (valid) {31, token_t::StructBegin}, {32, token_t::StructMemberBegin}, From 135879368a8fcecda0a1d85bcf18b7e15cd0269d Mon Sep 17 00:00:00 2001 From: Ray Douglass Date: Wed, 11 Oct 2023 10:28:59 -0400 Subject: [PATCH 144/150] Update Changelog [skip ci] --- CHANGELOG.md | 262 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 262 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 76abf241d96..ecd547ab5b3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,265 @@ +# cuDF 23.10.00 (11 Oct 2023) + +## 🚨 Breaking Changes + +- Expose stream parameter in public nvtext ngram APIs ([#14061](https://github.com/rapidsai/cudf/pull/14061)) [@davidwendt](https://github.com/davidwendt) +- Raise `MixedTypeError` when a column of mixed-dtype is being constructed ([#14050](https://github.com/rapidsai/cudf/pull/14050)) [@galipremsagar](https://github.com/galipremsagar) +- Raise `NotImplementedError` for `MultiIndex.to_series` ([#14049](https://github.com/rapidsai/cudf/pull/14049)) [@galipremsagar](https://github.com/galipremsagar) +- Create table_input_metadata from a table_metadata ([#13920](https://github.com/rapidsai/cudf/pull/13920)) [@etseidl](https://github.com/etseidl) +- Enable RLE boolean encoding for v2 Parquet files ([#13886](https://github.com/rapidsai/cudf/pull/13886)) [@etseidl](https://github.com/etseidl) +- Change `NA` to `NaT` for `datetime` and `timedelta` types ([#13868](https://github.com/rapidsai/cudf/pull/13868)) [@galipremsagar](https://github.com/galipremsagar) +- Fix `any`, `all` reduction behavior for `axis=None` and warn for other reductions ([#13831](https://github.com/rapidsai/cudf/pull/13831)) [@galipremsagar](https://github.com/galipremsagar) +- Add minhash support for MurmurHash3_x64_128 ([#13796](https://github.com/rapidsai/cudf/pull/13796)) 
[@davidwendt](https://github.com/davidwendt) +- Remove the libcudf cudf::offset_type type ([#13788](https://github.com/rapidsai/cudf/pull/13788)) [@davidwendt](https://github.com/davidwendt) +- Raise error when trying to join `datetime` and `timedelta` types with other types ([#13786](https://github.com/rapidsai/cudf/pull/13786)) [@galipremsagar](https://github.com/galipremsagar) +- Update to Cython 3.0.0 ([#13777](https://github.com/rapidsai/cudf/pull/13777)) [@vyasr](https://github.com/vyasr) +- Raise error on constructing an array from mixed type inputs ([#13768](https://github.com/rapidsai/cudf/pull/13768)) [@galipremsagar](https://github.com/galipremsagar) +- Enforce deprecations in `23.10` ([#13732](https://github.com/rapidsai/cudf/pull/13732)) [@galipremsagar](https://github.com/galipremsagar) +- Upgrade to arrow 12 ([#13728](https://github.com/rapidsai/cudf/pull/13728)) [@galipremsagar](https://github.com/galipremsagar) +- Remove Arrow dependency from the `datasource.hpp` public header ([#13698](https://github.com/rapidsai/cudf/pull/13698)) [@vuule](https://github.com/vuule) + +## 🐛 Bug Fixes + +- Fix inaccurate ceil/floor and inaccurate rescaling casts of fixed-point values. ([#14242](https://github.com/rapidsai/cudf/pull/14242)) [@bdice](https://github.com/bdice) +- Fix inaccuracy in decimal128 rounding. ([#14233](https://github.com/rapidsai/cudf/pull/14233)) [@bdice](https://github.com/bdice) +- Workaround for illegal instruction error in sm90 for warp instrinsics with mask ([#14201](https://github.com/rapidsai/cudf/pull/14201)) [@karthikeyann](https://github.com/karthikeyann) +- Fix pytorch related pytest ([#14198](https://github.com/rapidsai/cudf/pull/14198)) [@galipremsagar](https://github.com/galipremsagar) +- Pin to `aws-sdk-cpp<1.11` ([#14173](https://github.com/rapidsai/cudf/pull/14173)) [@pentschev](https://github.com/pentschev) +- Fix assert failure for range window functions ([#14168](https://github.com/rapidsai/cudf/pull/14168)) [@mythrocks](https://github.com/mythrocks) +- Fix Memcheck error found in JSON_TEST JsonReaderTest.ErrorStrings ([#14164](https://github.com/rapidsai/cudf/pull/14164)) [@karthikeyann](https://github.com/karthikeyann) +- Fix calls to copy_bitmask to pass stream parameter ([#14158](https://github.com/rapidsai/cudf/pull/14158)) [@davidwendt](https://github.com/davidwendt) +- Fix DataFrame from Series with different CategoricalIndexes ([#14157](https://github.com/rapidsai/cudf/pull/14157)) [@mroeschke](https://github.com/mroeschke) +- Pin to numpy<1.25 and numba<0.58 to avoid errors and deprecation warnings-as-errors. 
([#14156](https://github.com/rapidsai/cudf/pull/14156)) [@bdice](https://github.com/bdice) +- Fix kernel launch error for cudf::io::orc::gpu::rowgroup_char_counts_kernel ([#14139](https://github.com/rapidsai/cudf/pull/14139)) [@davidwendt](https://github.com/davidwendt) +- Don't sort columns for DataFrame init from list of Series ([#14136](https://github.com/rapidsai/cudf/pull/14136)) [@mroeschke](https://github.com/mroeschke) +- Fix DataFrame.values with no columns but index ([#14134](https://github.com/rapidsai/cudf/pull/14134)) [@mroeschke](https://github.com/mroeschke) +- Avoid circular cimports in _lib/cpp/reduce.pxd ([#14125](https://github.com/rapidsai/cudf/pull/14125)) [@vyasr](https://github.com/vyasr) +- Add support for nested dict in `DataFrame` constructor ([#14119](https://github.com/rapidsai/cudf/pull/14119)) [@galipremsagar](https://github.com/galipremsagar) +- Restrict iterables of `DataFrame`'s as input to `DataFrame` constructor ([#14118](https://github.com/rapidsai/cudf/pull/14118)) [@galipremsagar](https://github.com/galipremsagar) +- Allow `numeric_only=True` for reduction operations on numeric types ([#14111](https://github.com/rapidsai/cudf/pull/14111)) [@galipremsagar](https://github.com/galipremsagar) +- Preserve name of the column while initializing a `DataFrame` ([#14110](https://github.com/rapidsai/cudf/pull/14110)) [@galipremsagar](https://github.com/galipremsagar) +- Correct numerous 20054-D: dynamic initialization errors found on arm+12.2 ([#14108](https://github.com/rapidsai/cudf/pull/14108)) [@robertmaynard](https://github.com/robertmaynard) +- Drop `kwargs` from `Series.count` ([#14106](https://github.com/rapidsai/cudf/pull/14106)) [@galipremsagar](https://github.com/galipremsagar) +- Fix naming issues with `Index.to_frame` and `MultiIndex.to_frame` APIs ([#14105](https://github.com/rapidsai/cudf/pull/14105)) [@galipremsagar](https://github.com/galipremsagar) +- Only use memory resources that haven't been freed ([#14103](https://github.com/rapidsai/cudf/pull/14103)) [@robertmaynard](https://github.com/robertmaynard) +- Add support for `__round__` in `Series` and `DataFrame` ([#14099](https://github.com/rapidsai/cudf/pull/14099)) [@galipremsagar](https://github.com/galipremsagar) +- Validate ignore_index type in drop_duplicates ([#14098](https://github.com/rapidsai/cudf/pull/14098)) [@mroeschke](https://github.com/mroeschke) +- Fix renaming `Series` and `Index` ([#14080](https://github.com/rapidsai/cudf/pull/14080)) [@galipremsagar](https://github.com/galipremsagar) +- Raise NotImplementedError in to_datetime if Z (or tz component) in string ([#14074](https://github.com/rapidsai/cudf/pull/14074)) [@mroeschke](https://github.com/mroeschke) +- Raise NotImplementedError for datetime strings with UTC offset ([#14070](https://github.com/rapidsai/cudf/pull/14070)) [@mroeschke](https://github.com/mroeschke) +- Update pyarrow-related dispatch logic in dask_cudf ([#14069](https://github.com/rapidsai/cudf/pull/14069)) [@rjzamora](https://github.com/rjzamora) +- Use `conda mambabuild` rather than `mamba mambabuild` ([#14067](https://github.com/rapidsai/cudf/pull/14067)) [@wence-](https://github.com/wence-) +- Raise NotImplementedError in to_datetime with 
dayfirst without infer_format ([#14058](https://github.com/rapidsai/cudf/pull/14058)) [@mroeschke](https://github.com/mroeschke) +- Fix various issues in `Index.intersection` ([#14054](https://github.com/rapidsai/cudf/pull/14054)) [@galipremsagar](https://github.com/galipremsagar) +- Fix `Index.difference` to match with pandas ([#14053](https://github.com/rapidsai/cudf/pull/14053)) [@galipremsagar](https://github.com/galipremsagar) +- Fix empty string column construction ([#14052](https://github.com/rapidsai/cudf/pull/14052)) [@galipremsagar](https://github.com/galipremsagar) +- Fix `IntervalIndex.union` to preserve type-metadata ([#14051](https://github.com/rapidsai/cudf/pull/14051)) [@galipremsagar](https://github.com/galipremsagar) +- Raise `MixedTypeError` when a column of mixed-dtype is being constructed ([#14050](https://github.com/rapidsai/cudf/pull/14050)) [@galipremsagar](https://github.com/galipremsagar) +- Raise `NotImplementedError` for `MultiIndex.to_series` ([#14049](https://github.com/rapidsai/cudf/pull/14049)) [@galipremsagar](https://github.com/galipremsagar) +- Ignore compile_commands.json ([#14048](https://github.com/rapidsai/cudf/pull/14048)) [@harrism](https://github.com/harrism) +- Raise TypeError for any non-parseable argument in to_datetime ([#14044](https://github.com/rapidsai/cudf/pull/14044)) [@mroeschke](https://github.com/mroeschke) +- Raise NotImplementedError for to_datetime with z format ([#14037](https://github.com/rapidsai/cudf/pull/14037)) [@mroeschke](https://github.com/mroeschke) +- Implement `sort_remaining` for `sort_index` ([#14033](https://github.com/rapidsai/cudf/pull/14033)) [@wence-](https://github.com/wence-) +- Raise NotImplementedError for Categoricals with timezones ([#14032](https://github.com/rapidsai/cudf/pull/14032)) [@mroeschke](https://github.com/mroeschke) +- Temporary fix Parquet metadata with empty value string being ignored from writing ([#14026](https://github.com/rapidsai/cudf/pull/14026)) [@ttnghia](https://github.com/ttnghia) +- Preserve types of scalar being returned when possible in `quantile` ([#14014](https://github.com/rapidsai/cudf/pull/14014)) [@galipremsagar](https://github.com/galipremsagar) +- Fix return type of `MultiIndex.difference` ([#14009](https://github.com/rapidsai/cudf/pull/14009)) [@galipremsagar](https://github.com/galipremsagar) +- Raise an error when timezone subtypes are encountered in `pd.IntervalDtype` ([#14006](https://github.com/rapidsai/cudf/pull/14006)) [@galipremsagar](https://github.com/galipremsagar) +- Fix map column can not be non-nullable for java ([#14003](https://github.com/rapidsai/cudf/pull/14003)) [@res-life](https://github.com/res-life) +- Fix `name` selection in `Index.difference` and `Index.intersection` ([#13986](https://github.com/rapidsai/cudf/pull/13986)) [@galipremsagar](https://github.com/galipremsagar) +- Restore column type metadata with `dropna` to fix `factorize` API ([#13980](https://github.com/rapidsai/cudf/pull/13980)) [@galipremsagar](https://github.com/galipremsagar) +- Use thread_index_type to avoid out of bounds accesses in conditional joins ([#13971](https://github.com/rapidsai/cudf/pull/13971)) [@vyasr](https://github.com/vyasr) +- Fix 
`MultiIndex.to_numpy` to return numpy array with tuples ([#13966](https://github.com/rapidsai/cudf/pull/13966)) [@galipremsagar](https://github.com/galipremsagar) +- Use cudf::thread_index_type in get_json_object and tdigest kernels ([#13962](https://github.com/rapidsai/cudf/pull/13962)) [@nvdbaranec](https://github.com/nvdbaranec) +- Fix an issue with `IntervalIndex.repr` when null values are present ([#13958](https://github.com/rapidsai/cudf/pull/13958)) [@galipremsagar](https://github.com/galipremsagar) +- Fix type metadata issue preservation with `Column.unique` ([#13957](https://github.com/rapidsai/cudf/pull/13957)) [@galipremsagar](https://github.com/galipremsagar) +- Handle `Interval` scalars when passed in list-like inputs to `cudf.Index` ([#13956](https://github.com/rapidsai/cudf/pull/13956)) [@galipremsagar](https://github.com/galipremsagar) +- Fix setting of categories order when `dtype` is passed to a `CategoricalColumn` ([#13955](https://github.com/rapidsai/cudf/pull/13955)) [@galipremsagar](https://github.com/galipremsagar) +- Handle `as_index` in `GroupBy.apply` ([#13951](https://github.com/rapidsai/cudf/pull/13951)) [@brandon-b-miller](https://github.com/brandon-b-miller) +- Raise error for string types in `nsmallest` and `nlargest` ([#13946](https://github.com/rapidsai/cudf/pull/13946)) [@galipremsagar](https://github.com/galipremsagar) +- Fix `index` of `Groupby.apply` results when it is performed on empty objects ([#13944](https://github.com/rapidsai/cudf/pull/13944)) [@galipremsagar](https://github.com/galipremsagar) +- Fix integer overflow in shim `device_sum` functions ([#13943](https://github.com/rapidsai/cudf/pull/13943)) [@brandon-b-miller](https://github.com/brandon-b-miller) +- Fix type mismatch in groupby reduction for empty objects ([#13942](https://github.com/rapidsai/cudf/pull/13942)) [@galipremsagar](https://github.com/galipremsagar) +- Fixed processed bytes calculation in APPLY_BOOLEAN_MASK benchmark. ([#13937](https://github.com/rapidsai/cudf/pull/13937)) [@Blonck](https://github.com/Blonck) +- Fix construction of `Grouping` objects ([#13932](https://github.com/rapidsai/cudf/pull/13932)) [@galipremsagar](https://github.com/galipremsagar) +- Fix an issue with `loc` when column names is `MultiIndex` ([#13929](https://github.com/rapidsai/cudf/pull/13929)) [@galipremsagar](https://github.com/galipremsagar) +- Fix handling of typecasting in `searchsorted` ([#13925](https://github.com/rapidsai/cudf/pull/13925)) [@galipremsagar](https://github.com/galipremsagar) +- Preserve index `name` in `reindex` ([#13917](https://github.com/rapidsai/cudf/pull/13917)) [@galipremsagar](https://github.com/galipremsagar) +- Use `cudf::thread_index_type` in cuIO to prevent overflow in row indexing ([#13910](https://github.com/rapidsai/cudf/pull/13910)) [@vuule](https://github.com/vuule) +- Fix for encodings listed in the Parquet column chunk metadata ([#13907](https://github.com/rapidsai/cudf/pull/13907)) [@etseidl](https://github.com/etseidl) +- Use cudf::thread_index_type in concatenate.cu. ([#13906](https://github.com/rapidsai/cudf/pull/13906)) [@bdice](https://github.com/bdice) +- Use cudf::thread_index_type in replace.cu. 
([#13905](https://github.com/rapidsai/cudf/pull/13905)) [@bdice](https://github.com/bdice) +- Add noSanitizer tag to Java reduction tests failing with sanitizer in CUDA 12 ([#13904](https://github.com/rapidsai/cudf/pull/13904)) [@jlowe](https://github.com/jlowe) +- Remove the internal use of the cudf's default stream in cuIO ([#13903](https://github.com/rapidsai/cudf/pull/13903)) [@vuule](https://github.com/vuule) +- Use cuda-nvtx-dev CUDA 12 package. ([#13901](https://github.com/rapidsai/cudf/pull/13901)) [@bdice](https://github.com/bdice) +- Use `thread_index_type` to avoid index overflow in grid-stride loops ([#13895](https://github.com/rapidsai/cudf/pull/13895)) [@PointKernel](https://github.com/PointKernel) +- Fix memory access error in cudf::shift for sliced strings ([#13894](https://github.com/rapidsai/cudf/pull/13894)) [@davidwendt](https://github.com/davidwendt) +- Raise error when trying to construct a `DataFrame` with mixed types ([#13889](https://github.com/rapidsai/cudf/pull/13889)) [@galipremsagar](https://github.com/galipremsagar) +- Return `nan` when one variable to be correlated has zero variance in JIT GroupBy Apply ([#13884](https://github.com/rapidsai/cudf/pull/13884)) [@brandon-b-miller](https://github.com/brandon-b-miller) +- Correctly detect the BOM mark in `read_csv` with compressed input ([#13881](https://github.com/rapidsai/cudf/pull/13881)) [@vuule](https://github.com/vuule) +- Check for the presence of all values in `MultiIndex.isin` ([#13879](https://github.com/rapidsai/cudf/pull/13879)) [@galipremsagar](https://github.com/galipremsagar) +- Fix nvtext::generate_character_ngrams performance regression for longer strings ([#13874](https://github.com/rapidsai/cudf/pull/13874)) [@davidwendt](https://github.com/davidwendt) +- Fix return type of `MultiIndex.levels` ([#13870](https://github.com/rapidsai/cudf/pull/13870)) [@galipremsagar](https://github.com/galipremsagar) +- Fix List's missing children metadata in JSON writer ([#13869](https://github.com/rapidsai/cudf/pull/13869)) [@karthikeyann](https://github.com/karthikeyann) +- Disable construction of Index when `freq` is set in pandas-compatibility mode ([#13857](https://github.com/rapidsai/cudf/pull/13857)) [@galipremsagar](https://github.com/galipremsagar) +- Fix an issue with fetching `NA` from a `TimedeltaColumn` ([#13853](https://github.com/rapidsai/cudf/pull/13853)) [@galipremsagar](https://github.com/galipremsagar) +- Simplify implementation of interval_range() and fix behaviour for floating `freq` ([#13844](https://github.com/rapidsai/cudf/pull/13844)) [@shwina](https://github.com/shwina) +- Fix binary operations between `Series` and `Index` ([#13842](https://github.com/rapidsai/cudf/pull/13842)) [@galipremsagar](https://github.com/galipremsagar) +- Update make_lists_column_from_scalar to use make_offsets_child_column utility ([#13841](https://github.com/rapidsai/cudf/pull/13841)) [@davidwendt](https://github.com/davidwendt) +- Fix read out of bounds in string concatenate ([#13838](https://github.com/rapidsai/cudf/pull/13838)) [@pentschev](https://github.com/pentschev) +- Raise error for more cases when `timezone-aware` data is passed to `as_column` 
([#13835](https://github.com/rapidsai/cudf/pull/13835)) [@galipremsagar](https://github.com/galipremsagar) +- Fix `any`, `all` reduction behavior for `axis=None` and warn for other reductions ([#13831](https://github.com/rapidsai/cudf/pull/13831)) [@galipremsagar](https://github.com/galipremsagar) +- Raise error when trying to construct time-zone aware timestamps ([#13830](https://github.com/rapidsai/cudf/pull/13830)) [@galipremsagar](https://github.com/galipremsagar) +- Fix cuFile I/O factories ([#13829](https://github.com/rapidsai/cudf/pull/13829)) [@vuule](https://github.com/vuule) +- DataFrame with namedtuples uses ._field as column names ([#13824](https://github.com/rapidsai/cudf/pull/13824)) [@mroeschke](https://github.com/mroeschke) +- Branch 23.10 merge 23.08 ([#13822](https://github.com/rapidsai/cudf/pull/13822)) [@vyasr](https://github.com/vyasr) +- Return a Series from JIT GroupBy apply, rather than a DataFrame ([#13820](https://github.com/rapidsai/cudf/pull/13820)) [@brandon-b-miller](https://github.com/brandon-b-miller) +- No need to dlsym EnsureS3Finalized we can call it directly ([#13819](https://github.com/rapidsai/cudf/pull/13819)) [@robertmaynard](https://github.com/robertmaynard) +- Raise error when mixed types are being constructed ([#13816](https://github.com/rapidsai/cudf/pull/13816)) [@galipremsagar](https://github.com/galipremsagar) +- Fix unbounded sequence issue in `DataFrame` constructor ([#13811](https://github.com/rapidsai/cudf/pull/13811)) [@galipremsagar](https://github.com/galipremsagar) +- Fix Byte-Pair-Encoding usage of cuco static-map for storing merge-pairs ([#13807](https://github.com/rapidsai/cudf/pull/13807)) [@davidwendt](https://github.com/davidwendt) +- Fix for Parquet writer when requested pages per row is smaller than fragment size ([#13806](https://github.com/rapidsai/cudf/pull/13806)) [@etseidl](https://github.com/etseidl) +- Remove hangs from trying to construct un-bounded sequences ([#13799](https://github.com/rapidsai/cudf/pull/13799)) [@galipremsagar](https://github.com/galipremsagar) +- Bug/update libcudf to handle arrow12 changes ([#13794](https://github.com/rapidsai/cudf/pull/13794)) [@robertmaynard](https://github.com/robertmaynard) +- Update get_arrow to arrows 12 CMake target name of arrow::xsimd ([#13790](https://github.com/rapidsai/cudf/pull/13790)) [@robertmaynard](https://github.com/robertmaynard) +- Raise error when trying to join `datetime` and `timedelta` types with other types ([#13786](https://github.com/rapidsai/cudf/pull/13786)) [@galipremsagar](https://github.com/galipremsagar) +- Fix negative unary operation for boolean type ([#13780](https://github.com/rapidsai/cudf/pull/13780)) [@galipremsagar](https://github.com/galipremsagar) +- Fix contains(`in`) method for `Series` ([#13779](https://github.com/rapidsai/cudf/pull/13779)) [@galipremsagar](https://github.com/galipremsagar) +- Fix binary operation column ordering and missing column issues ([#13778](https://github.com/rapidsai/cudf/pull/13778)) [@galipremsagar](https://github.com/galipremsagar) +- Cast only time of day to nanos to avoid an overflow in Parquet INT96 write ([#13776](https://github.com/rapidsai/cudf/pull/13776)) 
[@gerashegalov](https://github.com/gerashegalov) +- Preserve names of column object in various APIs ([#13772](https://github.com/rapidsai/cudf/pull/13772)) [@galipremsagar](https://github.com/galipremsagar) +- Raise error on constructing an array from mixed type inputs ([#13768](https://github.com/rapidsai/cudf/pull/13768)) [@galipremsagar](https://github.com/galipremsagar) +- Fix construction of DataFrames from dict when columns are provided ([#13766](https://github.com/rapidsai/cudf/pull/13766)) [@wence-](https://github.com/wence-) +- Provide our own Cython declaration for make_unique ([#13746](https://github.com/rapidsai/cudf/pull/13746)) [@wence-](https://github.com/wence-) + +## 📖 Documentation + +- Fix typo in docstring: metadata. ([#14025](https://github.com/rapidsai/cudf/pull/14025)) [@bdice](https://github.com/bdice) +- Fix typo in parquet/page_decode.cuh ([#13849](https://github.com/rapidsai/cudf/pull/13849)) [@XinyuZeng](https://github.com/XinyuZeng) +- Simplify Python doc configuration ([#13826](https://github.com/rapidsai/cudf/pull/13826)) [@vyasr](https://github.com/vyasr) +- Update documentation to reflect recent changes in JSON reader and writer ([#13791](https://github.com/rapidsai/cudf/pull/13791)) [@vuule](https://github.com/vuule) +- Fix all warnings in Python docs ([#13789](https://github.com/rapidsai/cudf/pull/13789)) [@vyasr](https://github.com/vyasr) + +## 🚀 New Features + +- [Java] Add JNI bindings for `integers_to_hex` ([#14205](https://github.com/rapidsai/cudf/pull/14205)) [@razajafri](https://github.com/razajafri) +- Propagate errors from Parquet reader kernels back to host ([#14167](https://github.com/rapidsai/cudf/pull/14167)) [@vuule](https://github.com/vuule) +- JNI for `HISTOGRAM` and `MERGE_HISTOGRAM` aggregations ([#14154](https://github.com/rapidsai/cudf/pull/14154)) [@ttnghia](https://github.com/ttnghia) +- Expose streams in all public sorting APIs ([#14146](https://github.com/rapidsai/cudf/pull/14146)) [@vyasr](https://github.com/vyasr) +- Enable direct ingestion and production of Arrow scalars ([#14121](https://github.com/rapidsai/cudf/pull/14121)) [@vyasr](https://github.com/vyasr) +- Implement `GroupBy.value_counts` to match pandas API ([#14114](https://github.com/rapidsai/cudf/pull/14114)) [@stmio](https://github.com/stmio) +- Refactor parquet thrift reader ([#14097](https://github.com/rapidsai/cudf/pull/14097)) [@etseidl](https://github.com/etseidl) +- Refactor `hash_reduce_by_row` ([#14095](https://github.com/rapidsai/cudf/pull/14095)) [@ttnghia](https://github.com/ttnghia) +- Support negative preceding/following for ROW window functions ([#14093](https://github.com/rapidsai/cudf/pull/14093)) [@mythrocks](https://github.com/mythrocks) +- Support for progressive parquet chunked reading. 
([#14079](https://github.com/rapidsai/cudf/pull/14079)) [@nvdbaranec](https://github.com/nvdbaranec) +- Implement `HISTOGRAM` and `MERGE_HISTOGRAM` aggregations ([#14045](https://github.com/rapidsai/cudf/pull/14045)) [@ttnghia](https://github.com/ttnghia) +- Expose streams in public search APIs ([#14034](https://github.com/rapidsai/cudf/pull/14034)) [@vyasr](https://github.com/vyasr) +- Expose streams in public replace APIs ([#14010](https://github.com/rapidsai/cudf/pull/14010)) [@vyasr](https://github.com/vyasr) +- Add stream parameter to public cudf::strings::split APIs ([#13997](https://github.com/rapidsai/cudf/pull/13997)) [@davidwendt](https://github.com/davidwendt) +- Expose streams in public filling APIs ([#13990](https://github.com/rapidsai/cudf/pull/13990)) [@vyasr](https://github.com/vyasr) +- Expose streams in public concatenate APIs ([#13987](https://github.com/rapidsai/cudf/pull/13987)) [@vyasr](https://github.com/vyasr) +- Use HostMemoryAllocator in jni::allocate_host_buffer ([#13975](https://github.com/rapidsai/cudf/pull/13975)) [@gerashegalov](https://github.com/gerashegalov) +- Enable fractional null probability for hashing benchmark ([#13967](https://github.com/rapidsai/cudf/pull/13967)) [@Blonck](https://github.com/Blonck) +- Switch pylibcudf-enabled types to use enum class in Cython ([#13931](https://github.com/rapidsai/cudf/pull/13931)) [@vyasr](https://github.com/vyasr) +- Add nvtext::tokenize_with_vocabulary API ([#13930](https://github.com/rapidsai/cudf/pull/13930)) [@davidwendt](https://github.com/davidwendt) +- Rewrite `DataFrame.stack` to support multi level column names ([#13927](https://github.com/rapidsai/cudf/pull/13927)) [@isVoid](https://github.com/isVoid) +- Add HostMemoryAllocator interface ([#13924](https://github.com/rapidsai/cudf/pull/13924)) [@gerashegalov](https://github.com/gerashegalov) +- Global stream pool ([#13922](https://github.com/rapidsai/cudf/pull/13922)) [@etseidl](https://github.com/etseidl) +- Create table_input_metadata from a table_metadata ([#13920](https://github.com/rapidsai/cudf/pull/13920)) [@etseidl](https://github.com/etseidl) +- Translate column size overflow exception to JNI ([#13911](https://github.com/rapidsai/cudf/pull/13911)) [@mythrocks](https://github.com/mythrocks) +- Enable RLE boolean encoding for v2 Parquet files ([#13886](https://github.com/rapidsai/cudf/pull/13886)) [@etseidl](https://github.com/etseidl) +- Exclude some tests from running with the compute sanitizer ([#13872](https://github.com/rapidsai/cudf/pull/13872)) [@firestarman](https://github.com/firestarman) +- Expand statistics support in ORC writer ([#13848](https://github.com/rapidsai/cudf/pull/13848)) [@vuule](https://github.com/vuule) +- Register the memory mapped buffer in `datasource` to improve H2D throughput ([#13814](https://github.com/rapidsai/cudf/pull/13814)) [@vuule](https://github.com/vuule) +- Add cudf::strings::find function with target per row ([#13808](https://github.com/rapidsai/cudf/pull/13808)) [@davidwendt](https://github.com/davidwendt) +- Add minhash support for MurmurHash3_x64_128 ([#13796](https://github.com/rapidsai/cudf/pull/13796)) [@davidwendt](https://github.com/davidwendt) +- 
Remove unnecessary pointer copying in JIT GroupBy Apply ([#13792](https://github.com/rapidsai/cudf/pull/13792)) [@brandon-b-miller](https://github.com/brandon-b-miller) +- Add 'poll' function to custreamz kafka consumer ([#13782](https://github.com/rapidsai/cudf/pull/13782)) [@jdye64](https://github.com/jdye64) +- Support `corr` in `GroupBy.apply` through the jit engine ([#13767](https://github.com/rapidsai/cudf/pull/13767)) [@shwina](https://github.com/shwina) +- Optionally write version 2 page headers in Parquet writer ([#13751](https://github.com/rapidsai/cudf/pull/13751)) [@etseidl](https://github.com/etseidl) +- Support more numeric types in `Groupby.apply` with `engine='jit'` ([#13729](https://github.com/rapidsai/cudf/pull/13729)) [@brandon-b-miller](https://github.com/brandon-b-miller) +- [FEA] Add DELTA_BINARY_PACKED decoding support to Parquet reader ([#13637](https://github.com/rapidsai/cudf/pull/13637)) [@etseidl](https://github.com/etseidl) +- Read FIXED_LEN_BYTE_ARRAY as binary in parquet reader ([#13437](https://github.com/rapidsai/cudf/pull/13437)) [@PointKernel](https://github.com/PointKernel) + +## 🛠️ Improvements + +- Pin `dask` and `distributed` for `23.10` release ([#14225](https://github.com/rapidsai/cudf/pull/14225)) [@galipremsagar](https://github.com/galipremsagar) +- update rmm tag path ([#14195](https://github.com/rapidsai/cudf/pull/14195)) [@AyodeAwe](https://github.com/AyodeAwe) +- Disable `Recently Updated` Check ([#14193](https://github.com/rapidsai/cudf/pull/14193)) [@ajschmidt8](https://github.com/ajschmidt8) +- Move cpp/src/hash/hash_allocator.cuh to include/cudf/hashing/detail ([#14163](https://github.com/rapidsai/cudf/pull/14163)) [@davidwendt](https://github.com/davidwendt) +- Add Parquet reader benchmarks for row selection ([#14147](https://github.com/rapidsai/cudf/pull/14147)) [@vuule](https://github.com/vuule) +- Update image names ([#14145](https://github.com/rapidsai/cudf/pull/14145)) [@AyodeAwe](https://github.com/AyodeAwe) +- Support callables in DataFrame.assign ([#14142](https://github.com/rapidsai/cudf/pull/14142)) [@wence-](https://github.com/wence-) +- Reduce memory usage of as_categorical_column ([#14138](https://github.com/rapidsai/cudf/pull/14138)) [@wence-](https://github.com/wence-) +- Replace Python scalar conversions with libcudf ([#14124](https://github.com/rapidsai/cudf/pull/14124)) [@vyasr](https://github.com/vyasr) +- Update to clang 16.0.6. ([#14120](https://github.com/rapidsai/cudf/pull/14120)) [@bdice](https://github.com/bdice) +- Fix type of empty `Index` and raise warning in `Series` constructor ([#14116](https://github.com/rapidsai/cudf/pull/14116)) [@galipremsagar](https://github.com/galipremsagar) +- Add stream parameter to external dict APIs ([#14115](https://github.com/rapidsai/cudf/pull/14115)) [@SurajAralihalli](https://github.com/SurajAralihalli) +- Add fallback matrix for nvcomp. 
([#14082](https://github.com/rapidsai/cudf/pull/14082)) [@bdice](https://github.com/bdice) +- [Java] Add recoverWithNull to JSONOptions and pass to Table.readJSON ([#14078](https://github.com/rapidsai/cudf/pull/14078)) [@andygrove](https://github.com/andygrove) +- Remove header tests ([#14072](https://github.com/rapidsai/cudf/pull/14072)) [@ajschmidt8](https://github.com/ajschmidt8) +- Refactor `contains_table` with cuco::static_set ([#14064](https://github.com/rapidsai/cudf/pull/14064)) [@PointKernel](https://github.com/PointKernel) +- Remove debug print in a Parquet test ([#14063](https://github.com/rapidsai/cudf/pull/14063)) [@vuule](https://github.com/vuule) +- Expose stream parameter in public nvtext ngram APIs ([#14061](https://github.com/rapidsai/cudf/pull/14061)) [@davidwendt](https://github.com/davidwendt) +- Expose stream parameter in public strings find APIs ([#14060](https://github.com/rapidsai/cudf/pull/14060)) [@davidwendt](https://github.com/davidwendt) +- Update doxygen to 1.9.1 ([#14059](https://github.com/rapidsai/cudf/pull/14059)) [@vyasr](https://github.com/vyasr) +- Remove the mr from the base fixture ([#14057](https://github.com/rapidsai/cudf/pull/14057)) [@vyasr](https://github.com/vyasr) +- Expose streams in public strings case APIs ([#14056](https://github.com/rapidsai/cudf/pull/14056)) [@davidwendt](https://github.com/davidwendt) +- Refactor libcudf indexalator to typed normalator ([#14043](https://github.com/rapidsai/cudf/pull/14043)) [@davidwendt](https://github.com/davidwendt) +- Use cudf::make_empty_column instead of column_view constructor ([#14030](https://github.com/rapidsai/cudf/pull/14030)) [@davidwendt](https://github.com/davidwendt) +- Remove quadratic runtime due to accessing Frame._dtypes in loop ([#14028](https://github.com/rapidsai/cudf/pull/14028)) [@wence-](https://github.com/wence-) +- Explicitly depend on zlib in conda recipes ([#14018](https://github.com/rapidsai/cudf/pull/14018)) [@wence-](https://github.com/wence-) +- Use grid_stride for stride computations. 
([#13996](https://github.com/rapidsai/cudf/pull/13996)) [@bdice](https://github.com/bdice) +- Fix an issue where casting null-array to `object` dtype will result in a failure ([#13994](https://github.com/rapidsai/cudf/pull/13994)) [@galipremsagar](https://github.com/galipremsagar) +- Add tab as literal to cudf::test::to_string output ([#13993](https://github.com/rapidsai/cudf/pull/13993)) [@davidwendt](https://github.com/davidwendt) +- Enable `codes` dtype parity in pandas-compatibility mode for `factorize` API ([#13982](https://github.com/rapidsai/cudf/pull/13982)) [@galipremsagar](https://github.com/galipremsagar) +- Fix `CategoricalIndex` ordering in `Groupby.agg` when pandas-compatibility mode is enabled ([#13978](https://github.com/rapidsai/cudf/pull/13978)) [@galipremsagar](https://github.com/galipremsagar) +- Produce a fatal error if cudf is unable to find pyarrow include directory ([#13976](https://github.com/rapidsai/cudf/pull/13976)) [@cwharris](https://github.com/cwharris) +- Use `thread_index_type` in `partitioning.cu` ([#13973](https://github.com/rapidsai/cudf/pull/13973)) [@divyegala](https://github.com/divyegala) +- Use `cudf::thread_index_type` in `merge.cu` ([#13972](https://github.com/rapidsai/cudf/pull/13972)) [@divyegala](https://github.com/divyegala) +- Use `copy-pr-bot` ([#13970](https://github.com/rapidsai/cudf/pull/13970)) [@ajschmidt8](https://github.com/ajschmidt8) +- Use cudf::thread_index_type in strings custom kernels ([#13968](https://github.com/rapidsai/cudf/pull/13968)) [@davidwendt](https://github.com/davidwendt) +- Add `bytes_per_second` to hash_partition benchmark ([#13965](https://github.com/rapidsai/cudf/pull/13965)) [@Blonck](https://github.com/Blonck) +- Added pinned pool reservation API for java ([#13964](https://github.com/rapidsai/cudf/pull/13964)) [@revans2](https://github.com/revans2) +- Simplify wheel build scripts and allow alphas of RAPIDS dependencies ([#13963](https://github.com/rapidsai/cudf/pull/13963)) [@vyasr](https://github.com/vyasr) +- Add `bytes_per_second` to copy_if_else benchmark ([#13960](https://github.com/rapidsai/cudf/pull/13960)) [@Blonck](https://github.com/Blonck) +- Add pandas compatible output to `Series.unique` ([#13959](https://github.com/rapidsai/cudf/pull/13959)) [@galipremsagar](https://github.com/galipremsagar) +- Add `bytes_per_second` to compiled binaryop benchmark ([#13938](https://github.com/rapidsai/cudf/pull/13938)) [@Blonck](https://github.com/Blonck) +- Unpin `dask` and `distributed` for `23.10` development ([#13935](https://github.com/rapidsai/cudf/pull/13935)) [@galipremsagar](https://github.com/galipremsagar) +- Make HostColumnVector.getRefCount public ([#13934](https://github.com/rapidsai/cudf/pull/13934)) [@abellina](https://github.com/abellina) +- Use cuco::static_set in JSON tree algorithm ([#13928](https://github.com/rapidsai/cudf/pull/13928)) [@karthikeyann](https://github.com/karthikeyann) +- Add java API to get size of host memory needed to copy column view ([#13919](https://github.com/rapidsai/cudf/pull/13919)) [@revans2](https://github.com/revans2) +- Use cudf::size_type instead of int32 where appropriate in nvtext functions 
([#13915](https://github.com/rapidsai/cudf/pull/13915)) [@davidwendt](https://github.com/davidwendt) +- Enable hugepage for arrow host allocations ([#13914](https://github.com/rapidsai/cudf/pull/13914)) [@madsbk](https://github.com/madsbk) +- Improve performance of nvtext::edit_distance ([#13912](https://github.com/rapidsai/cudf/pull/13912)) [@davidwendt](https://github.com/davidwendt) +- Ensure cudf internals use pylibcudf in pure Python mode ([#13909](https://github.com/rapidsai/cudf/pull/13909)) [@vyasr](https://github.com/vyasr) +- Use `empty()` instead of `size()` where possible ([#13908](https://github.com/rapidsai/cudf/pull/13908)) [@vuule](https://github.com/vuule) +- [JNI] Adds HostColumnVector.EventHandler for spillability checks ([#13898](https://github.com/rapidsai/cudf/pull/13898)) [@abellina](https://github.com/abellina) +- Return `Timestamp` & `Timedelta` for fetching scalars in `DatetimeIndex` & `TimedeltaIndex` ([#13896](https://github.com/rapidsai/cudf/pull/13896)) [@galipremsagar](https://github.com/galipremsagar) +- Allow explicit `shuffle="p2p"` within dask-cudf API ([#13893](https://github.com/rapidsai/cudf/pull/13893)) [@rjzamora](https://github.com/rjzamora) +- Disable creation of `DatetimeIndex` when `freq` is passed to `cudf.date_range` ([#13890](https://github.com/rapidsai/cudf/pull/13890)) [@galipremsagar](https://github.com/galipremsagar) +- Bring parity with pandas for `datetime` & `timedelta` comparison operations ([#13877](https://github.com/rapidsai/cudf/pull/13877)) [@galipremsagar](https://github.com/galipremsagar) +- Change `NA` to `NaT` for `datetime` and `timedelta` types ([#13868](https://github.com/rapidsai/cudf/pull/13868)) [@galipremsagar](https://github.com/galipremsagar) +- Raise error when `astype(object)` is called in pandas compatibility mode ([#13862](https://github.com/rapidsai/cudf/pull/13862)) [@galipremsagar](https://github.com/galipremsagar) +- Fixes a performance regression in FST ([#13850](https://github.com/rapidsai/cudf/pull/13850)) [@elstehle](https://github.com/elstehle) +- Set native handles to null on close in Java wrapper classes ([#13818](https://github.com/rapidsai/cudf/pull/13818)) [@jlowe](https://github.com/jlowe) +- Avoid use of CUDF_EXPECTS in libcudf unit tests outside of helper functions with return values ([#13812](https://github.com/rapidsai/cudf/pull/13812)) [@vuule](https://github.com/vuule) +- Update `lists::contains` to experimental row comparator ([#13810](https://github.com/rapidsai/cudf/pull/13810)) [@divyegala](https://github.com/divyegala) +- Reduce `lists::contains` dispatches for scalars ([#13805](https://github.com/rapidsai/cudf/pull/13805)) [@divyegala](https://github.com/divyegala) +- Long string optimization for string column parsing in JSON reader ([#13803](https://github.com/rapidsai/cudf/pull/13803)) [@karthikeyann](https://github.com/karthikeyann) +- Raise NotImplementedError for pd.SparseDtype ([#13798](https://github.com/rapidsai/cudf/pull/13798)) [@mroeschke](https://github.com/mroeschke) +- Remove the libcudf cudf::offset_type type ([#13788](https://github.com/rapidsai/cudf/pull/13788)) [@davidwendt](https://github.com/davidwendt) +- Move Spark-independent Table debug to cudf Java
([#13783](https://github.com/rapidsai/cudf/pull/13783)) [@gerashegalov](https://github.com/gerashegalov) +- Update to Cython 3.0.0 ([#13777](https://github.com/rapidsai/cudf/pull/13777)) [@vyasr](https://github.com/vyasr) +- Refactor Parquet reader handling of V2 page header info ([#13775](https://github.com/rapidsai/cudf/pull/13775)) [@etseidl](https://github.com/etseidl) +- Branch 23.10 merge 23.08 ([#13773](https://github.com/rapidsai/cudf/pull/13773)) [@vyasr](https://github.com/vyasr) +- Restructure JSON code to correctly reflect legacy/experimental status ([#13757](https://github.com/rapidsai/cudf/pull/13757)) [@vuule](https://github.com/vuule) +- Branch 23.10 merge 23.08 ([#13753](https://github.com/rapidsai/cudf/pull/13753)) [@vyasr](https://github.com/vyasr) +- Enforce deprecations in `23.10` ([#13732](https://github.com/rapidsai/cudf/pull/13732)) [@galipremsagar](https://github.com/galipremsagar) +- Upgrade to arrow 12 ([#13728](https://github.com/rapidsai/cudf/pull/13728)) [@galipremsagar](https://github.com/galipremsagar) +- Refactors JSON reader's pushdown automaton ([#13716](https://github.com/rapidsai/cudf/pull/13716)) [@elstehle](https://github.com/elstehle) +- Remove Arrow dependency from the `datasource.hpp` public header ([#13698](https://github.com/rapidsai/cudf/pull/13698)) [@vuule](https://github.com/vuule) + # cuDF 23.08.00 (9 Aug 2023) ## 🚨 Breaking Changes From aa598bc28e6e2459ca6bcfa58f2056134e6591ea Mon Sep 17 00:00:00 2001 From: David Wendt <45795991+davidwendt@users.noreply.github.com> Date: Wed, 11 Oct 2023 15:34:59 -0400 Subject: [PATCH 145/150] Expose stream parameter in public strings split/partition APIs (#14247) Follow on to PR #13997 which did not include all the split APIs or a stream test. Add stream parameter to public APIs: - `cudf::strings::partition()` - `cudf::strings::rpartition()` - `cudf::strings::split_re()` - `cudf::strings::rsplit_re()` - `cudf::strings::split_record_re()` - `cudf::strings::rsplit_record_re()` Also cleaned up some of the doxygen comments. Reference #13744 Authors: - David Wendt (https://github.com/davidwendt) Approvers: - Mark Harris (https://github.com/harrism) - Bradley Dice (https://github.com/bdice) - Nghia Truong (https://github.com/ttnghia) URL: https://github.com/rapidsai/cudf/pull/14247 --- cpp/include/cudf/strings/split/partition.hpp | 22 +++++---- cpp/include/cudf/strings/split/split_re.hpp | 16 +++++-- cpp/src/strings/split/partition.cu | 10 ++-- cpp/src/strings/split/split_re.cu | 12 +++-- cpp/tests/CMakeLists.txt | 2 +- cpp/tests/streams/strings/split_test.cpp | 49 ++++++++++++++++++++ 6 files changed, 89 insertions(+), 22 deletions(-) create mode 100644 cpp/tests/streams/strings/split_test.cpp diff --git a/cpp/include/cudf/strings/split/partition.hpp b/cpp/include/cudf/strings/split/partition.hpp index 52ffb735eb7..25eedf1e86b 100644 --- a/cpp/include/cudf/strings/split/partition.hpp +++ b/cpp/include/cudf/strings/split/partition.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022, NVIDIA CORPORATION. + * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -51,15 +51,17 @@ namespace strings { * r[2] is ["cd","g_h"] * @endcode * - * @param strings Strings instance for this operation. 
+ * @param input Strings instance for this operation * @param delimiter UTF-8 encoded string indicating where to split each string. * Default of empty string indicates split on whitespace. - * @param mr Device memory resource used to allocate the returned table's device memory. - * @return New table of strings columns. + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned table's device memory + * @return New table of strings columns */ std::unique_ptr
partition( - strings_column_view const& strings, + strings_column_view const& input, string_scalar const& delimiter = string_scalar(""), + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -83,15 +85,17 @@ std::unique_ptr
partition( * r[2] is ["cd","h"] * @endcode * - * @param strings Strings instance for this operation. + * @param input Strings instance for this operation * @param delimiter UTF-8 encoded string indicating where to split each string. * Default of empty string indicates split on whitespace. - * @param mr Device memory resource used to allocate the returned table's device memory. - * @return New strings columns. + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned table's device memory + * @return New strings columns */ std::unique_ptr
rpartition( - strings_column_view const& strings, + strings_column_view const& input, string_scalar const& delimiter = string_scalar(""), + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of doxygen group diff --git a/cpp/include/cudf/strings/split/split_re.hpp b/cpp/include/cudf/strings/split/split_re.hpp index 14fcfaecdcd..f1736cb7e0c 100644 --- a/cpp/include/cudf/strings/split/split_re.hpp +++ b/cpp/include/cudf/strings/split/split_re.hpp @@ -75,6 +75,7 @@ struct regex_program; * @param prog Regex program instance * @param maxsplit Maximum number of splits to perform. * Default of -1 indicates all possible splits on each string. + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned result's device memory * @return A table of columns of strings */ @@ -82,6 +83,7 @@ std::unique_ptr
split_re( strings_column_view const& input, regex_program const& prog, size_type maxsplit = -1, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -125,17 +127,19 @@ std::unique_ptr
split_re( * * @throw cudf::logic_error if `pattern` is empty. * - * @param input A column of string elements to be split. + * @param input A column of string elements to be split * @param prog Regex program instance * @param maxsplit Maximum number of splits to perform. * Default of -1 indicates all possible splits on each string. - * @param mr Device memory resource used to allocate the returned result's device memory. - * @return A table of columns of strings. + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned result's device memory + * @return A table of columns of strings */ std::unique_ptr
rsplit_re( strings_column_view const& input, regex_program const& prog, size_type maxsplit = -1, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -185,13 +189,15 @@ std::unique_ptr
rsplit_re( * @param prog Regex program instance * @param maxsplit Maximum number of splits to perform. * Default of -1 indicates all possible splits on each string. + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned result's device memory - * @return Lists column of strings. + * @return Lists column of strings */ std::unique_ptr split_record_re( strings_column_view const& input, regex_program const& prog, size_type maxsplit = -1, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -243,6 +249,7 @@ std::unique_ptr split_record_re( * @param prog Regex program instance * @param maxsplit Maximum number of splits to perform. * Default of -1 indicates all possible splits on each string. + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned result's device memory * @return Lists column of strings */ @@ -250,6 +257,7 @@ std::unique_ptr rsplit_record_re( strings_column_view const& input, regex_program const& prog, size_type maxsplit = -1, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of doxygen group diff --git a/cpp/src/strings/split/partition.cu b/cpp/src/strings/split/partition.cu index 0c7d119ea38..16e6402cfef 100644 --- a/cpp/src/strings/split/partition.cu +++ b/cpp/src/strings/split/partition.cu @@ -239,20 +239,22 @@ std::unique_ptr
rpartition(strings_column_view const& strings, // external APIs -std::unique_ptr
partition(strings_column_view const& strings, +std::unique_ptr
partition(strings_column_view const& input, string_scalar const& delimiter, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::partition(strings, delimiter, cudf::get_default_stream(), mr); + return detail::partition(input, delimiter, stream, mr); } -std::unique_ptr
rpartition(strings_column_view const& strings, +std::unique_ptr
rpartition(strings_column_view const& input, string_scalar const& delimiter, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::rpartition(strings, delimiter, cudf::get_default_stream(), mr); + return detail::rpartition(input, delimiter, stream, mr); } } // namespace strings diff --git a/cpp/src/strings/split/split_re.cu b/cpp/src/strings/split/split_re.cu index 3be5937297f..913aec79758 100644 --- a/cpp/src/strings/split/split_re.cu +++ b/cpp/src/strings/split/split_re.cu @@ -340,37 +340,41 @@ std::unique_ptr rsplit_record_re(strings_column_view const& input, std::unique_ptr
split_re(strings_column_view const& input, regex_program const& prog, size_type maxsplit, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::split_re(input, prog, maxsplit, cudf::get_default_stream(), mr); + return detail::split_re(input, prog, maxsplit, stream, mr); } std::unique_ptr split_record_re(strings_column_view const& input, regex_program const& prog, size_type maxsplit, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::split_record_re(input, prog, maxsplit, cudf::get_default_stream(), mr); + return detail::split_record_re(input, prog, maxsplit, stream, mr); } std::unique_ptr
rsplit_re(strings_column_view const& input, regex_program const& prog, size_type maxsplit, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::rsplit_re(input, prog, maxsplit, cudf::get_default_stream(), mr); + return detail::rsplit_re(input, prog, maxsplit, stream, mr); } std::unique_ptr rsplit_record_re(strings_column_view const& input, regex_program const& prog, size_type maxsplit, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::rsplit_record_re(input, prog, maxsplit, cudf::get_default_stream(), mr); + return detail::rsplit_record_re(input, prog, maxsplit, stream, mr); } } // namespace strings diff --git a/cpp/tests/CMakeLists.txt b/cpp/tests/CMakeLists.txt index b15a6c41d39..4de18fceac1 100644 --- a/cpp/tests/CMakeLists.txt +++ b/cpp/tests/CMakeLists.txt @@ -634,7 +634,7 @@ ConfigureTest(STREAM_SEARCH_TEST streams/search_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_DICTIONARY_TEST streams/dictionary_test.cpp STREAM_MODE testing) ConfigureTest( STREAM_STRINGS_TEST streams/strings/case_test.cpp streams/strings/find_test.cpp - streams/strings/strings_tests.cpp STREAM_MODE testing + streams/strings/split_test.cpp streams/strings/strings_tests.cpp STREAM_MODE testing ) ConfigureTest(STREAM_SORTING_TEST streams/sorting_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_TEXT_TEST streams/text/ngrams_test.cpp STREAM_MODE testing) diff --git a/cpp/tests/streams/strings/split_test.cpp b/cpp/tests/streams/strings/split_test.cpp new file mode 100644 index 00000000000..24247f6f79c --- /dev/null +++ b/cpp/tests/streams/strings/split_test.cpp @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include + +#include +#include +#include +#include + +#include + +class StringsSplitTest : public cudf::test::BaseFixture {}; + +TEST_F(StringsSplitTest, SplitPartition) +{ + auto input = cudf::test::strings_column_wrapper({"Héllo thesé", "tést strings", ""}); + auto view = cudf::strings_column_view(input); + + auto const delimiter = cudf::string_scalar("é", true, cudf::test::get_default_stream()); + cudf::strings::split(view, delimiter, -1, cudf::test::get_default_stream()); + cudf::strings::rsplit(view, delimiter, -1, cudf::test::get_default_stream()); + cudf::strings::split_record(view, delimiter, -1, cudf::test::get_default_stream()); + cudf::strings::rsplit_record(view, delimiter, -1, cudf::test::get_default_stream()); + cudf::strings::partition(view, delimiter, cudf::test::get_default_stream()); + cudf::strings::rpartition(view, delimiter, cudf::test::get_default_stream()); + + auto const pattern = std::string("\\s"); + auto const prog = cudf::strings::regex_program::create(pattern); + cudf::strings::split_re(view, *prog, -1, cudf::test::get_default_stream()); + cudf::strings::split_record_re(view, *prog, -1, cudf::test::get_default_stream()); + cudf::strings::rsplit_re(view, *prog, -1, cudf::test::get_default_stream()); + cudf::strings::rsplit_record_re(view, *prog, -1, cudf::test::get_default_stream()); +} From 737b7593a58679fc59fd68e23eaf92195b9bd34c Mon Sep 17 00:00:00 2001 From: Bradley Dice Date: Thu, 12 Oct 2023 08:13:11 -0700 Subject: [PATCH 146/150] Use branch-23.12 workflows. (#14271) This PR switches back to using `branch-23.12` for CI workflows because the CUDA 12 ARM conda migration is complete. Authors: - Bradley Dice (https://github.com/bdice) Approvers: - Jake Awe (https://github.com/AyodeAwe) URL: https://github.com/rapidsai/cudf/pull/14271 --- .github/workflows/build.yaml | 16 ++++++++-------- .github/workflows/pr.yaml | 28 ++++++++++++++-------------- .github/workflows/test.yaml | 16 ++++++++-------- 3 files changed, 30 insertions(+), 30 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index dc2c81d1c77..ab028eb89cc 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -28,7 +28,7 @@ concurrency: jobs: cpp-build: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-build.yaml@cuda-120-arm + uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-build.yaml@branch-23.12 with: build_type: ${{ inputs.build_type || 'branch' }} branch: ${{ inputs.branch }} @@ -37,7 +37,7 @@ jobs: python-build: needs: [cpp-build] secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-build.yaml@cuda-120-arm + uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-build.yaml@branch-23.12 with: build_type: ${{ inputs.build_type || 'branch' }} branch: ${{ inputs.branch }} @@ -46,7 +46,7 @@ jobs: upload-conda: needs: [cpp-build, python-build] secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-upload-packages.yaml@cuda-120-arm + uses: rapidsai/shared-action-workflows/.github/workflows/conda-upload-packages.yaml@branch-23.12 with: build_type: ${{ inputs.build_type || 'branch' }} branch: ${{ inputs.branch }} @@ -57,7 +57,7 @@ jobs: if: github.ref_type == 'branch' needs: python-build secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@cuda-120-arm + uses: 
rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.12 with: arch: "amd64" branch: ${{ inputs.branch }} @@ -69,7 +69,7 @@ jobs: sha: ${{ inputs.sha }} wheel-build-cudf: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-build.yaml@cuda-120-arm + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-build.yaml@branch-23.12 with: build_type: ${{ inputs.build_type || 'branch' }} branch: ${{ inputs.branch }} @@ -79,7 +79,7 @@ jobs: wheel-publish-cudf: needs: wheel-build-cudf secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-publish.yaml@cuda-120-arm + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-publish.yaml@branch-23.12 with: build_type: ${{ inputs.build_type || 'branch' }} branch: ${{ inputs.branch }} @@ -89,7 +89,7 @@ jobs: wheel-build-dask-cudf: needs: wheel-publish-cudf secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-build.yaml@cuda-120-arm + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-build.yaml@branch-23.12 with: matrix_filter: map(select(.ARCH == "amd64" and .PY_VER == "3.10" and (.CUDA_VER == "11.8.0" or .CUDA_VER == "12.0.1"))) build_type: ${{ inputs.build_type || 'branch' }} @@ -100,7 +100,7 @@ jobs: wheel-publish-dask-cudf: needs: wheel-build-dask-cudf secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-publish.yaml@cuda-120-arm + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-publish.yaml@branch-23.12 with: build_type: ${{ inputs.build_type || 'branch' }} branch: ${{ inputs.branch }} diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml index 047b80f2e5c..214f9c90b41 100644 --- a/.github/workflows/pr.yaml +++ b/.github/workflows/pr.yaml @@ -26,34 +26,34 @@ jobs: - wheel-build-dask-cudf - wheel-tests-dask-cudf secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/pr-builder.yaml@cuda-120-arm + uses: rapidsai/shared-action-workflows/.github/workflows/pr-builder.yaml@branch-23.12 checks: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/checks.yaml@cuda-120-arm + uses: rapidsai/shared-action-workflows/.github/workflows/checks.yaml@branch-23.12 with: enable_check_generated_files: false conda-cpp-build: needs: checks secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-build.yaml@cuda-120-arm + uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-build.yaml@branch-23.12 with: build_type: pull-request conda-cpp-tests: needs: conda-cpp-build secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-tests.yaml@cuda-120-arm + uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-tests.yaml@branch-23.12 with: build_type: pull-request conda-python-build: needs: conda-cpp-build secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-build.yaml@cuda-120-arm + uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-build.yaml@branch-23.12 with: build_type: pull-request conda-python-cudf-tests: needs: conda-python-build secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-tests.yaml@cuda-120-arm + uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-tests.yaml@branch-23.12 with: build_type: pull-request test_script: "ci/test_python_cudf.sh" @@ -61,14 +61,14 @@ jobs: # Tests for dask_cudf, custreamz, cudf_kafka are separated for CI 
parallelism needs: conda-python-build secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-tests.yaml@cuda-120-arm + uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-tests.yaml@branch-23.12 with: build_type: pull-request test_script: "ci/test_python_other.sh" conda-java-tests: needs: conda-cpp-build secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@cuda-120-arm + uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.12 with: build_type: pull-request node_type: "gpu-v100-latest-1" @@ -78,7 +78,7 @@ jobs: conda-notebook-tests: needs: conda-python-build secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@cuda-120-arm + uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.12 with: build_type: pull-request node_type: "gpu-v100-latest-1" @@ -88,7 +88,7 @@ jobs: docs-build: needs: conda-python-build secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@cuda-120-arm + uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.12 with: build_type: pull-request node_type: "gpu-v100-latest-1" @@ -98,21 +98,21 @@ jobs: wheel-build-cudf: needs: checks secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-build.yaml@cuda-120-arm + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-build.yaml@branch-23.12 with: build_type: pull-request script: "ci/build_wheel_cudf.sh" wheel-tests-cudf: needs: wheel-build-cudf secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-test.yaml@cuda-120-arm + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-test.yaml@branch-23.12 with: build_type: pull-request script: ci/test_wheel_cudf.sh wheel-build-dask-cudf: needs: wheel-tests-cudf secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-build.yaml@cuda-120-arm + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-build.yaml@branch-23.12 with: matrix_filter: map(select(.ARCH == "amd64" and .PY_VER == "3.10" and (.CUDA_VER == "11.8.0" or .CUDA_VER == "12.0.1"))) build_type: pull-request @@ -120,7 +120,7 @@ jobs: wheel-tests-dask-cudf: needs: wheel-build-dask-cudf secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-test.yaml@cuda-120-arm + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-test.yaml@branch-23.12 with: matrix_filter: map(select(.ARCH == "amd64" and .PY_VER == "3.10" and (.CUDA_VER == "11.8.0" or .CUDA_VER == "12.0.1"))) build_type: pull-request diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index e58227c30dc..9ca32bcfe03 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -16,7 +16,7 @@ on: jobs: conda-cpp-tests: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-tests.yaml@cuda-120-arm + uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-tests.yaml@branch-23.12 with: build_type: nightly branch: ${{ inputs.branch }} @@ -24,7 +24,7 @@ jobs: sha: ${{ inputs.sha }} conda-cpp-memcheck-tests: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@cuda-120-arm + uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.12 with: build_type: nightly branch: ${{ inputs.branch }} @@ -36,7 +36,7 @@ jobs: run_script: 
"ci/test_cpp_memcheck.sh" conda-python-cudf-tests: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-tests.yaml@cuda-120-arm + uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-tests.yaml@branch-23.12 with: build_type: nightly branch: ${{ inputs.branch }} @@ -46,7 +46,7 @@ jobs: conda-python-other-tests: # Tests for dask_cudf, custreamz, cudf_kafka are separated for CI parallelism secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-tests.yaml@cuda-120-arm + uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-tests.yaml@branch-23.12 with: build_type: nightly branch: ${{ inputs.branch }} @@ -55,7 +55,7 @@ jobs: test_script: "ci/test_python_other.sh" conda-java-tests: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@cuda-120-arm + uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.12 with: build_type: nightly branch: ${{ inputs.branch }} @@ -67,7 +67,7 @@ jobs: run_script: "ci/test_java.sh" conda-notebook-tests: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@cuda-120-arm + uses: rapidsai/shared-action-workflows/.github/workflows/custom-job.yaml@branch-23.12 with: build_type: nightly branch: ${{ inputs.branch }} @@ -79,7 +79,7 @@ jobs: run_script: "ci/test_notebooks.sh" wheel-tests-cudf: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-test.yaml@cuda-120-arm + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-test.yaml@branch-23.12 with: build_type: nightly branch: ${{ inputs.branch }} @@ -88,7 +88,7 @@ jobs: script: ci/test_wheel_cudf.sh wheel-tests-dask-cudf: secrets: inherit - uses: rapidsai/shared-action-workflows/.github/workflows/wheels-test.yaml@cuda-120-arm + uses: rapidsai/shared-action-workflows/.github/workflows/wheels-test.yaml@branch-23.12 with: matrix_filter: map(select(.ARCH == "amd64" and .PY_VER == "3.10" and (.CUDA_VER == "11.8.0" or .CUDA_VER == "12.0.1"))) build_type: nightly From fa4e8ab1af4acfd2c88a619b4d9693f4a5fda168 Mon Sep 17 00:00:00 2001 From: David Wendt <45795991+davidwendt@users.noreply.github.com> Date: Thu, 12 Oct 2023 17:11:51 -0400 Subject: [PATCH 147/150] Expose stream parameter in public strings replace APIs (#14261) Add stream parameter to public APIs: - `cudf::strings::replace()` (x2) - `cudf::strings::replace_slice()` - `cudf::strings::replace_re()` (x2) - `cudf::strings::replace_with_backrefs()` Also cleaned up some of the doxygen comments and added stream-tests. 
Reference #13744 Authors: - David Wendt (https://github.com/davidwendt) Approvers: - Bradley Dice (https://github.com/bdice) - Mike Wilson (https://github.com/hyperbolic2346) - Nghia Truong (https://github.com/ttnghia) URL: https://github.com/rapidsai/cudf/pull/14261 --- cpp/include/cudf/strings/replace.hpp | 42 +++++++----- cpp/include/cudf/strings/replace_re.hpp | 28 +++++--- cpp/src/strings/replace/backref_re.cu | 3 +- cpp/src/strings/replace/multi.cu | 3 +- cpp/src/strings/replace/multi_re.cu | 3 +- cpp/src/strings/replace/replace.cu | 8 ++- cpp/src/strings/replace/replace_re.cu | 4 +- cpp/tests/CMakeLists.txt | 10 ++- cpp/tests/streams/strings/replace_test.cpp | 80 ++++++++++++++++++++++ 9 files changed, 142 insertions(+), 39 deletions(-) create mode 100644 cpp/tests/streams/strings/replace_test.cpp diff --git a/cpp/include/cudf/strings/replace.hpp b/cpp/include/cudf/strings/replace.hpp index 22818f7542e..2476a41e886 100644 --- a/cpp/include/cudf/strings/replace.hpp +++ b/cpp/include/cudf/strings/replace.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022, NVIDIA CORPORATION. + * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -54,19 +54,21 @@ namespace strings { * * @throw cudf::logic_error if target is an empty string. * - * @param strings Strings column for this operation. - * @param target String to search for within each string. - * @param repl Replacement string if target is found. + * @param input Strings column for this operation + * @param target String to search for within each string + * @param repl Replacement string if target is found * @param maxrepl Maximum times to replace if target appears multiple times in the input string. * Default of -1 specifies replace all occurrences of target in each string. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New strings column. + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New strings column */ std::unique_ptr replace( - strings_column_view const& strings, + strings_column_view const& input, string_scalar const& target, string_scalar const& repl, - int32_t maxrepl = -1, + cudf::size_type maxrepl = -1, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -92,21 +94,23 @@ std::unique_ptr replace( * * @throw cudf::logic_error if start is greater than stop. * - * @param strings Strings column for this operation. + * @param input Strings column for this operation. * @param repl Replacement string for specified positions found. * Default is empty string. * @param start Start position where repl will be added. * Default is 0, first character position. * @param stop End position (exclusive) to use for replacement. * Default of -1 specifies the end of each string. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New strings column. 
+ * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New strings column */ std::unique_ptr replace_slice( - strings_column_view const& strings, + strings_column_view const& input, string_scalar const& repl = string_scalar(""), size_type start = 0, size_type stop = -1, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -141,16 +145,18 @@ std::unique_ptr replace_slice( * if repls is a single string. * @throw cudf::logic_error if targets or repls contain null entries. * - * @param strings Strings column for this operation. - * @param targets Strings to search for in each string. - * @param repls Corresponding replacement strings for target strings. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New strings column. + * @param input Strings column for this operation + * @param targets Strings to search for in each string + * @param repls Corresponding replacement strings for target strings + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New strings column */ std::unique_ptr replace( - strings_column_view const& strings, + strings_column_view const& input, strings_column_view const& targets, strings_column_view const& repls, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of doxygen group diff --git a/cpp/include/cudf/strings/replace_re.hpp b/cpp/include/cudf/strings/replace_re.hpp index bc6659835c3..77db2882253 100644 --- a/cpp/include/cudf/strings/replace_re.hpp +++ b/cpp/include/cudf/strings/replace_re.hpp @@ -43,20 +43,22 @@ struct regex_program; * * See the @ref md_regex "Regex Features" page for details on patterns supported by this API. * - * @param strings Strings instance for this operation + * @param input Strings instance for this operation * @param prog Regex program instance * @param replacement The string used to replace the matched sequence in each string. * Default is an empty string. * @param max_replace_count The maximum number of times to replace the matched pattern * within each string. Default replaces every substring that is matched. + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return New strings column */ std::unique_ptr replace_re( - strings_column_view const& strings, + strings_column_view const& input, regex_program const& prog, string_scalar const& replacement = string_scalar(""), std::optional max_replace_count = std::nullopt, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -67,18 +69,20 @@ std::unique_ptr replace_re( * * See the @ref md_regex "Regex Features" page for details on patterns supported by this API. * - * @param strings Strings instance for this operation. - * @param patterns The regular expression patterns to search within each string. - * @param replacements The strings used for replacement. - * @param flags Regex flags for interpreting special characters in the patterns. 
- * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New strings column. + * @param input Strings instance for this operation + * @param patterns The regular expression patterns to search within each string + * @param replacements The strings used for replacement + * @param flags Regex flags for interpreting special characters in the patterns + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New strings column */ std::unique_ptr replace_re( - strings_column_view const& strings, + strings_column_view const& input, std::vector const& patterns, strings_column_view const& replacements, regex_flags const flags = regex_flags::DEFAULT, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -92,16 +96,18 @@ std::unique_ptr replace_re( * @throw cudf::logic_error if capture index values in `replacement` are not in range 0-99, and also * if the index exceeds the group count specified in the pattern * - * @param strings Strings instance for this operation + * @param input Strings instance for this operation * @param prog Regex program instance * @param replacement The replacement template for creating the output string + * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return New strings column */ std::unique_ptr replace_with_backrefs( - strings_column_view const& strings, + strings_column_view const& input, regex_program const& prog, std::string_view replacement, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); } // namespace strings diff --git a/cpp/src/strings/replace/backref_re.cu b/cpp/src/strings/replace/backref_re.cu index 31e06aac72b..74f38cbcc20 100644 --- a/cpp/src/strings/replace/backref_re.cu +++ b/cpp/src/strings/replace/backref_re.cu @@ -148,10 +148,11 @@ std::unique_ptr replace_with_backrefs(strings_column_view const& input, std::unique_ptr replace_with_backrefs(strings_column_view const& strings, regex_program const& prog, std::string_view replacement, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::replace_with_backrefs(strings, prog, replacement, cudf::get_default_stream(), mr); + return detail::replace_with_backrefs(strings, prog, replacement, stream, mr); } } // namespace strings diff --git a/cpp/src/strings/replace/multi.cu b/cpp/src/strings/replace/multi.cu index 92ace4e7bc7..ee47932100a 100644 --- a/cpp/src/strings/replace/multi.cu +++ b/cpp/src/strings/replace/multi.cu @@ -490,10 +490,11 @@ std::unique_ptr replace(strings_column_view const& input, std::unique_ptr replace(strings_column_view const& strings, strings_column_view const& targets, strings_column_view const& repls, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::replace(strings, targets, repls, cudf::get_default_stream(), mr); + return detail::replace(strings, targets, repls, stream, mr); } } // namespace strings diff --git a/cpp/src/strings/replace/multi_re.cu b/cpp/src/strings/replace/multi_re.cu index 867b443c036..3375cb7a789 100644 --- a/cpp/src/strings/replace/multi_re.cu +++ b/cpp/src/strings/replace/multi_re.cu 
@@ -206,10 +206,11 @@ std::unique_ptr replace_re(strings_column_view const& strings, std::vector const& patterns, strings_column_view const& replacements, regex_flags const flags, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::replace_re(strings, patterns, replacements, flags, cudf::get_default_stream(), mr); + return detail::replace_re(strings, patterns, replacements, flags, stream, mr); } } // namespace strings diff --git a/cpp/src/strings/replace/replace.cu b/cpp/src/strings/replace/replace.cu index acc1502f4d6..a6a14f27dec 100644 --- a/cpp/src/strings/replace/replace.cu +++ b/cpp/src/strings/replace/replace.cu @@ -751,21 +751,23 @@ std::unique_ptr replace_nulls(strings_column_view const& strings, std::unique_ptr replace(strings_column_view const& strings, string_scalar const& target, string_scalar const& repl, - int32_t maxrepl, + cudf::size_type maxrepl, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::replace(strings, target, repl, maxrepl, cudf::get_default_stream(), mr); + return detail::replace(strings, target, repl, maxrepl, stream, mr); } std::unique_ptr replace_slice(strings_column_view const& strings, string_scalar const& repl, size_type start, size_type stop, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::replace_slice(strings, repl, start, stop, cudf::get_default_stream(), mr); + return detail::replace_slice(strings, repl, start, stop, stream, mr); } } // namespace strings diff --git a/cpp/src/strings/replace/replace_re.cu b/cpp/src/strings/replace/replace_re.cu index 81ddb937be5..502d5f1a52e 100644 --- a/cpp/src/strings/replace/replace_re.cu +++ b/cpp/src/strings/replace/replace_re.cu @@ -134,11 +134,11 @@ std::unique_ptr replace_re(strings_column_view const& strings, regex_program const& prog, string_scalar const& replacement, std::optional max_replace_count, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::replace_re( - strings, prog, replacement, max_replace_count, cudf::get_default_stream(), mr); + return detail::replace_re(strings, prog, replacement, max_replace_count, stream, mr); } } // namespace strings diff --git a/cpp/tests/CMakeLists.txt b/cpp/tests/CMakeLists.txt index 4de18fceac1..f36fcbc9246 100644 --- a/cpp/tests/CMakeLists.txt +++ b/cpp/tests/CMakeLists.txt @@ -633,8 +633,14 @@ ConfigureTest(STREAM_REPLACE_TEST streams/replace_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_SEARCH_TEST streams/search_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_DICTIONARY_TEST streams/dictionary_test.cpp STREAM_MODE testing) ConfigureTest( - STREAM_STRINGS_TEST streams/strings/case_test.cpp streams/strings/find_test.cpp - streams/strings/split_test.cpp streams/strings/strings_tests.cpp STREAM_MODE testing + STREAM_STRINGS_TEST + streams/strings/case_test.cpp + streams/strings/find_test.cpp + streams/strings/replace_test.cpp + streams/strings/split_test.cpp + streams/strings/strings_tests.cpp + STREAM_MODE + testing ) ConfigureTest(STREAM_SORTING_TEST streams/sorting_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_TEXT_TEST streams/text/ngrams_test.cpp STREAM_MODE testing) diff --git a/cpp/tests/streams/strings/replace_test.cpp b/cpp/tests/streams/strings/replace_test.cpp new file mode 100644 index 00000000000..fc87460b706 --- /dev/null +++ b/cpp/tests/streams/strings/replace_test.cpp @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2023, 
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cudf_test/base_fixture.hpp>
+#include <cudf_test/column_wrapper.hpp>
+#include <cudf_test/default_stream.hpp>
+
+#include <cudf/strings/regex/regex_program.hpp>
+#include <cudf/strings/replace.hpp>
+#include <cudf/strings/replace_re.hpp>
+
+#include <string>
+
+class StringsReplaceTest : public cudf::test::BaseFixture {};
+
+TEST_F(StringsReplaceTest, Replace)
+{
+  auto input = cudf::test::strings_column_wrapper({"Héllo", "thesé", "tést strings", ""});
+  auto view  = cudf::strings_column_view(input);
+
+  auto const target = cudf::string_scalar("é", true, cudf::test::get_default_stream());
+  auto const repl   = cudf::string_scalar(" ", true, cudf::test::get_default_stream());
+  cudf::strings::replace(view, target, repl, -1, cudf::test::get_default_stream());
+  cudf::strings::replace(view, view, view, cudf::test::get_default_stream());
+  cudf::strings::replace_slice(view, repl, 1, 2, cudf::test::get_default_stream());
+
+  auto const pattern = std::string("[a-z]");
+  auto const prog    = cudf::strings::regex_program::create(pattern);
+  cudf::strings::replace_re(view, *prog, repl, 1, cudf::test::get_default_stream());
+
+  cudf::test::strings_column_wrapper repls({"1", "a", " "});
+  cudf::strings::replace_re(view,
+                            {pattern, pattern, pattern},
+                            cudf::strings_column_view(repls),
+                            cudf::strings::regex_flags::DEFAULT,
+                            cudf::test::get_default_stream());
+}
+
+TEST_F(StringsReplaceTest, ReplaceRegex)
+{
+  auto input = cudf::test::strings_column_wrapper({"Héllo", "thesé", "tést strings", ""});
+  auto view  = cudf::strings_column_view(input);
+
+  auto const repl    = cudf::string_scalar(" ", true, cudf::test::get_default_stream());
+  auto const pattern = std::string("[a-z]");
+  auto const prog    = cudf::strings::regex_program::create(pattern);
+  cudf::strings::replace_re(view, *prog, repl, 1, cudf::test::get_default_stream());
+
+  cudf::test::strings_column_wrapper repls({"1", "a", " "});
+  cudf::strings::replace_re(view,
+                            {pattern, pattern, pattern},
+                            cudf::strings_column_view(repls),
+                            cudf::strings::regex_flags::DEFAULT,
+                            cudf::test::get_default_stream());
+}
+
+TEST_F(StringsReplaceTest, ReplaceRegexBackref)
+{
+  auto input = cudf::test::strings_column_wrapper({"Héllo thesé", "tést strings"});
+  auto view  = cudf::strings_column_view(input);
+
+  auto const repl_template = std::string("\\2-\\1");
+  auto const pattern       = std::string("(\\w) (\\w)");
+  auto const prog          = cudf::strings::regex_program::create(pattern);
+  cudf::strings::replace_with_backrefs(
+    view, *prog, repl_template, cudf::test::get_default_stream());
+}
From 6e00ad06abb1152816ed6edda698cb26f08a64d2 Mon Sep 17 00:00:00 2001
From: David Wendt <45795991+davidwendt@users.noreply.github.com>
Date: Thu, 12 Oct 2023 22:32:25 -0400
Subject: [PATCH 148/150] Return error if BOOL8 column-type is used with
 integers-to-hex (#14208)

Removes support for converting the BOOL8 column type to hex using
`cudf::strings::integers_to_hex`. Also fixes the other integer-to-string
conversions to reject this unsupported type, and adds gtests to verify an
error is thrown for this case.
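As a quick illustration of the behavior change (a minimal sketch; the wrapper
types, input values, and expected hex strings are taken from the doxygen
example and the gtests added in this patch):

```cpp
// BOOL8 columns are no longer accepted and now raise an error
cudf::test::fixed_width_column_wrapper<bool> bools({true, false, true});
EXPECT_THROW(cudf::strings::integers_to_hex(bools), cudf::logic_error);

// integer columns behave as before: leading zeros fill out whole bytes
cudf::test::fixed_width_column_wrapper<int32_t> ints({1234, -1, 0});
auto hex = cudf::strings::integers_to_hex(ints);  // ["04D2", "FFFFFFFF", "00"]
```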
Closes #14232

Authors:
  - David Wendt (https://github.com/davidwendt)

Approvers:
  - Bradley Dice (https://github.com/bdice)
  - Nghia Truong (https://github.com/ttnghia)
  - Karthikeyan (https://github.com/karthikeyann)
  - MithunR (https://github.com/mythrocks)

URL: https://github.com/rapidsai/cudf/pull/14208
---
 .../cudf/strings/convert/convert_integers.hpp |  4 +-
 cpp/include/cudf/utilities/traits.hpp         | 24 ++++++++++++
 cpp/src/strings/convert/convert_hex.cu        | 27 ++++++-------
 cpp/src/strings/convert/convert_integers.cu   | 38 ++++++-------------
 cpp/src/utilities/traits.cpp                  | 15 +++++++-
 cpp/tests/strings/integers_tests.cpp          | 26 +++++++++++++
 6 files changed, 89 insertions(+), 45 deletions(-)

diff --git a/cpp/include/cudf/strings/convert/convert_integers.hpp b/cpp/include/cudf/strings/convert/convert_integers.hpp
index 44213b84139..756ce48645d 100644
--- a/cpp/include/cudf/strings/convert/convert_integers.hpp
+++ b/cpp/include/cudf/strings/convert/convert_integers.hpp
@@ -199,14 +199,14 @@ std::unique_ptr<column> is_hex(
 *
 * @code{.pseudo}
 * Example:
- * input = [123, -1, 0, 27, 342718233] // int32 type input column
+ * input = [1234, -1, 0, 27, 342718233] // int32 type input column
 * s = integers_to_hex(input)
 * s is [ '04D2', 'FFFFFFFF', '00', '1B', '146D7719']
 * @endcode
 *
 * The example above shows an `INT32` type column where each integer is 4 bytes.
 * Leading zeros are suppressed unless filling out a complete byte as in
- * `123 -> '04D2'` instead of `000004D2` or `4D2`.
+ * `1234 -> '04D2'` instead of `000004D2` or `4D2`.
 *
 * @throw cudf::logic_error if the input column is not integral type.
 *
diff --git a/cpp/include/cudf/utilities/traits.hpp b/cpp/include/cudf/utilities/traits.hpp
index 51f5d9d571a..2dda0740b96 100644
--- a/cpp/include/cudf/utilities/traits.hpp
+++ b/cpp/include/cudf/utilities/traits.hpp
@@ -279,6 +279,30 @@ constexpr inline bool is_integral()
 */
bool is_integral(data_type type);

+/**
+ * @brief Indicates whether the type `T` is an integral type but not bool type.
+ *
+ * @tparam T The type to verify
+ * @return true `T` is integral but not bool
+ * @return false `T` is not integral or is bool
+ */
+template <typename T>
+constexpr inline bool is_integral_not_bool()
+{
+  return cuda::std::is_integral_v<T> and not std::is_same_v<T, bool>;
+}
+
+/**
+ * @brief Indicates whether `type` is an integral `data_type` and not BOOL8
+ *
+ * "Integral" types are fundamental integer types such as `INT*` and `UINT*`.
+ *
+ * @param type The `data_type` to verify
+ * @return true `type` is integral but not bool
+ * @return false `type` is not integral or is bool
+ */
+bool is_integral_not_bool(data_type type);
+
 /**
 * @brief Indicates whether the type `T` is a floating point type.
 *
diff --git a/cpp/src/strings/convert/convert_hex.cu b/cpp/src/strings/convert/convert_hex.cu
index bed682aba71..f5bdbcbd199 100644
--- a/cpp/src/strings/convert/convert_hex.cu
+++ b/cpp/src/strings/convert/convert_hex.cu
@@ -93,7 +93,8 @@ struct hex_to_integer_fn {
 * The output_column is expected to be one of the integer types only.
 */
struct dispatch_hex_to_integers_fn {
-  template <typename T, std::enable_if_t<std::is_integral_v<T>>* = nullptr>
+  template <typename T, std::enable_if_t<cudf::is_integral_not_bool<T>()>* = nullptr>
  void operator()(column_device_view const& strings_column,
    mutable_column_view& output_column,
    rmm::cuda_stream_view stream) const
@@ -105,22 +106,14 @@
    d_results,
    hex_to_integer_fn{strings_column});
 }
-  // non-integral types throw an exception
+  // non-integer types throw an exception
  template <typename T, typename... Args>
-  std::enable_if_t<not std::is_integral_v<T>, void> operator()(Args&&...) const
+  std::enable_if_t<not cudf::is_integral_not_bool<T>(), void> operator()(Args&&...) const
  {
-    CUDF_FAIL("Output for hex_to_integers must be an integral type.");
+    CUDF_FAIL("Output for hex_to_integers must be an integer type.");
  }
};

-template <>
-void dispatch_hex_to_integers_fn::operator()<bool>(column_device_view const&,
-  mutable_column_view&,
-  rmm::cuda_stream_view) const
-{
-  CUDF_FAIL("Output for hex_to_integers must not be a boolean type.");
-}
-
/**
 * @brief Functor to convert integers to hexadecimal strings
 *
 * @tparam IntegerType The specific integer type to convert from.
 */
@@ -179,7 +172,8 @@ struct integer_to_hex_fn {
};

struct dispatch_integers_to_hex_fn {
-  template <typename T, std::enable_if_t<std::is_integral_v<T>>* = nullptr>
+  template <typename T, std::enable_if_t<cudf::is_integral_not_bool<T>()>* = nullptr>
  std::unique_ptr<column> operator()(column_view const& input,
    rmm::cuda_stream_view stream,
    rmm::mr::device_memory_resource* mr) const
@@ -195,11 +189,12 @@ struct dispatch_integers_to_hex_fn {
    input.null_count(),
    cudf::detail::copy_bitmask(input, stream, mr));
 }
-  // non-integral types throw an exception
+  // non-integer types throw an exception
  template <typename T, typename... Args>
-  std::enable_if_t<not std::is_integral_v<T>, std::unique_ptr<column>> operator()(Args...) const
+  std::enable_if_t<not cudf::is_integral_not_bool<T>(), std::unique_ptr<column>> operator()(
+    Args...) const
  {
-    CUDF_FAIL("integers_to_hex only supports integral type columns");
+    CUDF_FAIL("integers_to_hex only supports integer type columns");
  }
};

diff --git a/cpp/src/strings/convert/convert_integers.cu b/cpp/src/strings/convert/convert_integers.cu
index 5597d2831c0..2c21fc5d790 100644
--- a/cpp/src/strings/convert/convert_integers.cu
+++ b/cpp/src/strings/convert/convert_integers.cu
@@ -111,7 +111,7 @@ inline __device__ bool is_integer(string_view const& d_str)
 * @brief The dispatch functions for checking if strings are valid integers.
 */
struct dispatch_is_integer_fn {
-  template <typename T, std::enable_if_t<std::is_integral_v<T>>* = nullptr>
+  template <typename T, std::enable_if_t<cudf::is_integral_not_bool<T>()>* = nullptr>
  std::unique_ptr<column> operator()(strings_column_view const& strings,
    rmm::cuda_stream_view stream,
    rmm::mr::device_memory_resource* mr) const
@@ -145,7 +145,7 @@ struct dispatch_is_integer_fn {
    return results;
 }

-  template <typename T, std::enable_if_t<not std::is_integral_v<T>>* = nullptr>
+  template <typename T, std::enable_if_t<not cudf::is_integral_not_bool<T>()>* = nullptr>
  std::unique_ptr<column> operator()(strings_column_view const&,
    rmm::cuda_stream_view,
    rmm::mr::device_memory_resource*) const
@@ -243,7 +243,8 @@ struct string_to_integer_fn {
 * The output_column is expected to be one of the integer types only.
 */
struct dispatch_to_integers_fn {
-  template <typename T, std::enable_if_t<std::is_integral_v<T>>* = nullptr>
+  template <typename T, std::enable_if_t<cudf::is_integral_not_bool<T>()>* = nullptr>
  void operator()(column_device_view const& strings_column,
    mutable_column_view& output_column,
    rmm::cuda_stream_view stream) const
@@ -254,22 +255,14 @@
    output_column.data<T>(),
    string_to_integer_fn{strings_column});
 }
-  // non-integral types throw an exception
-  template <typename T, std::enable_if_t<not std::is_integral_v<T>>* = nullptr>
+  // non-integer types throw an exception
+  template <typename T, std::enable_if_t<not cudf::is_integral_not_bool<T>()>* = nullptr>
  void operator()(column_device_view const&, mutable_column_view&, rmm::cuda_stream_view) const
  {
-    CUDF_FAIL("Output for to_integers must be an integral type.");
+    CUDF_FAIL("Output for to_integers must be an integer type.");
  }
};

-template <>
-void dispatch_to_integers_fn::operator()<bool>(column_device_view const&,
-  mutable_column_view&,
-  rmm::cuda_stream_view) const
-{
-  CUDF_FAIL("Output for to_integers must not be a boolean type.");
-}
-
} // namespace

// This will convert a strings column into any integer column type.
@@ -351,7 +344,8 @@ struct from_integers_fn {
 * The template function declaration ensures only integer types are used.
 */
struct dispatch_from_integers_fn {
-  template <typename T, std::enable_if_t<std::is_integral_v<T>>* = nullptr>
+  template <typename T, std::enable_if_t<cudf::is_integral_not_bool<T>()>* = nullptr>
  std::unique_ptr<column> operator()(column_view const& integers,
    rmm::cuda_stream_view stream,
    rmm::mr::device_memory_resource* mr) const
@@ -373,23 +367,15 @@ struct dispatch_from_integers_fn {
    std::move(null_mask));
 }
-  // non-integral types throw an exception
-  template <typename T, std::enable_if_t<not std::is_integral_v<T>>* = nullptr>
+  // non-integer types throw an exception
+  template <typename T, std::enable_if_t<not cudf::is_integral_not_bool<T>()>* = nullptr>
  std::unique_ptr<column> operator()(column_view const&,
    rmm::cuda_stream_view,
    rmm::mr::device_memory_resource*) const
  {
-    CUDF_FAIL("Values for from_integers function must be an integral type.");
+    CUDF_FAIL("Values for from_integers function must be an integer type.");
  }
};
-
-template <>
-std::unique_ptr<column> dispatch_from_integers_fn::operator()<bool>(
-  column_view const&, rmm::cuda_stream_view, rmm::mr::device_memory_resource*) const
-{
-  CUDF_FAIL("Input for from_integers must not be a boolean type.");
-}
-
} // namespace

// This will convert all integer column types into a strings column.
diff --git a/cpp/src/utilities/traits.cpp b/cpp/src/utilities/traits.cpp
index bc10dd7845a..b0078ff85a2 100644
--- a/cpp/src/utilities/traits.cpp
+++ b/cpp/src/utilities/traits.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2022, NVIDIA CORPORATION.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -158,6 +158,19 @@ struct is_integral_impl {
bool is_integral(data_type type) { return cudf::type_dispatcher(type, is_integral_impl{}); }

+struct is_integral_not_bool_impl {
+  template <typename T>
+  constexpr bool operator()()
+  {
+    return is_integral_not_bool<T>();
+  }
+};
+
+bool is_integral_not_bool(data_type type)
+{
+  return cudf::type_dispatcher(type, is_integral_not_bool_impl{});
+}
+
struct is_floating_point_impl {
  template <typename T>
  constexpr bool operator()()
diff --git a/cpp/tests/strings/integers_tests.cpp b/cpp/tests/strings/integers_tests.cpp
index 59805f9cb6d..c8f292f55b2 100644
--- a/cpp/tests/strings/integers_tests.cpp
+++ b/cpp/tests/strings/integers_tests.cpp
@@ -456,3 +456,29 @@ TEST_F(StringsConvertTest, IntegerToHexWithNull)
  auto results = cudf::strings::integers_to_hex(integers);
  CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected);
 }
+
+TEST_F(StringsConvertTest, IntegerConvertErrors)
+{
+  cudf::test::fixed_width_column_wrapper<bool> bools(
+    {true, true, false, false, true, true, false, true});
+  cudf::test::fixed_width_column_wrapper<float> floats(
+    {123456.0, -1.0, 0.0, 0.0, 12.0, 12345.0, 123456789.0});
+  EXPECT_THROW(cudf::strings::integers_to_hex(bools), cudf::logic_error);
+  EXPECT_THROW(cudf::strings::integers_to_hex(floats), cudf::logic_error);
+  EXPECT_THROW(cudf::strings::from_integers(bools), cudf::logic_error);
+  EXPECT_THROW(cudf::strings::from_integers(floats), cudf::logic_error);
+
+  auto input = cudf::test::strings_column_wrapper({"123456", "-1", "0"});
+  auto view  = cudf::strings_column_view(input);
+  EXPECT_THROW(cudf::strings::to_integers(view, cudf::data_type(cudf::type_id::BOOL8)),
+               cudf::logic_error);
+  EXPECT_THROW(cudf::strings::to_integers(view, cudf::data_type(cudf::type_id::FLOAT32)),
+               cudf::logic_error);
+  EXPECT_THROW(cudf::strings::to_integers(view, cudf::data_type(cudf::type_id::TIMESTAMP_SECONDS)),
+               cudf::logic_error);
+  EXPECT_THROW(
+    cudf::strings::to_integers(view, cudf::data_type(cudf::type_id::DURATION_MILLISECONDS)),
+    cudf::logic_error);
+  EXPECT_THROW(cudf::strings::to_integers(view, cudf::data_type(cudf::type_id::DECIMAL32)),
cudf::logic_error); +} From d590e0bde9389b8a403b2b7ae4c5372ae6728016 Mon Sep 17 00:00:00 2001 From: David Wendt <45795991+davidwendt@users.noreply.github.com> Date: Mon, 16 Oct 2023 10:37:00 -0400 Subject: [PATCH 149/150] Expose stream parameter in public strings convert APIs (#14255) Add stream parameter to public APIs: - `cudf::strings::to_booleans()` - `cudf::strings::from_booleans()` - `cudf::strings::to_timestamps()` - `cudf::strings::from_timestamps()` - `cudf::strings::is_timestamp()` - `cudf::strings::to_durations()` - `cudf::strings::from_durations()` - `cudf::strings::to_fixed_point()` - `cudf::strings::from_fixed_point()` - `cudf::strings::to_floats()` - `cudf::strings::from_floats()` - `cudf::strings::is_float()` - `cudf::strings::to_integers()` - `cudf::strings::from_integers()` - `cudf::strings::is_integer()` - `cudf::strings::hex_to_integers()` - `cudf::strings::integers_to_hex()` - `cudf::strings::is_hex()` - `cudf::strings::ipv4_to_integers()` - `cudf::strings::integers_to_ipv4()` - `cudf::strings::is_ipv4()` - `cudf::strings::url_encode()` - `cudf::strings::url_decode()` - `cudf::strings::format_list_column()` Also cleaned up some of the doxygen comments and removed some default parameters. Reference #13744 Authors: - David Wendt (https://github.com/davidwendt) Approvers: - MithunR (https://github.com/mythrocks) - Nghia Truong (https://github.com/ttnghia) URL: https://github.com/rapidsai/cudf/pull/14255 --- .../cudf/strings/convert/convert_booleans.hpp | 32 ++-- .../cudf/strings/convert/convert_datetime.hpp | 34 ++-- .../strings/convert/convert_durations.hpp | 26 ++-- .../strings/convert/convert_fixed_point.hpp | 30 ++-- .../cudf/strings/convert/convert_floats.hpp | 30 ++-- .../cudf/strings/convert/convert_integers.hpp | 72 +++++---- .../cudf/strings/convert/convert_ipv4.hpp | 30 ++-- .../cudf/strings/convert/convert_lists.hpp | 14 +- .../cudf/strings/convert/convert_urls.hpp | 22 +-- cpp/src/strings/convert/convert_booleans.cu | 20 +-- cpp/src/strings/convert/convert_datetime.cu | 9 +- cpp/src/strings/convert/convert_durations.cu | 20 +-- .../strings/convert/convert_fixed_point.cu | 11 +- cpp/src/strings/convert/convert_floats.cu | 42 ++--- cpp/src/strings/convert/convert_hex.cu | 9 +- cpp/src/strings/convert/convert_integers.cu | 64 ++++---- cpp/src/strings/convert/convert_ipv4.cu | 39 ++--- cpp/src/strings/convert/convert_lists.cu | 3 +- cpp/src/strings/convert/convert_urls.cu | 10 +- cpp/tests/CMakeLists.txt | 1 + cpp/tests/streams/strings/convert_test.cpp | 146 ++++++++++++++++++ cpp/tests/strings/booleans_tests.cpp | 33 +++- cpp/tests/strings/format_lists_tests.cpp | 9 +- java/src/main/native/src/ColumnViewJni.cpp | 11 +- 24 files changed, 487 insertions(+), 230 deletions(-) create mode 100644 cpp/tests/streams/strings/convert_test.cpp diff --git a/cpp/include/cudf/strings/convert/convert_booleans.hpp b/cpp/include/cudf/strings/convert/convert_booleans.hpp index ab63503f166..9e9f25e800a 100644 --- a/cpp/include/cudf/strings/convert/convert_booleans.hpp +++ b/cpp/include/cudf/strings/convert/convert_booleans.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022, NVIDIA CORPORATION. + * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -35,14 +35,16 @@ namespace strings { * * Any null entries will result in corresponding null entries in the output column. 
* - * @param strings Strings instance for this operation. - * @param true_string String to expect for true. Non-matching strings are false. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New BOOL8 column converted from strings. + * @param input Strings instance for this operation + * @param true_string String to expect for true. Non-matching strings are false + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New BOOL8 column converted from strings */ std::unique_ptr to_booleans( - strings_column_view const& strings, - string_scalar const& true_string = string_scalar("true"), + strings_column_view const& input, + string_scalar const& true_string, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -53,16 +55,18 @@ std::unique_ptr to_booleans( * * @throw cudf::logic_error if the input column is not BOOL8 type. * - * @param booleans Boolean column to convert. - * @param true_string String to use for true in the output column. - * @param false_string String to use for false in the output column. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New strings column. + * @param booleans Boolean column to convert + * @param true_string String to use for true in the output column + * @param false_string String to use for false in the output column + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New strings column */ std::unique_ptr from_booleans( column_view const& booleans, - string_scalar const& true_string = string_scalar("true"), - string_scalar const& false_string = string_scalar("false"), + string_scalar const& true_string, + string_scalar const& false_string, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of doxygen group diff --git a/cpp/include/cudf/strings/convert/convert_datetime.hpp b/cpp/include/cudf/strings/convert/convert_datetime.hpp index fa729d26734..81cce14b53b 100644 --- a/cpp/include/cudf/strings/convert/convert_datetime.hpp +++ b/cpp/include/cudf/strings/convert/convert_datetime.hpp @@ -77,16 +77,18 @@ namespace strings { * * @throw cudf::logic_error if timestamp_type is not a timestamp type. * - * @param strings Strings instance for this operation. - * @param timestamp_type The timestamp type used for creating the output column. - * @param format String specifying the timestamp format in strings. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New datetime column. 
+ * @param input Strings instance for this operation + * @param timestamp_type The timestamp type used for creating the output column + * @param format String specifying the timestamp format in strings + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New datetime column */ std::unique_ptr to_timestamps( - strings_column_view const& strings, + strings_column_view const& input, data_type timestamp_type, std::string_view format, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -124,14 +126,16 @@ std::unique_ptr to_timestamps( * This will return a column of type BOOL8 where a `true` row indicates the corresponding * input string can be parsed correctly with the given format. * - * @param strings Strings instance for this operation. - * @param format String specifying the timestamp format in strings. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New BOOL8 column. + * @param input Strings instance for this operation + * @param format String specifying the timestamp format in strings + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New BOOL8 column */ std::unique_ptr is_timestamp( - strings_column_view const& strings, + strings_column_view const& input, std::string_view format, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -231,19 +235,21 @@ std::unique_ptr is_timestamp( * @throw cudf::logic_error if the `format` string is empty * @throw cudf::logic_error if `names.size()` is an invalid size. Must be 0 or 40 strings. * - * @param timestamps Timestamp values to convert. + * @param timestamps Timestamp values to convert * @param format The string specifying output format. * Default format is "%Y-%m-%dT%H:%M:%SZ". * @param names The string names to use for weekdays ("%a", "%A") and months ("%b", "%B") * Default is an empty `strings_column_view`. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New strings column with formatted timestamps. + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New strings column with formatted timestamps */ std::unique_ptr from_timestamps( column_view const& timestamps, std::string_view format = "%Y-%m-%dT%H:%M:%SZ", strings_column_view const& names = strings_column_view(column_view{ data_type{type_id::STRING}, 0, nullptr, nullptr, 0}), + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of doxygen group diff --git a/cpp/include/cudf/strings/convert/convert_durations.hpp b/cpp/include/cudf/strings/convert/convert_durations.hpp index e915ec26279..a1f4e4ead1d 100644 --- a/cpp/include/cudf/strings/convert/convert_durations.hpp +++ b/cpp/include/cudf/strings/convert/convert_durations.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2022, NVIDIA CORPORATION. + * Copyright (c) 2020-2023, NVIDIA CORPORATION. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -65,16 +65,18 @@ namespace strings { * * @throw cudf::logic_error if duration_type is not a duration type. * - * @param strings Strings instance for this operation. - * @param duration_type The duration type used for creating the output column. - * @param format String specifying the duration format in strings. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New duration column. + * @param input Strings instance for this operation + * @param duration_type The duration type used for creating the output column + * @param format String specifying the duration format in strings + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New duration column */ std::unique_ptr to_durations( - strings_column_view const& strings, + strings_column_view const& input, data_type duration_type, std::string_view format, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -115,15 +117,17 @@ std::unique_ptr to_durations( * * @throw cudf::logic_error if `durations` column parameter is not a duration type. * - * @param durations Duration values to convert. + * @param durations Duration values to convert * @param format The string specifying output format. - * Default format is ""%d days %H:%M:%S". - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New strings column with formatted durations. + * Default format is ""%D days %H:%M:%S". + * @param mr Device memory resource used to allocate the returned column's device memory + * @param stream CUDA stream used for device memory operations and kernel launches + * @return New strings column with formatted durations */ std::unique_ptr from_durations( column_view const& durations, std::string_view format = "%D days %H:%M:%S", + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of doxygen group diff --git a/cpp/include/cudf/strings/convert/convert_fixed_point.hpp b/cpp/include/cudf/strings/convert/convert_fixed_point.hpp index 3852dc8e81a..8f37715967a 100644 --- a/cpp/include/cudf/strings/convert/convert_fixed_point.hpp +++ b/cpp/include/cudf/strings/convert/convert_fixed_point.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022, NVIDIA CORPORATION. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -53,14 +53,16 @@ namespace strings { * * @throw cudf::logic_error if `output_type` is not a fixed-point decimal type. * - * @param input Strings instance for this operation. - * @param output_type Type of fixed-point column to return including the scale value. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New column of `output_type`. 
+ * @param input Strings instance for this operation + * @param output_type Type of fixed-point column to return including the scale value + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New column of `output_type` */ std::unique_ptr to_fixed_point( strings_column_view const& input, data_type output_type, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -83,12 +85,14 @@ std::unique_ptr to_fixed_point( * * @throw cudf::logic_error if the `input` column is not a fixed-point decimal type. * - * @param input Fixed-point column to convert. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New strings column. + * @param input Fixed-point column to convert + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New strings column */ std::unique_ptr from_fixed_point( column_view const& input, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -111,14 +115,16 @@ std::unique_ptr from_fixed_point( * * @throw cudf::logic_error if the `decimal_type` is not a fixed-point decimal type. * - * @param input Strings instance for this operation. - * @param decimal_type Fixed-point type (with scale) used only for checking overflow. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New column of boolean results for each string. + * @param input Strings instance for this operation + * @param decimal_type Fixed-point type (with scale) used only for checking overflow + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New column of boolean results for each string */ std::unique_ptr is_fixed_point( strings_column_view const& input, data_type decimal_type = data_type{type_id::DECIMAL64}, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of doxygen group diff --git a/cpp/include/cudf/strings/convert/convert_floats.hpp b/cpp/include/cudf/strings/convert/convert_floats.hpp index 38a84fc1548..a35cb68ef4e 100644 --- a/cpp/include/cudf/strings/convert/convert_floats.hpp +++ b/cpp/include/cudf/strings/convert/convert_floats.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022, NVIDIA CORPORATION. + * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -39,14 +39,16 @@ namespace strings { * * @throw cudf::logic_error if output_type is not float type. * - * @param strings Strings instance for this operation. - * @param output_type Type of float numeric column to return. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New column with floats converted from strings. 
+ * @param strings Strings instance for this operation + * @param output_type Type of float numeric column to return + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New column with floats converted from strings */ std::unique_ptr to_floats( strings_column_view const& strings, data_type output_type, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -62,12 +64,14 @@ std::unique_ptr to_floats( * * @throw cudf::logic_error if floats column is not float type. * - * @param floats Numeric column to convert. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New strings column with floats as strings. + * @param floats Numeric column to convert + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New strings column with floats as strings */ std::unique_ptr from_floats( column_view const& floats, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -86,12 +90,14 @@ std::unique_ptr from_floats( * * Any null row results in a null entry for that row in the output column. * - * @param strings Strings instance for this operation. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New column of boolean results for each string. + * @param input Strings instance for this operation + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New column of boolean results for each string */ std::unique_ptr is_float( - strings_column_view const& strings, + strings_column_view const& input, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of doxygen group diff --git a/cpp/include/cudf/strings/convert/convert_integers.hpp b/cpp/include/cudf/strings/convert/convert_integers.hpp index 756ce48645d..74ec5d315a2 100644 --- a/cpp/include/cudf/strings/convert/convert_integers.hpp +++ b/cpp/include/cudf/strings/convert/convert_integers.hpp @@ -46,14 +46,16 @@ namespace strings { * * @throw cudf::logic_error if output_type is not integral type. * - * @param strings Strings instance for this operation. - * @param output_type Type of integer numeric column to return. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New column with integers converted from strings. 
+ * @param input Strings instance for this operation + * @param output_type Type of integer numeric column to return + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New column with integers converted from strings */ std::unique_ptr to_integers( - strings_column_view const& strings, + strings_column_view const& input, data_type output_type, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -67,12 +69,14 @@ std::unique_ptr to_integers( * * @throw cudf::logic_error if integers column is not integral type. * - * @param integers Numeric column to convert. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New strings column with integers as strings. + * @param integers Numeric column to convert + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New strings column with integers as strings */ std::unique_ptr from_integers( column_view const& integers, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -94,12 +98,14 @@ std::unique_ptr from_integers( * * Any null row results in a null entry for that row in the output column. * - * @param strings Strings instance for this operation. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New column of boolean results for each string. + * @param input Strings instance for this operation + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New column of boolean results for each string */ std::unique_ptr is_integer( - strings_column_view const& strings, + strings_column_view const& input, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -124,14 +130,16 @@ std::unique_ptr is_integer( * * Any null row results in a null entry for that row in the output column. * - * @param strings Strings instance for this operation. - * @param int_type Integer type used for checking underflow and overflow. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New column of boolean results for each string. + * @param input Strings instance for this operation + * @param int_type Integer type used for checking underflow and overflow + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New column of boolean results for each string */ std::unique_ptr is_integer( - strings_column_view const& strings, + strings_column_view const& input, data_type int_type, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -152,14 +160,16 @@ std::unique_ptr is_integer( * * @throw cudf::logic_error if output_type is not integral type. * - * @param strings Strings instance for this operation. 
- * @param output_type Type of integer numeric column to return. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New column with integers converted from strings. + * @param input Strings instance for this operation + * @param output_type Type of integer numeric column to return + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New column with integers converted from strings */ std::unique_ptr hex_to_integers( - strings_column_view const& strings, + strings_column_view const& input, data_type output_type, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -179,12 +189,14 @@ std::unique_ptr hex_to_integers( * * Any null row results in a null entry for that row in the output column. * - * @param strings Strings instance for this operation. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New column of boolean results for each string. + * @param input Strings instance for this operation + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New column of boolean results for each string */ std::unique_ptr is_hex( - strings_column_view const& strings, + strings_column_view const& input, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -210,12 +222,14 @@ std::unique_ptr is_hex( * * @throw cudf::logic_error if the input column is not integral type. * - * @param input Integer column to convert to hex. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New strings column with hexadecimal characters. + * @param input Integer column to convert to hex + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New strings column with hexadecimal characters */ std::unique_ptr integers_to_hex( column_view const& input, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of doxygen group diff --git a/cpp/include/cudf/strings/convert/convert_ipv4.hpp b/cpp/include/cudf/strings/convert/convert_ipv4.hpp index 22272af74fc..25ad7b86748 100644 --- a/cpp/include/cudf/strings/convert/convert_ipv4.hpp +++ b/cpp/include/cudf/strings/convert/convert_ipv4.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022, NVIDIA CORPORATION. + * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -48,12 +48,14 @@ namespace strings { * * Any null entries will result in corresponding null entries in the output column. * - * @param strings Strings instance for this operation. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New INT64 column converted from strings. 
+ * @param input Strings instance for this operation + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New INT64 column converted from strings */ std::unique_ptr ipv4_to_integers( - strings_column_view const& strings, + strings_column_view const& input, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -71,12 +73,14 @@ std::unique_ptr ipv4_to_integers( * * @throw cudf::logic_error if the input column is not INT64 type. * - * @param integers Integer (INT64) column to convert. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New strings column. + * @param integers Integer (INT64) column to convert + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New strings column */ std::unique_ptr integers_to_ipv4( column_view const& integers, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -96,12 +100,14 @@ std::unique_ptr integers_to_ipv4( * * Any null row results in a null entry for that row in the output column. * - * @param strings Strings instance for this operation. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New column of boolean results for each string. + * @param input Strings instance for this operation + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New column of boolean results for each string */ std::unique_ptr is_ipv4( - strings_column_view const& strings, + strings_column_view const& input, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of doxygen group diff --git a/cpp/include/cudf/strings/convert/convert_lists.hpp b/cpp/include/cudf/strings/convert/convert_lists.hpp index 7ab1bf47b0a..dedf4e95138 100644 --- a/cpp/include/cudf/strings/convert/convert_lists.hpp +++ b/cpp/include/cudf/strings/convert/convert_lists.hpp @@ -50,17 +50,19 @@ namespace strings { * * @throw cudf::logic_error if the input column is not a LIST type with a STRING child. * - * @param input Lists column to format. - * @param na_rep Replacement string for null elements. - * @param separators Strings to use for enclosing list components and separating elements. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New strings column. 
+ * @param input Lists column to format + * @param na_rep Replacement string for null elements + * @param separators Strings to use for enclosing list components and separating elements + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New strings column */ std::unique_ptr format_list_column( lists_column_view const& input, - string_scalar const& na_rep = string_scalar("NULL"), + string_scalar const& na_rep = string_scalar(""), strings_column_view const& separators = strings_column_view(column_view{ data_type{type_id::STRING}, 0, nullptr, nullptr, 0}), + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of doxygen group diff --git a/cpp/include/cudf/strings/convert/convert_urls.hpp b/cpp/include/cudf/strings/convert/convert_urls.hpp index 7f29a0d2149..902835081af 100644 --- a/cpp/include/cudf/strings/convert/convert_urls.hpp +++ b/cpp/include/cudf/strings/convert/convert_urls.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022, NVIDIA CORPORATION. + * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -39,12 +39,14 @@ namespace strings { * * Any null entries will result in corresponding null entries in the output column. * - * @param strings Strings instance for this operation. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New strings column. + * @param input Strings instance for this operation + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New strings column */ std::unique_ptr url_encode( - strings_column_view const& strings, + strings_column_view const& input, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @@ -60,12 +62,14 @@ std::unique_ptr url_encode( * * Any null entries will result in corresponding null entries in the output column. * - * @param strings Strings instance for this operation. - * @param mr Device memory resource used to allocate the returned column's device memory. - * @return New strings column. 
+ * @param input Strings instance for this operation + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned column's device memory + * @return New strings column */ std::unique_ptr url_decode( - strings_column_view const& strings, + strings_column_view const& input, + rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of doxygen group diff --git a/cpp/src/strings/convert/convert_booleans.cu b/cpp/src/strings/convert/convert_booleans.cu index 0d04fc74b0c..8196e1d90fb 100644 --- a/cpp/src/strings/convert/convert_booleans.cu +++ b/cpp/src/strings/convert/convert_booleans.cu @@ -39,25 +39,25 @@ namespace cudf { namespace strings { namespace detail { // Convert strings column to boolean column -std::unique_ptr to_booleans(strings_column_view const& strings, +std::unique_ptr to_booleans(strings_column_view const& input, string_scalar const& true_string, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { - size_type strings_count = strings.size(); + size_type strings_count = input.size(); if (strings_count == 0) return make_numeric_column(data_type{type_id::BOOL8}, 0); CUDF_EXPECTS(true_string.is_valid(stream) && true_string.size() > 0, "Parameter true_string must not be empty."); auto d_true = string_view(true_string.data(), true_string.size()); - auto strings_column = column_device_view::create(strings.parent(), stream); + auto strings_column = column_device_view::create(input.parent(), stream); auto d_strings = *strings_column; // create output column copying the strings' null-mask auto results = make_numeric_column(data_type{type_id::BOOL8}, strings_count, - cudf::detail::copy_bitmask(strings.parent(), stream, mr), - strings.null_count(), + cudf::detail::copy_bitmask(input.parent(), stream, mr), + input.null_count(), stream, mr); auto results_view = results->mutable_view(); @@ -73,19 +73,20 @@ std::unique_ptr to_booleans(strings_column_view const& strings, result = d_strings.element(idx).compare(d_true) == 0; return result; }); - results->set_null_count(strings.null_count()); + results->set_null_count(input.null_count()); return results; } } // namespace detail // external API -std::unique_ptr to_booleans(strings_column_view const& strings, +std::unique_ptr to_booleans(strings_column_view const& input, string_scalar const& true_string, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::to_booleans(strings, true_string, cudf::get_default_stream(), mr); + return detail::to_booleans(input, true_string, stream, mr); } namespace detail { @@ -156,10 +157,11 @@ std::unique_ptr from_booleans(column_view const& booleans, std::unique_ptr from_booleans(column_view const& booleans, string_scalar const& true_string, string_scalar const& false_string, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::from_booleans(booleans, true_string, false_string, cudf::get_default_stream(), mr); + return detail::from_booleans(booleans, true_string, false_string, stream, mr); } } // namespace strings diff --git a/cpp/src/strings/convert/convert_datetime.cu b/cpp/src/strings/convert/convert_datetime.cu index 8a953d778ed..d2609441d72 100644 --- a/cpp/src/strings/convert/convert_datetime.cu +++ b/cpp/src/strings/convert/convert_datetime.cu @@ -710,18 +710,20 @@ std::unique_ptr 
is_timestamp(strings_column_view const& input, std::unique_ptr to_timestamps(strings_column_view const& input, data_type timestamp_type, std::string_view format, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::to_timestamps(input, timestamp_type, format, cudf::get_default_stream(), mr); + return detail::to_timestamps(input, timestamp_type, format, stream, mr); } std::unique_ptr is_timestamp(strings_column_view const& input, std::string_view format, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::is_timestamp(input, format, cudf::get_default_stream(), mr); + return detail::is_timestamp(input, format, stream, mr); } namespace detail { @@ -1168,10 +1170,11 @@ std::unique_ptr from_timestamps(column_view const& timestamps, std::unique_ptr from_timestamps(column_view const& timestamps, std::string_view format, strings_column_view const& names, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::from_timestamps(timestamps, format, names, cudf::get_default_stream(), mr); + return detail::from_timestamps(timestamps, format, names, stream, mr); } } // namespace strings diff --git a/cpp/src/strings/convert/convert_durations.cu b/cpp/src/strings/convert/convert_durations.cu index 6ab70825a6b..e781581b378 100644 --- a/cpp/src/strings/convert/convert_durations.cu +++ b/cpp/src/strings/convert/convert_durations.cu @@ -690,30 +690,30 @@ std::unique_ptr from_durations(column_view const& durations, durations.type(), dispatch_from_durations_fn{}, durations, format, stream, mr); } -std::unique_ptr to_durations(strings_column_view const& strings, +std::unique_ptr to_durations(strings_column_view const& input, data_type duration_type, std::string_view format, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { - size_type strings_count = strings.size(); + size_type strings_count = input.size(); if (strings_count == 0) return make_duration_column(duration_type, 0); CUDF_EXPECTS(!format.empty(), "Format parameter must not be empty."); - auto strings_column = column_device_view::create(strings.parent(), stream); + auto strings_column = column_device_view::create(input.parent(), stream); auto d_column = *strings_column; auto results = make_duration_column(duration_type, strings_count, - cudf::detail::copy_bitmask(strings.parent(), stream, mr), - strings.null_count(), + cudf::detail::copy_bitmask(input.parent(), stream, mr), + input.null_count(), stream, mr); auto results_view = results->mutable_view(); cudf::type_dispatcher( duration_type, dispatch_to_durations_fn(), d_column, format, results_view, stream); - results->set_null_count(strings.null_count()); + results->set_null_count(input.null_count()); return results; } @@ -721,19 +721,21 @@ std::unique_ptr to_durations(strings_column_view const& strings, std::unique_ptr from_durations(column_view const& durations, std::string_view format, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::from_durations(durations, format, cudf::get_default_stream(), mr); + return detail::from_durations(durations, format, stream, mr); } -std::unique_ptr to_durations(strings_column_view const& strings, +std::unique_ptr to_durations(strings_column_view const& input, data_type duration_type, std::string_view format, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::to_durations(strings, 
duration_type, format, cudf::get_default_stream(), mr); + return detail::to_durations(input, duration_type, format, stream, mr); } } // namespace strings diff --git a/cpp/src/strings/convert/convert_fixed_point.cu b/cpp/src/strings/convert/convert_fixed_point.cu index 51aab9faeba..2c59f6dcd29 100644 --- a/cpp/src/strings/convert/convert_fixed_point.cu +++ b/cpp/src/strings/convert/convert_fixed_point.cu @@ -184,12 +184,13 @@ std::unique_ptr to_fixed_point(strings_column_view const& input, } // namespace detail // external API -std::unique_ptr to_fixed_point(strings_column_view const& strings, +std::unique_ptr to_fixed_point(strings_column_view const& input, data_type output_type, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::to_fixed_point(strings, output_type, cudf::get_default_stream(), mr); + return detail::to_fixed_point(input, output_type, stream, mr); } namespace detail { @@ -277,10 +278,11 @@ std::unique_ptr from_fixed_point(column_view const& input, // external API std::unique_ptr from_fixed_point(column_view const& input, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::from_fixed_point(input, cudf::get_default_stream(), mr); + return detail::from_fixed_point(input, stream, mr); } namespace detail { @@ -341,10 +343,11 @@ std::unique_ptr is_fixed_point(strings_column_view const& input, std::unique_ptr is_fixed_point(strings_column_view const& input, data_type decimal_type, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::is_fixed_point(input, decimal_type, cudf::get_default_stream(), mr); + return detail::is_fixed_point(input, decimal_type, stream, mr); } } // namespace strings diff --git a/cpp/src/strings/convert/convert_floats.cu b/cpp/src/strings/convert/convert_floats.cu index 32167589ab4..81d686d690c 100644 --- a/cpp/src/strings/convert/convert_floats.cu +++ b/cpp/src/strings/convert/convert_floats.cu @@ -91,26 +91,26 @@ struct dispatch_to_floats_fn { } // namespace // This will convert a strings column into any float column type. 
-std::unique_ptr to_floats(strings_column_view const& strings, +std::unique_ptr to_floats(strings_column_view const& input, data_type output_type, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { - size_type strings_count = strings.size(); + size_type strings_count = input.size(); if (strings_count == 0) return make_numeric_column(output_type, 0); - auto strings_column = column_device_view::create(strings.parent(), stream); + auto strings_column = column_device_view::create(input.parent(), stream); auto d_strings = *strings_column; // create float output column copying the strings null-mask auto results = make_numeric_column(output_type, strings_count, - cudf::detail::copy_bitmask(strings.parent(), stream, mr), - strings.null_count(), + cudf::detail::copy_bitmask(input.parent(), stream, mr), + input.null_count(), stream, mr); auto results_view = results->mutable_view(); // fill output column with floats type_dispatcher(output_type, dispatch_to_floats_fn{}, d_strings, results_view, stream); - results->set_null_count(strings.null_count()); + results->set_null_count(input.null_count()); return results; } @@ -118,12 +118,13 @@ std::unique_ptr to_floats(strings_column_view const& strings, // external API -std::unique_ptr to_floats(strings_column_view const& strings, +std::unique_ptr to_floats(strings_column_view const& input, data_type output_type, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::to_floats(strings, output_type, cudf::get_default_stream(), mr); + return detail::to_floats(input, output_type, stream, mr); } namespace detail { @@ -436,48 +437,51 @@ std::unique_ptr from_floats(column_view const& floats, } // namespace detail // external API -std::unique_ptr from_floats(column_view const& floats, rmm::mr::device_memory_resource* mr) +std::unique_ptr from_floats(column_view const& floats, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); - return detail::from_floats(floats, cudf::get_default_stream(), mr); + return detail::from_floats(floats, stream, mr); } namespace detail { -std::unique_ptr is_float(strings_column_view const& strings, +std::unique_ptr is_float(strings_column_view const& input, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { - auto strings_column = column_device_view::create(strings.parent(), stream); + auto strings_column = column_device_view::create(input.parent(), stream); auto d_column = *strings_column; // create output column auto results = make_numeric_column(data_type{type_id::BOOL8}, - strings.size(), - cudf::detail::copy_bitmask(strings.parent(), stream, mr), - strings.null_count(), + input.size(), + cudf::detail::copy_bitmask(input.parent(), stream, mr), + input.null_count(), stream, mr); auto d_results = results->mutable_view().data(); // check strings for valid float chars thrust::transform(rmm::exec_policy(stream), thrust::make_counting_iterator(0), - thrust::make_counting_iterator(strings.size()), + thrust::make_counting_iterator(input.size()), d_results, [d_column] __device__(size_type idx) { if (d_column.is_null(idx)) return false; return is_float(d_column.element(idx)); }); - results->set_null_count(strings.null_count()); + results->set_null_count(input.null_count()); return results; } } // namespace detail // external API -std::unique_ptr is_float(strings_column_view const& strings, +std::unique_ptr is_float(strings_column_view const& input, + rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) 
 {
   CUDF_FUNC_RANGE();
-  return detail::is_float(strings, cudf::get_default_stream(), mr);
+  return detail::is_float(input, stream, mr);
 }

 } // namespace strings
diff --git a/cpp/src/strings/convert/convert_hex.cu b/cpp/src/strings/convert/convert_hex.cu
index f5bdbcbd199..8f656b149a5 100644
--- a/cpp/src/strings/convert/convert_hex.cu
+++ b/cpp/src/strings/convert/convert_hex.cu
@@ -275,24 +275,27 @@ std::unique_ptr<column> integers_to_hex(column_view const& input,

 // external API
 std::unique_ptr<column> hex_to_integers(strings_column_view const& strings,
                                         data_type output_type,
+                                        rmm::cuda_stream_view stream,
                                         rmm::mr::device_memory_resource* mr)
 {
   CUDF_FUNC_RANGE();
-  return detail::hex_to_integers(strings, output_type, cudf::get_default_stream(), mr);
+  return detail::hex_to_integers(strings, output_type, stream, mr);
 }

 std::unique_ptr<column> is_hex(strings_column_view const& strings,
+                               rmm::cuda_stream_view stream,
                                rmm::mr::device_memory_resource* mr)
 {
   CUDF_FUNC_RANGE();
-  return detail::is_hex(strings, cudf::get_default_stream(), mr);
+  return detail::is_hex(strings, stream, mr);
 }

 std::unique_ptr<column> integers_to_hex(column_view const& input,
+                                        rmm::cuda_stream_view stream,
                                         rmm::mr::device_memory_resource* mr)
 {
   CUDF_FUNC_RANGE();
-  return detail::integers_to_hex(input, cudf::get_default_stream(), mr);
+  return detail::integers_to_hex(input, stream, mr);
 }

 } // namespace strings
diff --git a/cpp/src/strings/convert/convert_integers.cu b/cpp/src/strings/convert/convert_integers.cu
index 2c21fc5d790..4839e83d5dd 100644
--- a/cpp/src/strings/convert/convert_integers.cu
+++ b/cpp/src/strings/convert/convert_integers.cu
@@ -112,20 +112,20 @@ inline __device__ bool is_integer(string_view const& d_str)
  */
 struct dispatch_is_integer_fn {
   template <typename T, std::enable_if_t<cudf::is_integral<T>()>* = nullptr>
-  std::unique_ptr<column> operator()(strings_column_view const& strings,
+  std::unique_ptr<column> operator()(strings_column_view const& input,
                                      rmm::cuda_stream_view stream,
                                      rmm::mr::device_memory_resource* mr) const
   {
-    auto const d_column = column_device_view::create(strings.parent(), stream);
+    auto const d_column = column_device_view::create(input.parent(), stream);
     auto results = make_numeric_column(data_type{type_id::BOOL8},
-                                       strings.size(),
-                                       cudf::detail::copy_bitmask(strings.parent(), stream, mr),
-                                       strings.null_count(),
+                                       input.size(),
+                                       cudf::detail::copy_bitmask(input.parent(), stream, mr),
+                                       input.null_count(),
                                        stream,
                                        mr);

     auto d_results = results->mutable_view().data<bool>();
-    if (strings.has_nulls()) {
+    if (input.has_nulls()) {
       thrust::transform(rmm::exec_policy(stream),
                         d_column->pair_begin<string_view, true>(),
                         d_column->pair_end<string_view, true>(),
@@ -140,7 +140,7 @@ struct dispatch_is_integer_fn {
     }

     // Calling mutable_view() on a column invalidates it's null count so we need to set it back
-    results->set_null_count(strings.null_count());
+    results->set_null_count(input.null_count());

     return results;
   }
@@ -156,20 +156,20 @@ struct dispatch_is_integer_fn {

 } // namespace

-std::unique_ptr<column> is_integer(strings_column_view const& strings,
+std::unique_ptr<column> is_integer(strings_column_view const& input,
                                    rmm::cuda_stream_view stream,
                                    rmm::mr::device_memory_resource* mr)
 {
-  auto const d_column = column_device_view::create(strings.parent(), stream);
+  auto const d_column = column_device_view::create(input.parent(), stream);
   auto results = make_numeric_column(data_type{type_id::BOOL8},
-                                     strings.size(),
-                                     cudf::detail::copy_bitmask(strings.parent(), stream, mr),
-                                     strings.null_count(),
+                                     input.size(),
+                                     cudf::detail::copy_bitmask(input.parent(), stream, mr),
+                                     input.null_count(),
                                      stream,
                                      mr);
   auto d_results = results->mutable_view().data<bool>();
-  if (strings.has_nulls()) {
+  if (input.has_nulls()) {
     thrust::transform(
       rmm::exec_policy(stream),
       d_column->pair_begin<string_view, true>(),
@@ -185,36 +185,38 @@ std::unique_ptr<column> is_integer(strings_column_view const& strings,
   }

   // Calling mutable_view() on a column invalidates it's null count so we need to set it back
-  results->set_null_count(strings.null_count());
+  results->set_null_count(input.null_count());

   return results;
 }

-std::unique_ptr<column> is_integer(strings_column_view const& strings,
+std::unique_ptr<column> is_integer(strings_column_view const& input,
                                    data_type int_type,
                                    rmm::cuda_stream_view stream,
                                    rmm::mr::device_memory_resource* mr)
 {
-  if (strings.is_empty()) { return cudf::make_empty_column(type_id::BOOL8); }
-  return type_dispatcher(int_type, dispatch_is_integer_fn{}, strings, stream, mr);
+  if (input.is_empty()) { return cudf::make_empty_column(type_id::BOOL8); }
+  return type_dispatcher(int_type, dispatch_is_integer_fn{}, input, stream, mr);
 }

 } // namespace detail

 // external APIs
-std::unique_ptr<column> is_integer(strings_column_view const& strings,
+std::unique_ptr<column> is_integer(strings_column_view const& input,
+                                   rmm::cuda_stream_view stream,
                                    rmm::mr::device_memory_resource* mr)
 {
   CUDF_FUNC_RANGE();
-  return detail::is_integer(strings, cudf::get_default_stream(), mr);
+  return detail::is_integer(input, stream, mr);
 }

-std::unique_ptr<column> is_integer(strings_column_view const& strings,
+std::unique_ptr<column> is_integer(strings_column_view const& input,
                                    data_type int_type,
+                                   rmm::cuda_stream_view stream,
                                    rmm::mr::device_memory_resource* mr)
 {
   CUDF_FUNC_RANGE();
-  return detail::is_integer(strings, int_type, cudf::get_default_stream(), mr);
+  return detail::is_integer(input, int_type, stream, mr);
 }

 namespace detail {
@@ -266,28 +268,28 @@ struct dispatch_to_integers_fn {
 } // namespace

 // This will convert a strings column into any integer column type.
-std::unique_ptr<column> to_integers(strings_column_view const& strings,
+std::unique_ptr<column> to_integers(strings_column_view const& input,
                                     data_type output_type,
                                     rmm::cuda_stream_view stream,
                                     rmm::mr::device_memory_resource* mr)
 {
-  size_type strings_count = strings.size();
+  size_type strings_count = input.size();
   if (strings_count == 0) return make_numeric_column(output_type, 0);

   // Create integer output column copying the strings null-mask
   auto results = make_numeric_column(output_type,
                                      strings_count,
-                                     cudf::detail::copy_bitmask(strings.parent(), stream, mr),
-                                     strings.null_count(),
+                                     cudf::detail::copy_bitmask(input.parent(), stream, mr),
+                                     input.null_count(),
                                      stream,
                                      mr);

   // Fill output column with integers
-  auto const strings_dev_view = column_device_view::create(strings.parent(), stream);
+  auto const strings_dev_view = column_device_view::create(input.parent(), stream);
   auto results_view = results->mutable_view();
   type_dispatcher(output_type, dispatch_to_integers_fn{}, *strings_dev_view, results_view, stream);

   // Calling mutable_view() on a column invalidates it's null count so we need to set it back
-  results->set_null_count(strings.null_count());
+  results->set_null_count(input.null_count());

   return results;
 }
@@ -295,12 +297,13 @@ std::unique_ptr<column> to_integers(strings_column_view const& strings,
 } // namespace detail

 // external API
-std::unique_ptr<column> to_integers(strings_column_view const& strings,
+std::unique_ptr<column> to_integers(strings_column_view const& input,
                                     data_type output_type,
+                                    rmm::cuda_stream_view stream,
                                     rmm::mr::device_memory_resource* mr)
 {
   CUDF_FUNC_RANGE();
-  return detail::to_integers(strings, output_type, cudf::get_default_stream(), mr);
+  return detail::to_integers(input, output_type, stream, mr);
 }

 namespace detail {
@@ -393,10 +396,11 @@ std::unique_ptr<column> from_integers(column_view const& integers,

 // external API
 std::unique_ptr<column> from_integers(column_view const& integers,
+                                      rmm::cuda_stream_view stream,
                                       rmm::mr::device_memory_resource* mr)
 {
   CUDF_FUNC_RANGE();
-  return detail::from_integers(integers, cudf::get_default_stream(), mr);
+  return detail::from_integers(integers, stream, mr);
 }

 } // namespace strings
diff --git a/cpp/src/strings/convert/convert_ipv4.cu b/cpp/src/strings/convert/convert_ipv4.cu
index adb72cb0263..07e4b3e5b17 100644
--- a/cpp/src/strings/convert/convert_ipv4.cu
+++ b/cpp/src/strings/convert/convert_ipv4.cu
@@ -72,19 +72,19 @@ struct ipv4_to_integers_fn {
 } // namespace

 // Convert strings column of IPv4 addresses to integers column
-std::unique_ptr<column> ipv4_to_integers(strings_column_view const& strings,
+std::unique_ptr<column> ipv4_to_integers(strings_column_view const& input,
                                          rmm::cuda_stream_view stream,
                                          rmm::mr::device_memory_resource* mr)
 {
-  size_type strings_count = strings.size();
+  size_type strings_count = input.size();
   if (strings_count == 0) return make_numeric_column(data_type{type_id::INT64}, 0);

-  auto strings_column = column_device_view::create(strings.parent(), stream);
+  auto strings_column = column_device_view::create(input.parent(), stream);
   // create output column copying the strings' null-mask
   auto results = make_numeric_column(data_type{type_id::INT64},
                                      strings_count,
-                                     cudf::detail::copy_bitmask(strings.parent(), stream, mr),
-                                     strings.null_count(),
+                                     cudf::detail::copy_bitmask(input.parent(), stream, mr),
+                                     input.null_count(),
                                      stream,
                                      mr);
   auto d_results = results->mutable_view().data<int64_t>();
@@ -95,18 +95,19 @@ std::unique_ptr<column> ipv4_to_integers(strings_column_view const& strings,
                     d_results,
                     ipv4_to_integers_fn{*strings_column});
   // done
-  results->set_null_count(strings.null_count());
+  results->set_null_count(input.null_count());
   return results;
 }

 } // namespace detail

 // external API
-std::unique_ptr<column> ipv4_to_integers(strings_column_view const& strings,
+std::unique_ptr<column> ipv4_to_integers(strings_column_view const& input,
+                                         rmm::cuda_stream_view stream,
                                          rmm::mr::device_memory_resource* mr)
 {
   CUDF_FUNC_RANGE();
-  return detail::ipv4_to_integers(strings, cudf::get_default_stream(), mr);
+  return detail::ipv4_to_integers(input, stream, mr);
 }

 namespace detail {
@@ -173,23 +174,23 @@ std::unique_ptr<column> integers_to_ipv4(column_view const& integers,
                              cudf::detail::copy_bitmask(integers, stream, mr));
 }

-std::unique_ptr<column> is_ipv4(strings_column_view const& strings,
+std::unique_ptr<column> is_ipv4(strings_column_view const& input,
                                 rmm::cuda_stream_view stream,
                                 rmm::mr::device_memory_resource* mr)
 {
-  auto strings_column = column_device_view::create(strings.parent(), stream);
+  auto strings_column = column_device_view::create(input.parent(), stream);
   auto d_column = *strings_column;
   // create output column
   auto results = make_numeric_column(data_type{type_id::BOOL8},
-                                     strings.size(),
-                                     cudf::detail::copy_bitmask(strings.parent(), stream, mr),
-                                     strings.null_count(),
+                                     input.size(),
+                                     cudf::detail::copy_bitmask(input.parent(), stream, mr),
+                                     input.null_count(),
                                      stream,
                                      mr);
   auto d_results = results->mutable_view().data<bool>();
   thrust::transform(rmm::exec_policy(stream),
                     thrust::make_counting_iterator<size_type>(0),
-                    thrust::make_counting_iterator<size_type>(strings.size()),
+                    thrust::make_counting_iterator<size_type>(input.size()),
                     d_results,
                     [d_column] __device__(size_type idx) {
                       if (d_column.is_null(idx)) return false;
@@ -214,7 +215,7 @@ std::unique_ptr<column> is_ipv4(strings_column_view const& strings,
                       return ip_vals[0] >= 0 && ip_vals[1] >= 0 && ip_vals[2] >= 0 && ip_vals[3] >= 0;
                     });

-  results->set_null_count(strings.null_count());
+  results->set_null_count(input.null_count());
   return results;
 }

@@ -223,17 +224,19 @@ std::unique_ptr<column> is_ipv4(strings_column_view const& strings,
 // external API
 std::unique_ptr<column> integers_to_ipv4(column_view const& integers,
+                                         rmm::cuda_stream_view stream,
                                          rmm::mr::device_memory_resource* mr)
 {
   CUDF_FUNC_RANGE();
-  return detail::integers_to_ipv4(integers, cudf::get_default_stream(), mr);
+  return detail::integers_to_ipv4(integers, stream, mr);
 }

-std::unique_ptr<column> is_ipv4(strings_column_view const& strings,
+std::unique_ptr<column> is_ipv4(strings_column_view const& input,
+                                rmm::cuda_stream_view stream,
                                 rmm::mr::device_memory_resource* mr)
 {
   CUDF_FUNC_RANGE();
-  return detail::is_ipv4(strings, cudf::get_default_stream(), mr);
+  return detail::is_ipv4(input, stream, mr);
 }

 } // namespace strings
diff --git a/cpp/src/strings/convert/convert_lists.cu b/cpp/src/strings/convert/convert_lists.cu
index 3aef37914fd..f9f2b91eb12 100644
--- a/cpp/src/strings/convert/convert_lists.cu
+++ b/cpp/src/strings/convert/convert_lists.cu
@@ -233,10 +233,11 @@ std::unique_ptr<column> format_list_column(lists_column_view const& input,
 std::unique_ptr<column> format_list_column(lists_column_view const& input,
                                            string_scalar const& na_rep,
                                            strings_column_view const& separators,
+                                           rmm::cuda_stream_view stream,
                                            rmm::mr::device_memory_resource* mr)
 {
   CUDF_FUNC_RANGE();
-  return detail::format_list_column(input, na_rep, separators, cudf::get_default_stream(), mr);
+  return detail::format_list_column(input, na_rep, separators, stream, mr);
 }

 } // namespace strings
diff --git a/cpp/src/strings/convert/convert_urls.cu b/cpp/src/strings/convert/convert_urls.cu
index 9efa148cfd2..9e847131be2 100644
--- a/cpp/src/strings/convert/convert_urls.cu
+++ b/cpp/src/strings/convert/convert_urls.cu
@@ -148,11 +148,12 @@ std::unique_ptr<column> url_encode(strings_column_view const& input,
 } // namespace detail

 // external API
-std::unique_ptr<column> url_encode(strings_column_view const& strings,
+std::unique_ptr<column> url_encode(strings_column_view const& input,
+                                   rmm::cuda_stream_view stream,
                                    rmm::mr::device_memory_resource* mr)
 {
   CUDF_FUNC_RANGE();
-  return detail::url_encode(strings, cudf::get_default_stream(), mr);
+  return detail::url_encode(input, stream, mr);
 }

 namespace detail {
@@ -428,11 +429,12 @@ std::unique_ptr<column> url_decode(strings_column_view const& strings,

 // external API

-std::unique_ptr<column> url_decode(strings_column_view const& strings,
+std::unique_ptr<column> url_decode(strings_column_view const& input,
+                                   rmm::cuda_stream_view stream,
                                    rmm::mr::device_memory_resource* mr)
 {
   CUDF_FUNC_RANGE();
-  return detail::url_decode(strings, cudf::get_default_stream(), mr);
+  return detail::url_decode(input, stream, mr);
 }

 } // namespace strings
diff --git a/cpp/tests/CMakeLists.txt b/cpp/tests/CMakeLists.txt
index f36fcbc9246..3e30db7abcb 100644
--- a/cpp/tests/CMakeLists.txt
+++ b/cpp/tests/CMakeLists.txt
@@ -635,6 +635,7 @@ ConfigureTest(STREAM_DICTIONARY_TEST streams/dictionary_test.cpp STREAM_MODE tes
 ConfigureTest(
   STREAM_STRINGS_TEST
   streams/strings/case_test.cpp
+  streams/strings/convert_test.cpp
   streams/strings/find_test.cpp
   streams/strings/replace_test.cpp
   streams/strings/split_test.cpp
diff --git a/cpp/tests/streams/strings/convert_test.cpp b/cpp/tests/streams/strings/convert_test.cpp
new file mode 100644
index 00000000000..8dc3f625746
--- /dev/null
+++ b/cpp/tests/streams/strings/convert_test.cpp
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2023, NVIDIA CORPORATION.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cudf_test/base_fixture.hpp>
+#include <cudf_test/column_wrapper.hpp>
+#include <cudf_test/default_stream.hpp>
+
+#include <cudf/strings/convert/convert_booleans.hpp>
+#include <cudf/strings/convert/convert_datetime.hpp>
+#include <cudf/strings/convert/convert_durations.hpp>
+#include <cudf/strings/convert/convert_fixed_point.hpp>
+#include <cudf/strings/convert/convert_floats.hpp>
+#include <cudf/strings/convert/convert_integers.hpp>
+#include <cudf/strings/convert/convert_ipv4.hpp>
+#include <cudf/strings/convert/convert_lists.hpp>
+#include <cudf/strings/convert/convert_urls.hpp>
+
+#include <string>
+
+class StringsConvertTest : public cudf::test::BaseFixture {};
+
+TEST_F(StringsConvertTest, Booleans)
+{
+  auto input = cudf::test::strings_column_wrapper({"true", "false", "True", ""});
+  auto view  = cudf::strings_column_view(input);
+
+  auto true_scalar  = cudf::string_scalar("true", true, cudf::test::get_default_stream());
+  auto false_scalar = cudf::string_scalar("false", true, cudf::test::get_default_stream());
+
+  auto bools = cudf::strings::to_booleans(view, true_scalar, cudf::test::get_default_stream());
+  cudf::strings::from_booleans(
+    bools->view(), true_scalar, false_scalar, cudf::test::get_default_stream());
+}
+
+TEST_F(StringsConvertTest, Timestamps)
+{
+  auto input = cudf::test::strings_column_wrapper({"2019-03-20T12:34:56Z", "2020-02-29T00:00:00Z"});
+  auto view  = cudf::strings_column_view(input);
+
+  std::string format = "%Y-%m-%dT%H:%M:%SZ";
+  auto dtype         = cudf::data_type{cudf::type_id::TIMESTAMP_SECONDS};
+
+  cudf::strings::is_timestamp(view, format, cudf::test::get_default_stream());
+  auto timestamps =
+    cudf::strings::to_timestamps(view, dtype, format, cudf::test::get_default_stream());
+
+  auto empty = cudf::test::strings_column_wrapper();
+  cudf::strings::from_timestamps(
+    timestamps->view(), format, cudf::strings_column_view(empty), cudf::test::get_default_stream());
+}
+
+TEST_F(StringsConvertTest, Durations)
+{
+  auto input = cudf::test::strings_column_wrapper({"17975 days 12:34:56", "18321 days 00:00:00"});
+  auto view  = cudf::strings_column_view(input);
+
+  std::string format = "%D days %H:%M:%S";
+  auto dtype         = cudf::data_type{cudf::type_id::DURATION_SECONDS};
+
+  auto durations =
+    cudf::strings::to_durations(view, dtype, format, cudf::test::get_default_stream());
+  cudf::strings::from_durations(durations->view(), format, cudf::test::get_default_stream());
+}
+
+TEST_F(StringsConvertTest, FixedPoint)
+{
+  auto input = cudf::test::strings_column_wrapper({"1.234E3", "-876", "543.2"});
+  auto view  = cudf::strings_column_view(input);
+
+  auto dtype = cudf::data_type{cudf::type_id::DECIMAL64, numeric::scale_type{-3}};
+
+  auto values = cudf::strings::to_fixed_point(view, dtype, cudf::test::get_default_stream());
+  cudf::strings::from_fixed_point(values->view(), cudf::test::get_default_stream());
+}
+
+TEST_F(StringsConvertTest, Floats)
+{
+  auto input = cudf::test::strings_column_wrapper({"1.234E3", "-876", "543.2"});
+  auto view  = cudf::strings_column_view(input);
+
+  auto dtype = cudf::data_type{cudf::type_id::FLOAT32};
+
+  auto values = cudf::strings::to_floats(view, dtype, cudf::test::get_default_stream());
+  cudf::strings::from_floats(values->view(), cudf::test::get_default_stream());
+  cudf::strings::is_float(view, cudf::test::get_default_stream());
+}
+
+TEST_F(StringsConvertTest, Integers)
+{
+  auto input = cudf::test::strings_column_wrapper({"1234", "-876", "5432"});
+  auto view  = cudf::strings_column_view(input);
+
+  auto dtype = cudf::data_type{cudf::type_id::INT32};
+
+  auto values = cudf::strings::to_integers(view, dtype, cudf::test::get_default_stream());
+  cudf::strings::from_integers(values->view(), cudf::test::get_default_stream());
+  cudf::strings::is_integer(view, cudf::test::get_default_stream());
+  cudf::strings::is_hex(view, cudf::test::get_default_stream());
+  cudf::strings::hex_to_integers(view, dtype, cudf::test::get_default_stream());
+  cudf::strings::integers_to_hex(values->view(), cudf::test::get_default_stream());
+}
+
+TEST_F(StringsConvertTest, IPv4)
+{
+  auto input = cudf::test::strings_column_wrapper({"192.168.0.1", "10.0.0.1"});
+  auto view  = cudf::strings_column_view(input);
+
+  auto values = cudf::strings::ipv4_to_integers(view, cudf::test::get_default_stream());
+  cudf::strings::integers_to_ipv4(values->view(), cudf::test::get_default_stream());
+  cudf::strings::is_ipv4(view, cudf::test::get_default_stream());
+}
+
+TEST_F(StringsConvertTest, URLs)
+{
+  auto input = cudf::test::strings_column_wrapper({"www.nvidia.com/rapids?p=é", "/_file-7.txt"});
+  auto view  = cudf::strings_column_view(input);
+
+  auto values = cudf::strings::url_encode(view, cudf::test::get_default_stream());
+  cudf::strings::url_decode(values->view(), cudf::test::get_default_stream());
+}
+
+TEST_F(StringsConvertTest, ListsFormat)
+{
+  using STR_LISTS = cudf::test::lists_column_wrapper<cudf::string_view>;
+  auto const input =
+    STR_LISTS{{STR_LISTS{"a", "bb", "ccc"}, STR_LISTS{}, STR_LISTS{"ddd", "ee", "f"}},
+              {STR_LISTS{"gg", "hhh"}, STR_LISTS{"i", "", "", "jj"}}};
+  auto view        = cudf::lists_column_view(input);
+  auto null_scalar = cudf::string_scalar("NULL", true, cudf::test::get_default_stream());
+  auto separators  = cudf::strings_column_view(cudf::test::strings_column_wrapper());
+  cudf::strings::format_list_column(
+    view, null_scalar, separators, cudf::test::get_default_stream());
+}
diff --git a/cpp/tests/strings/booleans_tests.cpp b/cpp/tests/strings/booleans_tests.cpp
index 0c7fc992065..469ca77a4c5 100644
--- a/cpp/tests/strings/booleans_tests.cpp
+++ b/cpp/tests/strings/booleans_tests.cpp
@@ -36,7 +36,8 @@ TEST_F(StringsConvertTest, ToBooleans)
     thrust::make_transform_iterator(h_strings.begin(), [](auto str) { return str != nullptr; }));

   auto strings_view = cudf::strings_column_view(strings);
-  auto results      = cudf::strings::to_booleans(strings_view);
+  auto true_scalar  = cudf::string_scalar("true");
+  auto results      = cudf::strings::to_booleans(strings_view, true_scalar);

   std::vector<bool> h_expected{false, false, false, true, false, false};
   cudf::test::fixed_width_column_wrapper<bool> expected(
@@ -60,26 +61,46 @@ TEST_F(StringsConvertTest, FromBooleans)
     h_column.end(),
     thrust::make_transform_iterator(h_strings.begin(), [](auto str) { return str != nullptr; }));

-  auto results = cudf::strings::from_booleans(column);
+  auto true_scalar  = cudf::string_scalar("true");
+  auto false_scalar = cudf::string_scalar("false");
+  auto results      = cudf::strings::from_booleans(column, true_scalar, false_scalar);

   CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, strings);
 }

 TEST_F(StringsConvertTest, ZeroSizeStringsColumnBoolean)
 {
   auto const zero_size_column = cudf::make_empty_column(cudf::type_id::BOOL8)->view();
-  auto results                = cudf::strings::from_booleans(zero_size_column);
+  auto true_scalar  = cudf::string_scalar("true");
+  auto false_scalar = cudf::string_scalar("false");
+  auto results      = cudf::strings::from_booleans(zero_size_column, true_scalar, false_scalar);
   cudf::test::expect_column_empty(results->view());
 }

 TEST_F(StringsConvertTest, ZeroSizeBooleansColumn)
 {
   auto const zero_size_strings_column = cudf::make_empty_column(cudf::type_id::STRING)->view();
-  auto results = cudf::strings::to_booleans(zero_size_strings_column);
+  auto true_scalar = cudf::string_scalar("true");
+  auto results     = cudf::strings::to_booleans(zero_size_strings_column, true_scalar);
   EXPECT_EQ(0, results->size());
 }

 TEST_F(StringsConvertTest, BooleanError)
 {
-  auto column = cudf::make_numeric_column(cudf::data_type{cudf::type_id::INT32}, 100);
-  EXPECT_THROW(cudf::strings::from_booleans(column->view()), cudf::logic_error);
+  auto int_column   = cudf::test::fixed_width_column_wrapper<int32_t>({1, 2, 3});
+  auto true_scalar  = cudf::string_scalar("true");
+  auto false_scalar = cudf::string_scalar("false");
+  EXPECT_THROW(cudf::strings::from_booleans(int_column, true_scalar, false_scalar),
+               cudf::logic_error);
+
+  auto bool_column = cudf::test::fixed_width_column_wrapper<bool>({1, 0, 1});
+  auto null_scalar = cudf::string_scalar("", false);
+  EXPECT_THROW(cudf::strings::from_booleans(bool_column, null_scalar, false_scalar),
+               cudf::logic_error);
+  EXPECT_THROW(cudf::strings::from_booleans(bool_column, true_scalar, null_scalar),
+               cudf::logic_error);
+
+  auto empty_scalar = cudf::string_scalar("", true);
+  EXPECT_THROW(cudf::strings::from_booleans(int_column, empty_scalar, false_scalar),
+               cudf::logic_error);
+  EXPECT_THROW(cudf::strings::from_booleans(int_column, true_scalar, empty_scalar),
+               cudf::logic_error);
 }
diff --git a/cpp/tests/strings/format_lists_tests.cpp b/cpp/tests/strings/format_lists_tests.cpp
index 95dc9725afc..6196b8ed6ad 100644
--- a/cpp/tests/strings/format_lists_tests.cpp
+++ b/cpp/tests/strings/format_lists_tests.cpp
@@ -60,8 +60,9 @@ TEST_F(StringsFormatListsTest, WithNulls)
     cudf::test::iterators::null_at(1)};
   auto const view = cudf::lists_column_view(input);

-  auto results  = cudf::strings::format_list_column(view);
-  auto expected = cudf::test::strings_column_wrapper(
+  auto null_scalar = cudf::string_scalar("NULL");
+  auto results     = cudf::strings::format_list_column(view, null_scalar);
+  auto expected    = cudf::test::strings_column_wrapper(
     {"[a,NULL,ccc]", "NULL", "[NULL,bb,ddd]", "[zzz,xxxxx]", "[v,,NULL,w]"});
   CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected);
 }
@@ -132,11 +133,13 @@ TEST_F(StringsFormatListsTest, SlicedLists)
                                           "[ééé,12345abcdef]",
                                           "[www,12345]"});

+  auto null_scalar = cudf::string_scalar("NULL");
+
   // set of slice intervals: covers slicing the front, back, and middle
   std::vector<std::pair<cudf::size_type, cudf::size_type>> index_pairs(
     {{0, 11}, {0, 4}, {3, 8}, {5, 11}});
   for (auto indexes : index_pairs) {
     auto sliced  = cudf::lists_column_view(cudf::slice(input, {indexes.first, indexes.second})[0]);
-    auto results = cudf::strings::format_list_column(sliced);
+    auto results = cudf::strings::format_list_column(sliced, null_scalar);
     auto expected = cudf::test::strings_column_wrapper(h_expected.begin() + indexes.first,
                                                        h_expected.begin() + indexes.second);
     CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected);
diff --git a/java/src/main/native/src/ColumnViewJni.cpp b/java/src/main/native/src/ColumnViewJni.cpp
index 0ddaa2c15b5..462f0d8eac9 100644
--- a/java/src/main/native/src/ColumnViewJni.cpp
+++ b/java/src/main/native/src/ColumnViewJni.cpp
@@ -1130,7 +1130,11 @@ JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_castTo(JNIEnv *env, jclas
   }
   if (n_data_type.id() == cudf::type_id::STRING) {
     switch (column->type().id()) {
-      case cudf::type_id::BOOL8: return release_as_jlong(cudf::strings::from_booleans(*column));
+      case cudf::type_id::BOOL8: {
+        auto const true_scalar = cudf::string_scalar("true");
+        auto const false_scalar = cudf::string_scalar("false");
+        return release_as_jlong(cudf::strings::from_booleans(*column, true_scalar, false_scalar));
+      }
       case cudf::type_id::FLOAT32:
       case cudf::type_id::FLOAT64: return release_as_jlong(cudf::strings::from_floats(*column));
       case cudf::type_id::INT8:
@@ -1149,7 +1153,10 @@ JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_castTo(JNIEnv *env, jclas
     }
   } else if (column->type().id() == cudf::type_id::STRING) {
    switch (n_data_type.id()) {
-      case cudf::type_id::BOOL8: return release_as_jlong(cudf::strings::to_booleans(*column));
+      case cudf::type_id::BOOL8: {
+        auto const true_scalar = cudf::string_scalar("true");
+        return release_as_jlong(cudf::strings::to_booleans(*column, true_scalar));
+      }
       case cudf::type_id::FLOAT32:
       case cudf::type_id::FLOAT64: return release_as_jlong(cudf::strings::to_floats(*column, n_data_type));

From 655f3a4659653e95b9f12ed924c7e887be41c5d4 Mon Sep 17 00:00:00 2001
From: Robert Maynard
Date: Mon, 16 Oct 2023 12:46:48 -0400
Subject: [PATCH 150/150] Update rapids-cmake functions to non-deprecated signatures (#14265)

Update to use non-deprecated signatures for `rapids_export` functions.

Authors:
  - Robert Maynard (https://github.com/robertmaynard)

Approvers:
  - Bradley Dice (https://github.com/bdice)

URL: https://github.com/rapidsai/cudf/pull/14265
---
 cpp/cmake/thirdparty/get_arrow.cmake      | 18 +++++++++++++-----
 cpp/cmake/thirdparty/get_cufile.cmake     |  4 ++--
 cpp/cmake/thirdparty/get_gtest.cmake      |  4 ++--
 cpp/cmake/thirdparty/get_kvikio.cmake     | 12 +++++++-----
 cpp/cmake/thirdparty/get_libcudacxx.cmake | 18 ++++++++----------
 cpp/cmake/thirdparty/get_spdlog.cmake     |  4 +++-
 cpp/cmake/thirdparty/get_thrust.cmake     | 15 +++++++--------
 7 files changed, 42 insertions(+), 33 deletions(-)
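Note on the updated API: the deprecated `rapids_export_find_package_root` form took the
export set as a trailing positional argument and usually sat inside an `if()` guard; the
form adopted below names the export set with the `EXPORT_SET` keyword and folds the guard
into a `CONDITION` argument. A minimal sketch of the pattern (`MyDep` is a hypothetical
package, not part of this patch; the real calls are in the hunks that follow):

    # Record the build-tree root of a hypothetical MyDep package against the
    # cudf-exports export set; the registration is skipped when MyDep_BINARY_DIR
    # is unset, so CONDITION replaces the old if(MyDep_BINARY_DIR) guard.
    include("${rapids-cmake-dir}/export/find_package_root.cmake")
    rapids_export_find_package_root(
      BUILD MyDep "${MyDep_BINARY_DIR}"
      EXPORT_SET cudf-exports
      CONDITION MyDep_BINARY_DIR
    )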
diff --git a/cpp/cmake/thirdparty/get_arrow.cmake b/cpp/cmake/thirdparty/get_arrow.cmake
index 894dc9649e2..10d3145a36f 100644
--- a/cpp/cmake/thirdparty/get_arrow.cmake
+++ b/cpp/cmake/thirdparty/get_arrow.cmake
@@ -387,11 +387,19 @@ function(find_and_configure_arrow VERSION BUILD_STATIC ENABLE_S3 ENABLE_ORC ENAB
   endif()

   include("${rapids-cmake-dir}/export/find_package_root.cmake")
-  rapids_export_find_package_root(BUILD Arrow [=[${CMAKE_CURRENT_LIST_DIR}]=] cudf-exports)
-  if(ENABLE_PARQUET)
-    rapids_export_find_package_root(BUILD Parquet [=[${CMAKE_CURRENT_LIST_DIR}]=] cudf-exports)
-    rapids_export_find_package_root(BUILD ArrowDataset [=[${CMAKE_CURRENT_LIST_DIR}]=] cudf-exports)
-  endif()
+  rapids_export_find_package_root(
+    BUILD Arrow [=[${CMAKE_CURRENT_LIST_DIR}]=] EXPORT_SET cudf-exports
+  )
+  rapids_export_find_package_root(
+    BUILD Parquet [=[${CMAKE_CURRENT_LIST_DIR}]=]
+    EXPORT_SET cudf-exports
+    CONDITION ENABLE_PARQUET
+  )
+  rapids_export_find_package_root(
+    BUILD ArrowDataset [=[${CMAKE_CURRENT_LIST_DIR}]=]
+    EXPORT_SET cudf-exports
+    CONDITION ENABLE_PARQUET
+  )

   set(ARROW_LIBRARIES
       "${ARROW_LIBRARIES}"
diff --git a/cpp/cmake/thirdparty/get_cufile.cmake b/cpp/cmake/thirdparty/get_cufile.cmake
index c0235eba508..bfdff3a99ff 100644
--- a/cpp/cmake/thirdparty/get_cufile.cmake
+++ b/cpp/cmake/thirdparty/get_cufile.cmake
@@ -21,10 +21,10 @@ function(find_and_configure_cufile)
   if(cuFile_FOUND AND NOT BUILD_SHARED_LIBS)
     include("${rapids-cmake-dir}/export/find_package_file.cmake")
     rapids_export_find_package_file(
-      BUILD "${CUDF_SOURCE_DIR}/cmake/Modules/FindcuFile.cmake" cudf-exports
+      BUILD "${CUDF_SOURCE_DIR}/cmake/Modules/FindcuFile.cmake" EXPORT_SET cudf-exports
     )
     rapids_export_find_package_file(
-      INSTALL "${CUDF_SOURCE_DIR}/cmake/Modules/FindcuFile.cmake" cudf-exports
+      INSTALL "${CUDF_SOURCE_DIR}/cmake/Modules/FindcuFile.cmake" EXPORT_SET cudf-exports
     )
   endif()
 endfunction()
diff --git a/cpp/cmake/thirdparty/get_gtest.cmake b/cpp/cmake/thirdparty/get_gtest.cmake
index 1363f43fae2..cfb219448f1 100644
--- a/cpp/cmake/thirdparty/get_gtest.cmake
+++ b/cpp/cmake/thirdparty/get_gtest.cmake
@@ -1,5 +1,5 @@
 # =============================================================================
-# Copyright (c) 2021, NVIDIA CORPORATION.
+# Copyright (c) 2021-2023, NVIDIA CORPORATION.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
 # in compliance with the License. You may obtain a copy of the License at
@@ -30,7 +30,7 @@ function(find_and_configure_gtest)

     include("${rapids-cmake-dir}/export/find_package_root.cmake")
     rapids_export_find_package_root(
-      BUILD GTest [=[${CMAKE_CURRENT_LIST_DIR}]=] cudf-testing-exports
+      BUILD GTest [=[${CMAKE_CURRENT_LIST_DIR}]=] EXPORT_SET cudf-testing-exports
     )
   endif()
diff --git a/cpp/cmake/thirdparty/get_kvikio.cmake b/cpp/cmake/thirdparty/get_kvikio.cmake
index e94e024d6c9..20712beec41 100644
--- a/cpp/cmake/thirdparty/get_kvikio.cmake
+++ b/cpp/cmake/thirdparty/get_kvikio.cmake
@@ -1,5 +1,5 @@
 # =============================================================================
-# Copyright (c) 2022, NVIDIA CORPORATION.
+# Copyright (c) 2022-2023, NVIDIA CORPORATION.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
 # in compliance with the License. You may obtain a copy of the License at
@@ -25,10 +25,12 @@ function(find_and_configure_kvikio VERSION)
     OPTIONS "KvikIO_BUILD_EXAMPLES OFF"
   )

-  if(KvikIO_BINARY_DIR)
-    include("${rapids-cmake-dir}/export/find_package_root.cmake")
-    rapids_export_find_package_root(BUILD KvikIO "${KvikIO_BINARY_DIR}" cudf-exports)
-  endif()
+  include("${rapids-cmake-dir}/export/find_package_root.cmake")
+  rapids_export_find_package_root(
+    BUILD KvikIO "${KvikIO_BINARY_DIR}"
+    EXPORT_SET cudf-exports
+    CONDITION KvikIO_BINARY_DIR
+  )
 endfunction()
diff --git a/cpp/cmake/thirdparty/get_libcudacxx.cmake b/cpp/cmake/thirdparty/get_libcudacxx.cmake
index 0e03352c335..285d66287f3 100644
--- a/cpp/cmake/thirdparty/get_libcudacxx.cmake
+++ b/cpp/cmake/thirdparty/get_libcudacxx.cmake
@@ -22,16 +22,14 @@ function(find_and_configure_libcudacxx)
   include(${rapids-cmake-dir}/cpm/libcudacxx.cmake)
   rapids_cpm_libcudacxx(BUILD_EXPORT_SET cudf-exports INSTALL_EXPORT_SET cudf-exports)

-  if(libcudacxx_SOURCE_DIR)
-    # Store where CMake can find our custom Thrust install
-    include("${rapids-cmake-dir}/export/find_package_root.cmake")
-    rapids_export_find_package_root(
-      INSTALL
-      libcudacxx
-      [=[${CMAKE_CURRENT_LIST_DIR}/../../../include/libcudf/lib/rapids/cmake/libcudacxx]=]
-      cudf-exports
-    )
-  endif()
+  # Store where CMake can find our custom Thrust install
+  include("${rapids-cmake-dir}/export/find_package_root.cmake")
+  rapids_export_find_package_root(
+    INSTALL libcudacxx
+    [=[${CMAKE_CURRENT_LIST_DIR}/../../../include/libcudf/lib/rapids/cmake/libcudacxx]=]
+    EXPORT_SET cudf-exports
+    CONDITION libcudacxx_SOURCE_DIR
+  )
 endfunction()

 find_and_configure_libcudacxx()
diff --git a/cpp/cmake/thirdparty/get_spdlog.cmake b/cpp/cmake/thirdparty/get_spdlog.cmake
index fff5b84af0d..c0e07d02d94 100644
--- a/cpp/cmake/thirdparty/get_spdlog.cmake
+++ b/cpp/cmake/thirdparty/get_spdlog.cmake
@@ -27,7 +27,9 @@ function(find_and_configure_spdlog)
                             NAMESPACE spdlog::
     )
     include("${rapids-cmake-dir}/export/find_package_root.cmake")
-    rapids_export_find_package_root(BUILD spdlog [=[${CMAKE_CURRENT_LIST_DIR}]=] cudf-exports)
+    rapids_export_find_package_root(
+      BUILD spdlog [=[${CMAKE_CURRENT_LIST_DIR}]=] EXPORT_SET cudf-exports
+    )
   endif()
 endfunction()
diff --git a/cpp/cmake/thirdparty/get_thrust.cmake b/cpp/cmake/thirdparty/get_thrust.cmake
index 39a9de15fa6..67ed4287d7b 100644
--- a/cpp/cmake/thirdparty/get_thrust.cmake
+++ b/cpp/cmake/thirdparty/get_thrust.cmake
@@ -33,14 +33,13 @@ function(find_and_configure_thrust)
     INSTALL_EXPORT_SET cudf-exports
   )

-  if(Thrust_SOURCE_DIR)
-    # Store where CMake can find our custom Thrust install
-    include("${rapids-cmake-dir}/export/find_package_root.cmake")
-    rapids_export_find_package_root(
-      INSTALL Thrust
-      [=[${CMAKE_CURRENT_LIST_DIR}/../../../include/libcudf/lib/rapids/cmake/thrust]=] cudf-exports
-    )
-  endif()
+  # Store where CMake can find our custom Thrust install
+  include("${rapids-cmake-dir}/export/find_package_root.cmake")
+  rapids_export_find_package_root(
+    INSTALL Thrust [=[${CMAKE_CURRENT_LIST_DIR}/../../../include/libcudf/lib/rapids/cmake/thrust]=]
+    EXPORT_SET cudf-exports
+    CONDITION Thrust_SOURCE_DIR
+  )
 endfunction()

 find_and_configure_thrust()