Avoid use of CUDF_EXPECTS in libcudf unit tests outside of helper functions with return values #13812

Merged
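The change is mechanical: inside test bodies and void-returning test helpers, CUDF_EXPECTS (which throws cudf::logic_error and surfaces as an uncaught exception rather than a gtest failure) is replaced with the equivalent gtest assertion. Below is a minimal sketch of the convention, using a hypothetical test and helper (StyleExample, make_positive) rather than code from this diff:

```cpp
// Hypothetical example (not taken from the diff) of the convention this PR
// applies: in a TEST body, gtest assertions report failures through the test
// framework, whereas CUDF_EXPECTS would throw cudf::logic_error instead.
#include <gtest/gtest.h>

#include <algorithm>
#include <memory>
#include <vector>

TEST(StyleExample, PreferGtestAssertionsInTestBodies)
{
  std::vector<int> result{1, 2, 3};
  std::vector<int> expected{1, 2, 3};

  // Before: CUDF_EXPECTS(result.size() == expected.size(), "size mismatch");
  ASSERT_EQ(result.size(), expected.size());
  EXPECT_TRUE(std::equal(result.begin(), result.end(), expected.begin()));
}

// A failed ASSERT_* expands to a bare `return;`, so it only compiles in
// functions returning void. A helper with a return value therefore cannot use
// ASSERT_*; such helpers may keep CUDF_EXPECTS (or use non-fatal EXPECT_*),
// which is the exception named in the PR title.
std::unique_ptr<int> make_positive(int v)
{
  // ASSERT_GT(v, 0);  // would not compile here: enclosing function is non-void
  return std::make_unique<int>(v);
}
```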
2 changes: 1 addition & 1 deletion cpp/tests/copying/concatenate_tests.cpp
@@ -842,7 +842,7 @@ TEST_F(StructsColumnTest, ConcatenateEmptyStructs)

// concatenate
auto result = cudf::concatenate(std::vector<column_view>({*first, *second, *third, *fourth}));
- CUDF_EXPECTS(result->size() == expected->size(), "column size changed after concat");
+ ASSERT_EQ(result->size(), expected->size());
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(*result, *expected);
}

11 changes: 5 additions & 6 deletions cpp/tests/copying/split_tests.cpp
@@ -1229,7 +1229,7 @@ void split_nested_list_of_structs(SplitFunc Split, CompareFunc Compare, bool spl
// these outputs correctly, this should be safe.
auto result = Split(*outer_list, splits);
auto expected = cudf::split(static_cast<cudf::column_view>(*outer_list), splits);
- CUDF_EXPECTS(result.size() == expected.size(), "Split result size mismatch");
+ ASSERT_EQ(result.size(), expected.size());

for (std::size_t index = 0; index < result.size(); index++) {
Compare(expected[index], result[index]);
@@ -1591,8 +1591,7 @@ TEST_F(ContiguousSplitUntypedTest, ValidityRepartition)
cudf::table_view t({*col});
auto result = cudf::contiguous_split(t, {num_rows / 2});
auto expected = cudf::split(t, {num_rows / 2});
- CUDF_EXPECTS(result.size() == expected.size(),
-   "Mismatch in split results in ValidityRepartition test");
+ ASSERT_EQ(result.size(), expected.size());

for (size_t idx = 0; idx < result.size(); idx++) {
CUDF_TEST_EXPECT_TABLES_EQUAL(result[idx].table, expected[idx]);
@@ -1696,22 +1695,22 @@ TEST_F(ContiguousSplitStringTableTest, EmptyInputColumn)
{
std::vector<cudf::size_type> splits;
auto result = cudf::contiguous_split(src_table, splits);
- CUDF_EXPECTS(result.size() == 1, "Incorrect returned contiguous_split result size!");
+ ASSERT_EQ(result.size(), 1);

CUDF_TEST_EXPECT_TABLES_EQUIVALENT(src_table, result[0].table);
}

{
auto result = do_chunked_pack(src_table);
- CUDF_EXPECTS(result.size() == 1, "Incorrect returned contiguous_split result size!");
+ ASSERT_EQ(result.size(), 1);

CUDF_TEST_EXPECT_TABLES_EQUIVALENT(src_table, result[0].table);
}

{
std::vector<cudf::size_type> splits{0, 0, 0, 0};
auto result = cudf::contiguous_split(src_table, splits);
- CUDF_EXPECTS(result.size() == 5, "Incorrect returned contiguous_split result size!");
+ ASSERT_EQ(result.size(), 5);

for (size_t idx = 0; idx < result.size(); idx++) {
CUDF_TEST_EXPECT_TABLES_EQUIVALENT(src_table, result[idx].table);
8 changes: 4 additions & 4 deletions cpp/tests/groupby/tdigest_tests.cu
@@ -265,7 +265,7 @@ TEST_F(TDigestMergeTest, Grouped)
{
auto values = cudf::test::generate_standardized_percentile_distribution(
cudf::data_type{cudf::type_id::FLOAT64});
- CUDF_EXPECTS(values->size() == 750000, "Unexpected distribution size");
+ ASSERT_EQ(values->size(), 750000);
// all in the same group
auto keys = cudf::make_fixed_width_column(
cudf::data_type{cudf::type_id::INT32}, values->size(), cudf::mask_state::UNALLOCATED);
@@ -321,7 +321,7 @@ TEST_F(TDigestMergeTest, Grouped)
requests.push_back({*merge_input, std::move(aggregations)});
auto result = gb.aggregate(requests);

- CUDF_EXPECTS(result.second[0].results[0]->size() == 2, "Unexpected tdigest merge result size");
+ ASSERT_EQ(result.second[0].results[0]->size(), 2);
cudf::tdigest::tdigest_column_view tdv(*result.second[0].results[0]);

// verify centroids
@@ -376,7 +376,7 @@ TEST_F(TDigestMergeTest, Grouped)
requests.push_back({*merge_input, std::move(aggregations)});
auto result = gb.aggregate(requests);

- CUDF_EXPECTS(result.second[0].results[0]->size() == 2, "Unexpected tdigest merge result size");
+ ASSERT_EQ(result.second[0].results[0]->size(), 2);
cudf::tdigest::tdigest_column_view tdv(*result.second[0].results[0]);

// verify centroids
@@ -423,7 +423,7 @@ TEST_F(TDigestMergeTest, Grouped)
requests.push_back({*merge_input, std::move(aggregations)});
auto result = gb.aggregate(requests);

- CUDF_EXPECTS(result.second[0].results[0]->size() == 2, "Unexpected tdigest merge result size");
+ ASSERT_EQ(result.second[0].results[0]->size(), 2);
cudf::tdigest::tdigest_column_view tdv(*result.second[0].results[0]);

// verify centroids
9 changes: 4 additions & 5 deletions cpp/tests/interop/from_arrow_test.cpp
@@ -86,8 +86,8 @@ TEST_F(FromArrowTest, DateTimeTable)
std::shared_ptr<arrow::Array> arr;
arrow::TimestampBuilder timestamp_builder(arrow::timestamp(arrow::TimeUnit::type::MILLI),
arrow::default_memory_pool());
- CUDF_EXPECTS(timestamp_builder.AppendValues(data).ok(), "Failed to append values");
- CUDF_EXPECTS(timestamp_builder.Finish(&arr).ok(), "Failed to build array");
+ ASSERT_TRUE(timestamp_builder.AppendValues(data).ok());
+ ASSERT_TRUE(timestamp_builder.Finish(&arr).ok());

std::vector<std::shared_ptr<arrow::Field>> schema_vector({arrow::field("a", arr->type())});
auto schema = std::make_shared<arrow::Schema>(schema_vector);
@@ -119,9 +119,8 @@ TYPED_TEST(FromArrowTestDurationsTest, DurationTable)
default: CUDF_FAIL("Unsupported duration unit in arrow");
}
arrow::DurationBuilder duration_builder(duration(arrow_unit), arrow::default_memory_pool());
- CUDF_EXPECTS(duration_builder.AppendValues(std::vector<int64_t>{1, 2, 3, 4, 5, 6}).ok(),
-   "Failed to append values");
- CUDF_EXPECTS(duration_builder.Finish(&arr).ok(), "Failed to build array");
+ ASSERT_TRUE(duration_builder.AppendValues(std::vector<int64_t>{1, 2, 3, 4, 5, 6}).ok());
+ ASSERT_TRUE(duration_builder.Finish(&arr).ok());

std::vector<std::shared_ptr<arrow::Field>> schema_vector({arrow::field("a", arr->type())});
auto schema = std::make_shared<arrow::Schema>(schema_vector);
10 changes: 4 additions & 6 deletions cpp/tests/interop/to_arrow_test.cpp
@@ -188,9 +188,8 @@ TEST_F(ToArrowTest, DateTimeTable)
std::shared_ptr<arrow::Array> arr;
arrow::TimestampBuilder timestamp_builder(timestamp(arrow::TimeUnit::type::MILLI),
arrow::default_memory_pool());
- CUDF_EXPECTS(timestamp_builder.AppendValues(std::vector<int64_t>{1, 2, 3, 4, 5, 6}).ok(),
-   "Failed to append values");
- CUDF_EXPECTS(timestamp_builder.Finish(&arr).ok(), "Failed to build array");
+ ASSERT_TRUE(timestamp_builder.AppendValues(std::vector<int64_t>{1, 2, 3, 4, 5, 6}).ok());
+ ASSERT_TRUE(timestamp_builder.Finish(&arr).ok());

std::vector<std::shared_ptr<arrow::Field>> schema_vector({arrow::field("a", arr->type())});
auto schema = std::make_shared<arrow::Schema>(schema_vector);
@@ -222,9 +221,8 @@ TYPED_TEST(ToArrowTestDurationsTest, DurationTable)
default: CUDF_FAIL("Unsupported duration unit in arrow");
}
arrow::DurationBuilder duration_builder(duration(arrow_unit), arrow::default_memory_pool());
- CUDF_EXPECTS(duration_builder.AppendValues(std::vector<int64_t>{1, 2, 3, 4, 5, 6}).ok(),
-   "Failed to append values");
- CUDF_EXPECTS(duration_builder.Finish(&arr).ok(), "Failed to build array");
+ ASSERT_TRUE(duration_builder.AppendValues(std::vector<int64_t>{1, 2, 3, 4, 5, 6}).ok());
+ ASSERT_TRUE(duration_builder.Finish(&arr).ok());

std::vector<std::shared_ptr<arrow::Field>> schema_vector({arrow::field("a", arr->type())});
auto schema = std::make_shared<arrow::Schema>(schema_vector);
2 changes: 1 addition & 1 deletion cpp/tests/io/arrow_io_source_test.cpp
@@ -97,7 +97,7 @@ TEST_F(ArrowIOTest, S3FileSystem)

close_s3_func = reinterpret_cast<decltype(close_s3_func)>(
dlsym(whole_app, "_ZN5arrow2fs17EnsureS3FinalizedEv"));
- if (close_s3_func) { CUDF_EXPECTS(close_s3_func().ok(), "Failed to finalize s3 filesystem"); }
+ if (close_s3_func) { EXPECT_TRUE(close_s3_func().ok()); }
dlclose(whole_app);
}
}
3 changes: 1 addition & 2 deletions cpp/tests/io/csv_test.cpp
@@ -169,8 +169,7 @@ void check_float_column(cudf::column_view const& col_lhs,

CUDF_TEST_EXPECT_COLUMN_PROPERTIES_EQUIVALENT(col_lhs,
(wrapper<T>{data.begin(), data.end(), validity}));
- CUDF_EXPECTS(col_lhs.null_count() == 0 and col_rhs.null_count() == 0,
-   "All elements should be valid");
+ EXPECT_TRUE(col_lhs.null_count() == 0 and col_rhs.null_count() == 0);
EXPECT_THAT(cudf::test::to_host<T>(col_lhs).first,
::testing::Pointwise(FloatNearPointwise(tol), data));
}
2 changes: 1 addition & 1 deletion cpp/tests/io/json_test.cpp
@@ -149,7 +149,7 @@ void check_float_column(cudf::column_view const& col,
valid_t const& validity)
{
CUDF_TEST_EXPECT_COLUMN_PROPERTIES_EQUAL(col, (wrapper<T>{data.begin(), data.end(), validity}));
- CUDF_EXPECTS(col.null_count() == 0, "All elements should be valid");
+ EXPECT_EQ(col.null_count(), 0);
EXPECT_THAT(cudf::test::to_host<T>(col).first,
::testing::Pointwise(FloatNearPointwise(1e-6), data));
}
33 changes: 15 additions & 18 deletions cpp/tests/io/parquet_test.cpp
@@ -213,12 +213,10 @@ void read_footer(std::unique_ptr<cudf::io::datasource> const& source,
auto const ender = reinterpret_cast<cudf::io::parquet::file_ender_s const*>(ender_buffer->data());

// checks for valid header, footer, and file length
- CUDF_EXPECTS(len > header_len + ender_len, "Incorrect data source");
- CUDF_EXPECTS(header->magic == cudf::io::parquet::parquet_magic &&
-   ender->magic == cudf::io::parquet::parquet_magic,
-   "Corrupted header or footer");
- CUDF_EXPECTS(ender->footer_len != 0 && ender->footer_len <= (len - header_len - ender_len),
-   "Incorrect footer length");
+ ASSERT_GT(len, header_len + ender_len);
+ ASSERT_TRUE(header->magic == cudf::io::parquet::parquet_magic &&
+   ender->magic == cudf::io::parquet::parquet_magic);
+ ASSERT_TRUE(ender->footer_len != 0 && ender->footer_len <= (len - header_len - ender_len));

// parquet files end with 4-byte footer_length and 4-byte magic == "PAR1"
// seek backwards from the end of the file (footer_length + 8 bytes of ender)
@@ -228,7 +226,7 @@

// returns true on success
bool res = cp.read(file_meta_data);
- CUDF_EXPECTS(res, "Cannot parse file metadata");
+ ASSERT_TRUE(res);
}

// returns the number of bits used for dictionary encoding data at the given page location.
@@ -1622,7 +1620,7 @@ TEST_F(ParquetChunkedWriterTest, LargeTables)
cudf::io::chunked_parquet_writer_options args =
cudf::io::chunked_parquet_writer_options::builder(cudf::io::sink_info{filepath});
auto md = cudf::io::parquet_chunked_writer(args).write(*table1).write(*table2).close();
- CUDF_EXPECTS(!md, "The return value should be null.");
+ ASSERT_EQ(md, nullptr);

cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
@@ -1653,7 +1651,7 @@ TEST_F(ParquetChunkedWriterTest, ManyTables)
writer.write(tbl);
});
auto md = writer.close({"dummy/path"});
- CUDF_EXPECTS(md, "The returned metadata should not be null.");
+ ASSERT_NE(md, nullptr);

cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
@@ -3660,10 +3658,10 @@ TEST_F(ParquetWriterTest, CheckPageRows)
cudf::io::parquet::FileMetaData fmd;

read_footer(source, &fmd);
- CUDF_EXPECTS(fmd.row_groups.size() > 0, "No row groups found");
- CUDF_EXPECTS(fmd.row_groups[0].columns.size() == 1, "Invalid number of columns");
+ ASSERT_GT(fmd.row_groups.size(), 0);
+ ASSERT_EQ(fmd.row_groups[0].columns.size(), 1);
auto const& first_chunk = fmd.row_groups[0].columns[0].meta_data;
- CUDF_EXPECTS(first_chunk.data_page_offset > 0, "Invalid location for first data page");
+ ASSERT_GT(first_chunk.data_page_offset, 0);

// read first data page header. sizeof(PageHeader) is not exact, but the thrift encoded
// version should be smaller than size of the struct.
@@ -3696,10 +3694,10 @@ TEST_F(ParquetWriterTest, CheckPageRowsAdjusted)
cudf::io::parquet::FileMetaData fmd;

read_footer(source, &fmd);
- CUDF_EXPECTS(fmd.row_groups.size() > 0, "No row groups found");
- CUDF_EXPECTS(fmd.row_groups[0].columns.size() == 1, "Invalid number of columns");
+ ASSERT_GT(fmd.row_groups.size(), 0);
+ ASSERT_EQ(fmd.row_groups[0].columns.size(), 1);
auto const& first_chunk = fmd.row_groups[0].columns[0].meta_data;
- CUDF_EXPECTS(first_chunk.data_page_offset > 0, "Invalid location for first data page");
+ ASSERT_GT(first_chunk.data_page_offset, 0);

// read first data page header. sizeof(PageHeader) is not exact, but the thrift encoded
// version should be smaller than size of the struct.
@@ -4005,11 +4003,10 @@ TYPED_TEST(ParquetWriterComparableTypeTest, ThreeColumnSorted)
cudf::io::parquet::FileMetaData fmd;

read_footer(source, &fmd);
- CUDF_EXPECTS(fmd.row_groups.size() > 0, "No row groups found");
+ ASSERT_GT(fmd.row_groups.size(), 0);

auto const& columns = fmd.row_groups[0].columns;
- CUDF_EXPECTS(columns.size() == static_cast<size_t>(expected.num_columns()),
-   "Invalid number of columns");
+ ASSERT_EQ(columns.size(), static_cast<size_t>(expected.num_columns()));

// now check that the boundary order for chunk 1 is ascending,
// chunk 2 is descending, and chunk 3 is unordered
4 changes: 2 additions & 2 deletions cpp/tests/transform/row_bit_count_test.cu
@@ -750,7 +750,7 @@ TEST_F(RowBitCount, EmptyTable)
{
cudf::table_view empty;
auto result = cudf::row_bit_count(empty);
- CUDF_EXPECTS(result != nullptr && result->size() == 0, "Expected an empty column");
+ EXPECT_TRUE(result != nullptr && result->size() == 0);
}

{
@@ -759,6 +759,6 @@
cudf::table_view empty({*strings, *ints});

auto result = cudf::row_bit_count(empty);
- CUDF_EXPECTS(result != nullptr && result->size() == 0, "Expected an empty column");
+ EXPECT_TRUE(result != nullptr && result->size() == 0);
}
}
6 changes: 2 additions & 4 deletions cpp/tests/transpose/transpose_test.cpp
@@ -146,12 +146,10 @@ void run_test(size_t ncols, size_t nrows, bool add_nulls)
auto result = transpose(input_view);
auto result_view = std::get<1>(result);

- CUDF_EXPECTS(result_view.num_columns() == expected_view.num_columns(),
-   "Expected same number of columns");
+ ASSERT_EQ(result_view.num_columns(), expected_view.num_columns());
for (cudf::size_type i = 0; i < result_view.num_columns(); ++i) {
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result_view.column(i), expected_view.column(i));
- CUDF_EXPECTS(result_view.column(i).null_count() == expected_nulls[i],
-   "Expected correct null count");
+ EXPECT_EQ(result_view.column(i).null_count(), expected_nulls[i]);
}
}
