Merge branch 'branch-23.04' into fea/parallel_testing
robertmaynard authored Mar 22, 2023
2 parents 23ac812 + bf18cea commit 97ff54b
Showing 146 changed files with 2,779 additions and 1,876 deletions.
7 changes: 4 additions & 3 deletions build.sh
@@ -300,8 +300,7 @@ if buildAll || hasArg libcudf; then
# Record build times
if [[ "$BUILD_REPORT_METRICS" == "ON" && -f "${LIB_BUILD_DIR}/.ninja_log" ]]; then
echo "Formatting build metrics"
-python ${REPODIR}/cpp/scripts/sort_ninja_log.py ${LIB_BUILD_DIR}/.ninja_log --fmt xml > ${LIB_BUILD_DIR}/ninja_log.xml
-MSG="<p>"
+MSG=""
# get some sccache stats after the compile
if [[ "$BUILD_REPORT_INCL_CACHE_STATS" == "ON" && -x "$(command -v sccache)" ]]; then
COMPILE_REQUESTS=$(sccache -s | grep "Compile requests \+ [0-9]\+$" | awk '{ print $NF }')
@@ -318,7 +317,9 @@ if buildAll || hasArg libcudf; then
BMR_DIR=${RAPIDS_ARTIFACTS_DIR:-"${LIB_BUILD_DIR}"}
echo "Metrics output dir: [$BMR_DIR]"
mkdir -p ${BMR_DIR}
-python ${REPODIR}/cpp/scripts/sort_ninja_log.py ${LIB_BUILD_DIR}/.ninja_log --fmt html --msg "$MSG" > ${BMR_DIR}/ninja_log.html
+MSG_OUTFILE="$(mktemp)"
+echo "$MSG" > "${MSG_OUTFILE}"
+python ${REPODIR}/cpp/scripts/sort_ninja_log.py ${LIB_BUILD_DIR}/.ninja_log --fmt html --msg "${MSG_OUTFILE}" > ${BMR_DIR}/ninja_log.html
cp ${LIB_BUILD_DIR}/.ninja_log ${BMR_DIR}/ninja.log
fi

26 changes: 0 additions & 26 deletions ci/build_cpp.sh
@@ -14,29 +14,3 @@ rapids-logger "Begin cpp build"
rapids-mamba-retry mambabuild conda/recipes/libcudf

rapids-upload-conda-to-s3 cpp
-
-echo "++++++++++++++++++++++++++++++++++++++++++++"
-
-if [[ -d $RAPIDS_ARTIFACTS_DIR ]]; then
-ls -l ${RAPIDS_ARTIFACTS_DIR}
-fi
-
-echo "++++++++++++++++++++++++++++++++++++++++++++"
-
-FILE=${RAPIDS_ARTIFACTS_DIR}/ninja.log
-if [[ -f $FILE ]]; then
-echo -e "\x1B[33;1m\x1B[48;5;240m Ninja log for this build available at the following link \x1B[0m"
-UPLOAD_NAME=cpp_cuda${RAPIDS_CUDA_VERSION%%.*}_$(arch).ninja.log
-rapids-upload-to-s3 "${UPLOAD_NAME}" "${FILE}"
-fi
-
-echo "++++++++++++++++++++++++++++++++++++++++++++"
-
-FILE=${RAPIDS_ARTIFACTS_DIR}/ninja_log.html
-if [[ -f $FILE ]]; then
-echo -e "\x1B[33;1m\x1B[48;5;240m Build Metrics Report for this build available at the following link \x1B[0m"
-UPLOAD_NAME=cpp_cuda${RAPIDS_CUDA_VERSION%%.*}_$(arch).BuildMetricsReport.html
-rapids-upload-to-s3 "${UPLOAD_NAME}" "${FILE}"
-fi
-
-echo "++++++++++++++++++++++++++++++++++++++++++++"
6 changes: 0 additions & 6 deletions ci/release/apply_wheel_modifications.sh
@@ -6,12 +6,6 @@
VERSION=${1}
CUDA_SUFFIX=${2}

-# __init__.py versions
-sed -i "s/__version__ = .*/__version__ = \"${VERSION}\"/g" python/cudf/cudf/__init__.py
-sed -i "s/__version__ = .*/__version__ = \"${VERSION}\"/g" python/dask_cudf/dask_cudf/__init__.py
-sed -i "s/__version__ = .*/__version__ = \"${VERSION}\"/g" python/cudf_kafka/cudf_kafka/__init__.py
-sed -i "s/__version__ = .*/__version__ = \"${VERSION}\"/g" python/custreamz/custreamz/__init__.py
-
# pyproject.toml versions
sed -i "s/^version = .*/version = \"${VERSION}\"/g" python/cudf/pyproject.toml
sed -i "s/^version = .*/version = \"${VERSION}\"/g" python/dask_cudf/pyproject.toml
3 changes: 3 additions & 0 deletions conda/recipes/libcudf/meta.yaml
@@ -92,6 +92,7 @@ outputs:
- test -f $PREFIX/include/cudf/concatenate.hpp
- test -f $PREFIX/include/cudf/copying.hpp
- test -f $PREFIX/include/cudf/datetime.hpp
+- test -f $PREFIX/include/cudf/timezone.hpp
- test -f $PREFIX/include/cudf/detail/aggregation/aggregation.hpp
- test -f $PREFIX/include/cudf/detail/aggregation/result_cache.hpp
- test -f $PREFIX/include/cudf/detail/binaryop.hpp
@@ -128,6 +129,8 @@ outputs:
- test -f $PREFIX/include/cudf/detail/stream_compaction.hpp
- test -f $PREFIX/include/cudf/detail/structs/utilities.hpp
- test -f $PREFIX/include/cudf/detail/tdigest/tdigest.hpp
+- test -f $PREFIX/include/cudf/detail/timezone.cuh
+- test -f $PREFIX/include/cudf/detail/timezone.hpp
- test -f $PREFIX/include/cudf/detail/transform.hpp
- test -f $PREFIX/include/cudf/detail/transpose.hpp
- test -f $PREFIX/include/cudf/detail/unary.hpp
4 changes: 2 additions & 2 deletions conda/recipes/libcudf/post-link.sh
@@ -1,6 +1,6 @@
#!/bin/bash
-# Copyright (c) 2022, NVIDIA CORPORATION.
+# Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Only add the license notice to libcudf and not our examples / tests
if [[ "$PKG_NAME" == "libcudf" ]]; then
cat ./nvlink.txt >> $PREFIX/.messages.txt
cat ./nvcomp.txt >> $PREFIX/.messages.txt
fi
51 changes: 12 additions & 39 deletions cpp/CMakeLists.txt
@@ -369,7 +369,7 @@ add_library(
src/io/orc/stripe_data.cu
src/io/orc/stripe_enc.cu
src/io/orc/stripe_init.cu
-src/io/orc/timezone.cpp
+src/datetime/timezone.cpp
src/io/orc/writer_impl.cu
src/io/parquet/compact_protocol_reader.cpp
src/io/parquet/compact_protocol_writer.cpp
@@ -890,31 +890,20 @@ install(
EXPORT cudf-exports
)

-install(DIRECTORY ${CUDF_SOURCE_DIR}/include/cudf ${CUDF_SOURCE_DIR}/include/cudf_test
-${CUDF_SOURCE_DIR}/include/nvtext DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
-)
-
-if(CUDF_BUILD_TESTUTIL)
+set(_components_export_string)
+if(TARGET cudftestutil)
install(
TARGETS cudftest_default_stream cudftestutil
DESTINATION ${lib_dir}
EXPORT cudf-testing-exports
)
-
-install(
-EXPORT cudf-testing-exports
-FILE cudf-testing-targets.cmake
-NAMESPACE cudf::
-DESTINATION "${lib_dir}/cmake/cudf"
-)
-
-include("${rapids-cmake-dir}/export/write_dependencies.cmake")
-rapids_export_write_dependencies(
-INSTALL cudf-testing-exports
-"${PROJECT_BINARY_DIR}/rapids-cmake/cudf/export/cudf-testing-dependencies.cmake"
-)
+set(_components_export_string COMPONENTS testing COMPONENTS_EXPORT_SET cudf-testing-exports)
endif()

+install(DIRECTORY ${CUDF_SOURCE_DIR}/include/cudf ${CUDF_SOURCE_DIR}/include/cudf_test
+${CUDF_SOURCE_DIR}/include/nvtext DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
+)
+
if(CUDF_BUILD_STREAMS_TEST_UTIL)
install(TARGETS cudf_identify_stream_usage_mode_cudf DESTINATION ${lib_dir})
install(TARGETS cudf_identify_stream_usage_mode_testing DESTINATION ${lib_dir})
@@ -976,21 +965,15 @@ string(
[=[
if(testing IN_LIST cudf_FIND_COMPONENTS)
enable_language(CUDA)
-if(EXISTS "${CMAKE_CURRENT_LIST_DIR}/cudf-testing-dependencies.cmake")
-include("${CMAKE_CURRENT_LIST_DIR}/cudf-testing-dependencies.cmake")
-endif()
-if(EXISTS "${CMAKE_CURRENT_LIST_DIR}/cudf-testing-targets.cmake")
-include("${CMAKE_CURRENT_LIST_DIR}/cudf-testing-targets.cmake")
-endif()
endif()
]=]
)
string(APPEND install_code_string "${common_code_string}")

rapids_export(
INSTALL cudf
-EXPORT_SET cudf-exports
-GLOBAL_TARGETS cudf
+EXPORT_SET cudf-exports ${_components_export_string}
+GLOBAL_TARGETS cudf cudftestutil
NAMESPACE cudf::
DOCUMENTATION doc_string
FINAL_CODE_BLOCK install_code_string
@@ -1013,23 +996,13 @@ string(APPEND build_code_string "${common_code_string}")

rapids_export(
BUILD cudf
-EXPORT_SET cudf-exports
-GLOBAL_TARGETS cudf
+EXPORT_SET cudf-exports ${_components_export_string}
+GLOBAL_TARGETS cudf cudftestutil
NAMESPACE cudf::
DOCUMENTATION doc_string
FINAL_CODE_BLOCK build_code_string
)

-if(CUDF_BUILD_TESTUTIL)
-export(
-EXPORT cudf-testing-exports
-FILE ${CUDF_BINARY_DIR}/cudf-testing-targets.cmake
-NAMESPACE cudf::
-)
-rapids_export_write_dependencies(
-BUILD cudf-testing-exports "${CUDF_BINARY_DIR}/cudf-testing-dependencies.cmake"
-)
-endif()
# ##################################################################################################
# * make documentation ----------------------------------------------------------------------------

35 changes: 25 additions & 10 deletions cpp/benchmarks/common/generate_input.cu
@@ -430,8 +430,12 @@ std::unique_ptr<cudf::column> create_random_column(data_profile const& profile,
null_mask.begin());
}

-auto [result_bitmask, null_count] = cudf::detail::valid_if(
-null_mask.begin(), null_mask.end(), thrust::identity<bool>{}, cudf::get_default_stream());
+auto [result_bitmask, null_count] =
+cudf::detail::valid_if(null_mask.begin(),
+null_mask.end(),
+thrust::identity<bool>{},
+cudf::get_default_stream(),
+rmm::mr::get_current_device_resource());

return std::make_unique<cudf::column>(
dtype,
@@ -509,8 +513,12 @@ std::unique_ptr<cudf::column> create_random_utf8_string_column(data_profile cons
thrust::make_zip_iterator(offsets.begin(), offsets.begin() + 1),
num_rows,
string_generator{chars.data(), engine});
-auto [result_bitmask, null_count] = cudf::detail::valid_if(
-null_mask.begin(), null_mask.end() - 1, thrust::identity<bool>{}, cudf::get_default_stream());
+auto [result_bitmask, null_count] =
+cudf::detail::valid_if(null_mask.begin(),
+null_mask.end() - 1,
+thrust::identity<bool>{},
+cudf::get_default_stream(),
+rmm::mr::get_current_device_resource());
return cudf::make_strings_column(
num_rows,
std::move(offsets),
@@ -628,8 +636,11 @@ std::unique_ptr<cudf::column> create_random_column<cudf::struct_view>(data_profi
auto [null_mask, null_count] = [&]() {
if (profile.get_null_probability().has_value()) {
auto valids = valid_dist(engine, num_rows);
-return cudf::detail::valid_if(
-valids.begin(), valids.end(), thrust::identity<bool>{}, cudf::get_default_stream());
+return cudf::detail::valid_if(valids.begin(),
+valids.end(),
+thrust::identity<bool>{},
+cudf::get_default_stream(),
+rmm::mr::get_current_device_resource());
}
return std::pair<rmm::device_buffer, cudf::size_type>{};
}();
@@ -712,9 +723,12 @@ std::unique_ptr<cudf::column> create_random_column<cudf::list_view>(data_profile
auto offsets_column = std::make_unique<cudf::column>(
cudf::data_type{cudf::type_id::INT32}, num_rows + 1, offsets.release());

-auto [null_mask, null_count] = cudf::detail::valid_if(
-valids.begin(), valids.end(), thrust::identity<bool>{}, cudf::get_default_stream());
-list_column = cudf::make_lists_column(
+auto [null_mask, null_count] = cudf::detail::valid_if(valids.begin(),
+valids.end(),
+thrust::identity<bool>{},
+cudf::get_default_stream(),
+rmm::mr::get_current_device_resource());
+list_column = cudf::make_lists_column(
num_rows,
std::move(offsets_column),
std::move(current_child_column),
@@ -840,7 +854,8 @@ std::pair<rmm::device_buffer, cudf::size_type> create_random_null_mask(
return cudf::detail::valid_if(thrust::make_counting_iterator<cudf::size_type>(0),
thrust::make_counting_iterator<cudf::size_type>(size),
bool_generator{seed, 1.0 - *null_probability},
-cudf::get_default_stream());
+cudf::get_default_stream(),
+rmm::mr::get_current_device_resource());
}
}
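The hunks above all make the same mechanical change: the benchmark call sites of cudf::detail::valid_if now pass the CUDA stream and the device memory resource explicitly instead of relying on defaulted arguments. A minimal sketch of the resulting call shape follows; it is not part of this commit, and the wrapper function, its name, and the include list are illustrative assumptions (the code would live in a CUDA translation unit, e.g. a .cu file).

```cpp
// Sketch only: the post-change cudf::detail::valid_if call shape with an explicit
// stream and memory resource. Helper name and include list are assumptions.
#include <cudf/detail/valid_if.cuh>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>

#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/per_device_resource.hpp>

#include <thrust/functional.h>

#include <utility>

// Builds a null mask (and null count) from a device vector of per-row validity flags.
std::pair<rmm::device_buffer, cudf::size_type> make_null_mask(
  rmm::device_uvector<bool> const& valids)
{
  return cudf::detail::valid_if(valids.begin(),
                                valids.end(),
                                thrust::identity<bool>{},    // a row is valid when its flag is true
                                cudf::get_default_stream(),  // stream is now spelled out
                                rmm::mr::get_current_device_resource());  // and so is the memory resource
}
```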

8 changes: 4 additions & 4 deletions cpp/benchmarks/iterator/iterator.cu
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2019-2022, NVIDIA CORPORATION.
+* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -140,8 +140,8 @@ void BM_iterator(benchmark::State& state)
cudf::column_view hasnull_F = wrap_hasnull_F;

// Initialize dev_result to false
-auto dev_result =
-cudf::detail::make_zeroed_device_uvector_sync<TypeParam>(1, cudf::get_default_stream());
+auto dev_result = cudf::detail::make_zeroed_device_uvector_sync<TypeParam>(
+1, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
for (auto _ : state) {
cuda_event_timer raii(state, true); // flush_l2_cache = true, stream = 0
if (cub_or_thrust) {
@@ -210,7 +210,7 @@ void BM_pair_iterator(benchmark::State& state)

// Initialize dev_result to false
auto dev_result = cudf::detail::make_zeroed_device_uvector_sync<thrust::pair<T, bool>>(
-1, cudf::get_default_stream());
+1, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
for (auto _ : state) {
cuda_event_timer raii(state, true); // flush_l2_cache = true, stream = 0
if (cub_or_thrust) {
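Both BM_iterator and BM_pair_iterator get the same treatment: cudf::detail::make_zeroed_device_uvector_sync is now called with an explicit stream and memory resource. A minimal sketch of the new call shape is below; it is not part of this commit, and the helper name and include paths are illustrative assumptions.

```cpp
// Sketch only: make_zeroed_device_uvector_sync with the stream and memory resource
// passed explicitly. Helper name and include paths are assumptions.
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/utilities/default_stream.hpp>

#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/per_device_resource.hpp>

// Allocates a single zero-initialized device element for a benchmark to accumulate into.
rmm::device_uvector<int> make_result_slot()
{
  return cudf::detail::make_zeroed_device_uvector_sync<int>(
    1, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
}
```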
7 changes: 5 additions & 2 deletions cpp/benchmarks/join/join_common.hpp
@@ -104,8 +104,11 @@ void BM_join(state_type& state, Join JoinFunc)
// roughly 75% nulls
auto validity =
thrust::make_transform_iterator(thrust::make_counting_iterator(0), null75_generator{});
-return cudf::detail::valid_if(
-validity, validity + size, thrust::identity<bool>{}, cudf::get_default_stream())
+return cudf::detail::valid_if(validity,
+validity + size,
+thrust::identity<bool>{},
+cudf::get_default_stream(),
+rmm::mr::get_current_device_resource())
.first;
};

(Diff truncated; the remaining changed files are not shown here.)
