Convert tests to use device_uvector #8205

Merged
29 commits merged on May 12, 2021

Changes from all commits

Commits (29)
1f6ec29
Convert to uvector in bitmask_tests.cu
harrism May 4, 2021
2708c01
Rename bitmask_tests .cu -> .cpp
harrism May 4, 2021
e95078b
Convert compound_test to uvector
harrism May 4, 2021
b6a212e
copyright
harrism May 4, 2021
afe19e7
Add `make_host_vector` factories
harrism May 4, 2021
3457ef0
Convert device_atomics_test to device_uvector
harrism May 4, 2021
45c06db
Merge branch 'branch-0.20' into fea_tests_uvector
harrism May 6, 2021
d307b55
Convert fixed_point_tests
harrism May 6, 2021
944cb3a
Convert map tests
harrism May 6, 2021
884b29b
Convert iterator tests to device_uvector
harrism May 6, 2021
531f591
Convert set_nullmask_tests to uvector
harrism May 6, 2021
8410543
Allow constructing a span from a `std::string`
harrism May 6, 2021
3620171
Convert value_iterator_tests.cu
harrism May 6, 2021
969b690
Unused header
harrism May 6, 2021
c0c46d4
Convert scalar test
harrism May 6, 2021
96b37a4
Convert strings array_tests.cu
harrism May 6, 2021
fd1e508
Convert strings_tests
harrism May 6, 2021
af34140
convert table_view_tests
harrism May 6, 2021
77841df
integers_tests.cu --> .cpp
harrism May 6, 2021
f8d7b54
Merge branch 'branch-0.20' into fea_tests_uvector
harrism May 10, 2021
6acd404
Convert timestamps_test
harrism May 11, 2021
05e4ec2
Change `make_type_param_vector` to return a thrust::host_vector rathe…
harrism May 11, 2021
4e5af1c
Fix map_test race condition
harrism May 11, 2021
e6e622f
Convert type_dispatcher_test to uvector
harrism May 11, 2021
fa9856b
Convert column_utilities.cu to uvector
harrism May 11, 2021
c48ab13
Convert value_iterator_test* to uvector
harrism May 11, 2021
3536c8f
Remove anonymous namespace
harrism May 11, 2021
d26371d
Require explicit stream parameter to async vector factories
harrism May 11, 2021
d186687
Remove optional default stream from exec_policy calls
harrism May 11, 2021
116 changes: 102 additions & 14 deletions cpp/include/cudf/detail/utilities/vector_factories.hpp
@@ -47,7 +47,7 @@ namespace detail {
template <typename T>
rmm::device_uvector<T> make_zeroed_device_uvector_async(
std::size_t size,
rmm::cuda_stream_view stream = rmm::cuda_stream_default,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
rmm::device_uvector<T> ret(size, stream, mr);
@@ -93,7 +93,7 @@ rmm::device_uvector<T> make_zeroed_device_uvector_sync(
template <typename T>
rmm::device_uvector<T> make_device_uvector_async(
host_span<T const> source_data,
rmm::cuda_stream_view stream = rmm::cuda_stream_default,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
rmm::device_uvector<T> ret(source_data.size(), stream, mr);
@@ -124,7 +124,7 @@ template <typename Container,
host_span<typename Container::value_type const>>::value>* = nullptr>
rmm::device_uvector<typename Container::value_type> make_device_uvector_async(
Container const& c,
rmm::cuda_stream_view stream = rmm::cuda_stream_default,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
return make_device_uvector_async(host_span<typename Container::value_type const>{c}, stream, mr);
@@ -177,7 +177,7 @@ template <
nullptr>
rmm::device_uvector<typename Container::value_type> make_device_uvector_async(
Container const& c,
rmm::cuda_stream_view stream = rmm::cuda_stream_default,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
return make_device_uvector_async(
@@ -281,6 +281,16 @@ rmm::device_uvector<typename Container::value_type> make_device_uvector_sync(
return make_device_uvector_sync(device_span<typename Container::value_type const>{c}, stream, mr);
}

// Utility function template to allow copying to either a thrust::host_vector or std::vector
template <typename T, typename OutContainer>
OutContainer make_vector_async(device_span<T const> v, rmm::cuda_stream_view stream)
{
OutContainer result(v.size());
CUDA_TRY(cudaMemcpyAsync(
result.data(), v.data(), v.size() * sizeof(T), cudaMemcpyDeviceToHost, stream.value()));
return result;
}

/**
* @brief Asynchronously construct a `std::vector` containing a copy of data from a
* `device_span`
@@ -293,13 +303,9 @@ rmm::device_uvector<typename Container::value_type> make_device_uvector_sync(
* @return The data copied to the host
*/
template <typename T>
std::vector<T> make_std_vector_async(device_span<T const> v,
rmm::cuda_stream_view stream = rmm::cuda_stream_default)
std::vector<T> make_std_vector_async(device_span<T const> v, rmm::cuda_stream_view stream)
{
std::vector<T> result(v.size());
CUDA_TRY(cudaMemcpyAsync(
result.data(), v.data(), v.size() * sizeof(T), cudaMemcpyDeviceToHost, stream.value()));
return result;
return make_vector_async<T, std::vector<T>>(v, stream);
}

/**
@@ -319,8 +325,8 @@ template <
std::enable_if_t<
std::is_convertible<Container, device_span<typename Container::value_type const>>::value>* =
nullptr>
std::vector<typename Container::value_type> make_std_vector_async(
Container const& c, rmm::cuda_stream_view stream = rmm::cuda_stream_default)
std::vector<typename Container::value_type> make_std_vector_async(Container const& c,
rmm::cuda_stream_view stream)
{
return make_std_vector_async(device_span<typename Container::value_type const>{c}, stream);
}
@@ -337,8 +343,7 @@ std::vector<typename Container::value_type> make_std_vector_async(
* @return The data copied to the host
*/
template <typename T>
std::vector<T> make_std_vector_sync(device_span<T const> v,
rmm::cuda_stream_view stream = rmm::cuda_stream_default)
std::vector<T> make_std_vector_sync(device_span<T const> v, rmm::cuda_stream_view stream)
{
auto result = make_std_vector_async(v, stream);
stream.synchronize();
@@ -368,6 +373,89 @@ std::vector<typename Container::value_type> make_std_vector_sync(
return make_std_vector_sync(device_span<typename Container::value_type const>{c}, stream);
}

/**
* @brief Asynchronously construct a `thrust::host_vector` containing a copy of data from a
* `device_span`
*
* @note This function does not synchronize `stream`.
*
* @tparam T The type of the data to copy
* @param v The device data to copy
* @param stream The stream on which to perform the copy
* @return The data copied to the host
*/
template <typename T>
thrust::host_vector<T> make_host_vector_async(device_span<T const> v, rmm::cuda_stream_view stream)
{
return make_vector_async<T, thrust::host_vector<T>>(v, stream);
}

/**
* @brief Asynchronously construct a `thrust::host_vector` containing a copy of data from a
* device container
*
* @note This function does not synchronize `stream`.
*
* @tparam Container The type of the container to copy from
* @tparam T The type of the data to copy
* @param c The input device container from which to copy
* @param stream The stream on which to perform the copy
* @return The data copied to the host
*/
template <
typename Container,
std::enable_if_t<
std::is_convertible<Container, device_span<typename Container::value_type const>>::value>* =
nullptr>
thrust::host_vector<typename Container::value_type> make_host_vector_async(
Container const& c, rmm::cuda_stream_view stream)
{
return make_host_vector_async(device_span<typename Container::value_type const>{c}, stream);
}

/**
* @brief Synchronously construct a `thrust::host_vector` containing a copy of data from a
* `device_span`
*
* @note This function synchronizes `stream`.
*
* @tparam T The type of the data to copy
* @param v The device data to copy
* @param stream The stream on which to perform the copy
* @return The data copied to the host
*/
template <typename T>
thrust::host_vector<T> make_host_vector_sync(
device_span<T const> v, rmm::cuda_stream_view stream = rmm::cuda_stream_default)
{
auto result = make_host_vector_async(v, stream);
stream.synchronize();
return result;
}

/**
* @brief Synchronously construct a `thrust::host_vector` containing a copy of data from a
* device container
*
* @note This function synchronizes `stream`.
*
* @tparam Container The type of the container to copy from
* @tparam T The type of the data to copy
* @param c The input device container from which to copy
* @param stream The stream on which to perform the copy
* @return The data copied to the host
*/
template <
typename Container,
std::enable_if_t<
std::is_convertible<Container, device_span<typename Container::value_type const>>::value>* =
nullptr>
thrust::host_vector<typename Container::value_type> make_host_vector_sync(
Container const& c, rmm::cuda_stream_view stream = rmm::cuda_stream_default)
{
return make_host_vector_sync(device_span<typename Container::value_type const>{c}, stream);
}

} // namespace detail

} // namespace cudf
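As the doc comments above note, the `*_async` factories only enqueue the copy on `stream` and do not synchronize, while the `*_sync` variants synchronize before returning. A short, hedged sketch of how the new `make_host_vector_*` factories might be called (function and variable names are illustrative, not part of the diff):

```cpp
// Illustrative only: copying device results back to the host with the new factories.
#include <cudf/detail/utilities/vector_factories.hpp>

#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>

void copy_back_sketch(rmm::device_uvector<float> const& d_vals, rmm::cuda_stream_view stream)
{
  // Asynchronous: the copy is enqueued on `stream` and may not be complete on return.
  auto h_async = cudf::detail::make_host_vector_async(d_vals, stream);
  stream.synchronize();  // caller must synchronize before reading h_async

  // Synchronous: equivalent to the two lines above in a single call.
  auto h_sync = cudf::detail::make_host_vector_sync(d_vals, stream);
}
```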
5 changes: 5 additions & 0 deletions cpp/include/cudf/utilities/span.hpp
@@ -120,6 +120,11 @@ struct is_host_span_supported_container< //
thrust::host_vector<T, Alloc>> : std::true_type {
};

template <typename T, typename Alloc>
struct is_host_span_supported_container< //
std::basic_string<T, std::char_traits<T>, Alloc>> : std::true_type {
};

template <typename T, std::size_t Extent = cudf::dynamic_extent>
struct host_span : public cudf::detail::span_base<T, Extent, host_span<T, Extent>> {
using base = cudf::detail::span_base<T, Extent, host_span<T, Extent>>;
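The new `std::basic_string` specialization means a `std::string` now satisfies the host-container requirement for `host_span`, so string contents can be handed straight to the host-to-device factories. A hedged sketch of the intended usage (variable names are illustrative):

```cpp
// Sketch: treating a std::string as a host_span<char const> and copying it to device.
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/utilities/span.hpp>

#include <rmm/cuda_stream_view.hpp>

#include <string>

void string_span_sketch(rmm::cuda_stream_view stream)
{
  std::string const text = "device_uvector";

  // std::basic_string is now a supported host_span container.
  cudf::host_span<char const> chars{text};

  // Copy the characters to device memory on the given stream.
  auto d_chars = cudf::detail::make_device_uvector_async(chars, stream);
}
```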
11 changes: 7 additions & 4 deletions cpp/include/cudf_test/type_lists.hpp
@@ -25,6 +25,8 @@
#include <cudf/wrappers/timestamps.hpp>
#include <cudf_test/type_list_utilities.hpp>

#include <thrust/host_vector.h>

#include <array>
#include <tuple>

@@ -79,10 +81,10 @@ constexpr auto types_to_ids()
template <typename TypeParam, typename T>
typename std::enable_if<cudf::is_fixed_width<TypeParam>() &&
!cudf::is_timestamp_t<TypeParam>::value,
std::vector<TypeParam>>::type
thrust::host_vector<TypeParam>>::type
make_type_param_vector(std::initializer_list<T> const& init_list)
{
std::vector<TypeParam> vec(init_list.size());
thrust::host_vector<TypeParam> vec(init_list.size());
std::transform(std::cbegin(init_list), std::cend(init_list), std::begin(vec), [](auto const& e) {
if (std::is_unsigned<TypeParam>::value)
return static_cast<TypeParam>(std::abs(e));
@@ -93,10 +95,11 @@ make_type_param_vector(std::initializer_list<T> const& init_list)
}

template <typename TypeParam, typename T>
typename std::enable_if<cudf::is_timestamp_t<TypeParam>::value, std::vector<TypeParam>>::type
typename std::enable_if<cudf::is_timestamp_t<TypeParam>::value,
thrust::host_vector<TypeParam>>::type
make_type_param_vector(std::initializer_list<T> const& init_list)
{
std::vector<TypeParam> vec(init_list.size());
thrust::host_vector<TypeParam> vec(init_list.size());
std::transform(std::cbegin(init_list), std::cend(init_list), std::begin(vec), [](auto const& e) {
return TypeParam{typename TypeParam::duration{e}};
});
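Returning a `thrust::host_vector` from `make_type_param_vector` means the result is a supported `host_span` container, so typed tests can pass it directly to the `device_uvector` factories. A hypothetical usage sketch (not taken from the PR's tests):

```cpp
// Hypothetical typed-test snippet using the new thrust::host_vector return type.
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf_test/type_lists.hpp>

#include <rmm/cuda_stream_view.hpp>

template <typename T>
void typed_test_sketch(rmm::cuda_stream_view stream)
{
  // Now a thrust::host_vector<T> rather than a std::vector<T>.
  auto h_data = cudf::test::make_type_param_vector<T>({1, 2, 3, 4});

  // thrust::host_vector is a supported host_span container, so this converts directly.
  auto d_data = cudf::detail::make_device_uvector_async(h_data, stream);
}
```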
11 changes: 3 additions & 8 deletions cpp/src/copying/concatenate.cu
@@ -57,9 +57,6 @@ auto create_device_views(host_span<column_view const> views, rmm::cuda_stream_vi
column_device_view::create(std::declval<column_view>(), std::declval<rmm::cuda_stream_view>()));
auto device_view_owners = std::vector<CDViewPtr>(views.size());
std::transform(views.begin(), views.end(), device_view_owners.begin(), [stream](auto const& col) {
// TODO creating this device view can invoke null count computation
// even though it isn't used. See this issue:
// https://github.com/rapidsai/cudf/issues/4368
return column_device_view::create(col, stream);
});

@@ -70,10 +67,8 @@ auto create_device_views(host_span<column_view const> views, rmm::cuda_stream_vi
device_view_owners.cend(),
std::back_inserter(device_views),
[](auto const& col) { return *col; });
// TODO each of these device vector copies invoke stream synchronization
// which appears to add unnecessary overhead. See this issue:
// https://github.com/rapidsai/rmm/issues/120
auto d_views = make_device_uvector_async(device_views);

auto d_views = make_device_uvector_async(device_views, stream);

// Compute the partition offsets
auto offsets = thrust::host_vector<size_t>(views.size() + 1);
@@ -84,7 +79,7 @@ auto create_device_views(host_span<column_view const> views, rmm::cuda_stream_vi
std::next(offsets.begin()),
[](auto const& col) { return col.size(); },
thrust::plus<size_t>{});
auto d_offsets = make_device_uvector_async(offsets);
auto d_offsets = make_device_uvector_async(offsets, stream);
auto const output_size = offsets.back();

return std::make_tuple(
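The concatenate change mirrors the new factory signatures: the copies are enqueued on the stream the rest of the function already uses, rather than implicitly on `rmm::cuda_stream_default`. A small sketch of the calling pattern (placeholder names only):

```cpp
// Placeholder-name sketch: threading the caller's stream through the factory call.
#include <cudf/detail/utilities/vector_factories.hpp>

#include <rmm/cuda_stream_view.hpp>

#include <thrust/host_vector.h>

#include <cstddef>

void build_offsets_sketch(thrust::host_vector<std::size_t> const& offsets,
                          rmm::cuda_stream_view stream)
{
  // Enqueued on the same stream as the surrounding work; no hidden
  // synchronization with the default stream.
  auto d_offsets = cudf::detail::make_device_uvector_async(offsets, stream);
}
```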
4 changes: 2 additions & 2 deletions cpp/tests/CMakeLists.txt
@@ -342,7 +342,7 @@ ConfigureTest(STRINGS_TEST
strings/fixed_point_tests.cpp
strings/floats_tests.cpp
strings/hash_string.cu
strings/integers_tests.cu
strings/integers_tests.cpp
strings/ipv4_tests.cpp
strings/json_tests.cpp
strings/pad_tests.cpp
@@ -375,7 +375,7 @@ ConfigureTest(TEXT_TEST
ConfigureTest(BITMASK_TEST
bitmask/valid_if_tests.cu
bitmask/set_nullmask_tests.cu
bitmask/bitmask_tests.cu
bitmask/bitmask_tests.cpp
bitmask/is_element_valid_tests.cpp)

