Use cuda::proclaim_return_type on device lambdas. (#5696)
This PR updates the parts of the code that require `cuda::proclaim_return_type` for compatibility with CCCL 2.2.0 (Thrust). It pulls out part of the diff of #5623. I left out the part that is needed to upgrade to CUB 2.2.0, because those changes will have to go into a separate PR that updates to CCCL 2.2.0.
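
For context, here is a minimal sketch of the before/after pattern, assuming a plain `thrust::transform` call; the `scale_inplace` function and all names in it are illustrative, not code from this repository:

```cpp
// Minimal illustrative sketch, not code from this PR. Requires nvcc with
// --extended-lambda. The return type of an extended __device__ lambda cannot
// generally be deduced from host code, so Thrust in CCCL 2.2.0 expects it to
// be proclaimed explicitly with cuda::proclaim_return_type.
#include <cuda/functional>
#include <thrust/device_vector.h>
#include <thrust/transform.h>

void scale_inplace(thrust::device_vector<float>& v, float s)
{
  // Before: the bare lambda, whose return type newer Thrust may fail to deduce.
  // thrust::transform(v.begin(), v.end(), v.begin(),
  //                   [=] __device__(float x) { return x * s; });

  // After: wrap the lambda so the result type (float) is known on the host.
  thrust::transform(v.begin(),
                    v.end(),
                    v.begin(),
                    cuda::proclaim_return_type<float>(
                      [=] __device__(float x) { return x * s; }));
}
```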

I also added explicit CMake dependencies on Thrust and RMM. Without these, cuml relies on RAFT to supply both transitively, which makes it very difficult to test upstream changes to Thrust and RMM.

Authors:
  - Bradley Dice (https://github.com/bdice)

Approvers:
  - Divye Gala (https://github.com/divyegala)
  - Vyas Ramasubramani (https://github.com/vyasr)
  - Dante Gama Dessavre (https://github.com/dantegd)

URL: #5696
bdice authored Dec 12, 2023 · 1 parent c524553 · commit cb45b27
Showing 4 changed files with 79 additions and 24 deletions.
cpp/CMakeLists.txt: 3 additions & 0 deletions
@@ -220,7 +220,10 @@ if(BUILD_CUML_TESTS OR BUILD_PRIMS_TESTS)
   find_package(Threads)
 endif()
 
+# thrust before rmm, rmm before raft so we get the right version of thrust/rmm
+include(cmake/thirdparty/get_thrust.cmake)
 include(cmake/thirdparty/get_libcudacxx.cmake)
+include(cmake/thirdparty/get_rmm.cmake)
 include(cmake/thirdparty/get_raft.cmake)
 
 if(LINK_TREELITE)
cpp/cmake/thirdparty/get_rmm.cmake: 23 additions & 0 deletions (new file)
@@ -0,0 +1,23 @@
#=============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================

function(find_and_configure_rmm)
include(${rapids-cmake-dir}/cpm/rmm.cmake)
rapids_cpm_rmm(BUILD_EXPORT_SET cuml-exports
INSTALL_EXPORT_SET cuml-exports)
endfunction()

find_and_configure_rmm()
cpp/cmake/thirdparty/get_thrust.cmake: 23 additions & 0 deletions (new file)
@@ -0,0 +1,23 @@
# =============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================

# Use CPM to find or clone thrust
function(find_and_configure_thrust)
include(${rapids-cmake-dir}/cpm/thrust.cmake)
rapids_cpm_thrust(NAMESPACE cuml
BUILD_EXPORT_SET cuml-exports
INSTALL_EXPORT_SET cuml-exports)
endfunction()

find_and_configure_thrust()
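
A note on the wrapper above: as provided by rapids-cmake, `rapids_cpm_thrust(NAMESPACE cuml ...)` exposes Thrust as a `cuml::Thrust` target, and the `BUILD_EXPORT_SET`/`INSTALL_EXPORT_SET` arguments record Thrust (and, via get_rmm.cmake, RMM) in cuml's exported CMake configuration, so consumers resolve these dependencies through cuml itself rather than transitively through RAFT.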
cpp/src/kmeans/kmeans_mg_impl.cuh: 30 additions & 24 deletions
@@ -26,6 +26,7 @@
 #include <rmm/device_scalar.hpp>
 #include <rmm/device_uvector.hpp>
 
+#include <cuda/functional>
 #include <ml_cuda_utils.h>
 #include <thrust/execution_policy.h>
 #include <thrust/fill.h>
@@ -241,7 +242,8 @@ void initKMeansPlusPlus(const raft::handle_t& handle,
     minClusterDistance.view(),
     workspace,
     clusterCost.view(),
-    [] __device__(const DataT& a, const DataT& b) { return a + b; });
+    cuda::proclaim_return_type<DataT>(
+      [] __device__(const DataT& a, const DataT& b) { return a + b; }));
 
   // compute total cluster cost by accumulating the partial cost from all the
   // ranks
@@ -291,7 +293,8 @@ void initKMeansPlusPlus(const raft::handle_t& handle,
     minClusterDistance.view(),
     workspace,
     clusterCost.view(),
-    [] __device__(const DataT& a, const DataT& b) { return a + b; });
+    cuda::proclaim_return_type<DataT>(
+      [] __device__(const DataT& a, const DataT& b) { return a + b; }));
   comm.allreduce(
     clusterCost.data_handle(), clusterCost.data_handle(), 1, raft::comms::op_t::SUM, stream);
   raft::copy(&psi, clusterCost.data_handle(), 1, stream);
@@ -481,7 +484,7 @@ void checkWeights(const raft::handle_t& handle,
     weight.data_handle(),
     weight.data_handle(),
     weight.size(),
-    [=] __device__(const DataT& wt) { return wt * scale; },
+    cuda::proclaim_return_type<DataT>([=] __device__(const DataT& wt) { return wt * scale; }),
     stream);
   }
 }
@@ -621,12 +624,12 @@ void fit(const raft::handle_t& handle,
     newCentroids.extent(0),
     true,
     false,
-    [=] __device__(DataT mat, DataT vec) {
+    cuda::proclaim_return_type<DataT>([=] __device__(DataT mat, DataT vec) {
       if (vec == 0)
         return DataT(0);
       else
         return mat / vec;
-    },
+    }),
     stream);
 
   // copy the centroids[i] to newCentroids[i] when wtInCluster[i] is 0
@@ -639,16 +642,18 @@
     itr_wt,
     wtInCluster.extent(0),
     newCentroids.data_handle(),
-    [=] __device__(raft::KeyValuePair<ptrdiff_t, DataT> map) {  // predicate
-      // copy when the # of samples in the cluster is 0
-      if (map.value == 0)
-        return true;
-      else
-        return false;
-    },
-    [=] __device__(raft::KeyValuePair<ptrdiff_t, DataT> map) {  // map
-      return map.key;
-    },
+    cuda::proclaim_return_type<bool>(
+      [=] __device__(raft::KeyValuePair<ptrdiff_t, DataT> map) {  // predicate
+        // copy when the # of samples in the cluster is 0
+        if (map.value == 0)
+          return true;
+        else
+          return false;
+      }),
+    cuda::proclaim_return_type<ptrdiff_t>(
+      [=] __device__(raft::KeyValuePair<ptrdiff_t, DataT> map) {  // map
+        return map.key;
+      }),
     stream);
 
   // compute the squared norm between the newCentroids and the original
@@ -657,10 +662,10 @@
   raft::linalg::mapThenSumReduce(
     sqrdNorm.data_handle(),
     newCentroids.size(),
-    [=] __device__(const DataT a, const DataT b) {
+    cuda::proclaim_return_type<DataT>([=] __device__(const DataT a, const DataT b) {
       DataT diff = a - b;
       return diff * diff;
-    },
+    }),
     stream,
     centroids.data_handle(),
     newCentroids.data_handle());
@@ -680,13 +685,14 @@
     minClusterAndDistance.view(),
     workspace,
     raft::make_device_scalar_view(clusterCostD.data()),
-    [] __device__(const raft::KeyValuePair<IndexT, DataT>& a,
-                  const raft::KeyValuePair<IndexT, DataT>& b) {
-      raft::KeyValuePair<IndexT, DataT> res;
-      res.key = 0;
-      res.value = a.value + b.value;
-      return res;
-    });
+    cuda::proclaim_return_type<raft::KeyValuePair<IndexT, DataT>>(
+      [] __device__(const raft::KeyValuePair<IndexT, DataT>& a,
+                    const raft::KeyValuePair<IndexT, DataT>& b) {
+        raft::KeyValuePair<IndexT, DataT> res;
+        res.key = 0;
+        res.value = a.value + b.value;
+        return res;
+      }));
 
   // Cluster cost phi_x(C) from all ranks
   comm.allreduce(&(clusterCostD.data()->value),
