
Merge branch 'rapidsai:branch-24.06' into basic_tutorial
acostadon authored May 22, 2024
2 parents ff09b77 + ddfaacf commit 0414984
Showing 96 changed files with 3,482 additions and 1,029 deletions.
5 changes: 5 additions & 0 deletions .devcontainer/cuda11.8-conda/devcontainer.json
@@ -8,6 +8,11 @@
"BASE": "rapidsai/devcontainers:24.06-cpp-cuda11.8-mambaforge-ubuntu22.04"
}
},
"runArgs": [
"--rm",
"--name",
"${localEnv:USER}-rapids-${localWorkspaceFolderBasename}-24.06-cuda11.8-conda"
],
"hostRequirements": {"gpu": "optional"},
"features": {
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils:24.6": {}
7 changes: 6 additions & 1 deletion .devcontainer/cuda11.8-pip/devcontainer.json
@@ -8,10 +8,15 @@
"BASE": "rapidsai/devcontainers:24.06-cpp-cuda11.8-ubuntu22.04"
}
},
"runArgs": [
"--rm",
"--name",
"${localEnv:USER}-rapids-${localWorkspaceFolderBasename}-24.06-cuda11.8-pip"
],
"hostRequirements": {"gpu": "optional"},
"features": {
"ghcr.io/rapidsai/devcontainers/features/ucx:24.6": {
"version": "1.14.1"
"version": "1.15.0"
},
"ghcr.io/rapidsai/devcontainers/features/cuda:24.6": {
"version": "11.8",
5 changes: 5 additions & 0 deletions .devcontainer/cuda12.2-conda/devcontainer.json
@@ -8,6 +8,11 @@
"BASE": "rapidsai/devcontainers:24.06-cpp-mambaforge-ubuntu22.04"
}
},
"runArgs": [
"--rm",
"--name",
"${localEnv:USER}-rapids-${localWorkspaceFolderBasename}-24.06-cuda12.2-conda"
],
"hostRequirements": {"gpu": "optional"},
"features": {
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils:24.6": {}
7 changes: 6 additions & 1 deletion .devcontainer/cuda12.2-pip/devcontainer.json
@@ -8,10 +8,15 @@
"BASE": "rapidsai/devcontainers:24.06-cpp-cuda12.2-ubuntu22.04"
}
},
"runArgs": [
"--rm",
"--name",
"${localEnv:USER}-rapids-${localWorkspaceFolderBasename}-24.06-cuda12.2-pip"
],
"hostRequirements": {"gpu": "optional"},
"features": {
"ghcr.io/rapidsai/devcontainers/features/ucx:24.6": {
"version": "1.14.1"
"version": "1.15.0"
},
"ghcr.io/rapidsai/devcontainers/features/cuda:24.6": {
"version": "12.2",
1 change: 1 addition & 0 deletions ci/release/update-version.sh
@@ -105,6 +105,7 @@ find .devcontainer/ -type f -name devcontainer.json -print0 | while IFS= read -r
sed_runner "s@rapidsai/devcontainers/features/ucx:[0-9.]*@rapidsai/devcontainers/features/ucx:${NEXT_SHORT_TAG_PEP440}@" "${filename}"
sed_runner "s@rapidsai/devcontainers/features/cuda:[0-9.]*@rapidsai/devcontainers/features/cuda:${NEXT_SHORT_TAG_PEP440}@" "${filename}"
sed_runner "s@rapidsai/devcontainers/features/rapids-build-utils:[0-9.]*@rapidsai/devcontainers/features/rapids-build-utils:${NEXT_SHORT_TAG_PEP440}@" "${filename}"
sed_runner "s@rapids-\${localWorkspaceFolderBasename}-[0-9.]*@rapids-\${localWorkspaceFolderBasename}-${NEXT_SHORT_TAG}@g" "${filename}"
done

sed_runner "s/:[0-9][0-9]\.[0-9][0-9]/:${NEXT_SHORT_TAG}/" ./notebooks/README.md
2 changes: 1 addition & 1 deletion ci/test_wheel_cugraph-pyg.sh
@@ -33,7 +33,7 @@ else
fi
rapids-logger "Installing PyTorch and PyG dependencies"
rapids-retry python -m pip install torch==2.1.0 --index-url ${PYTORCH_URL}
- rapids-retry python -m pip install torch-geometric==2.4.0
+ rapids-retry python -m pip install "torch-geometric>=2.5,<2.6"
rapids-retry python -m pip install \
ogb \
pyg_lib \
2 changes: 1 addition & 1 deletion conda/recipes/cugraph-pyg/meta.yaml
@@ -34,7 +34,7 @@ requirements:
- cupy >=12.0.0
- cugraph ={{ version }}
- pylibcugraphops ={{ minor_version }}
- - pyg >=2.3,<2.5
+ - pyg >=2.5,<2.6

tests:
imports:
18 changes: 13 additions & 5 deletions cpp/CMakeLists.txt
@@ -77,6 +77,16 @@ rapids_find_package(CUDAToolkit REQUIRED
INSTALL_EXPORT_SET cugraph-exports
)

if (BUILD_CUGRAPH_MTMG_TESTS)
if(NOT TARGET ucx::ucp)
find_package(ucx REQUIRED)
endif()

if(NOT TARGET ucxx::ucxx)
find_package(ucxx REQUIRED)
endif()
endif()

set(CUGRAPH_C_FLAGS "")
set(CUGRAPH_CXX_FLAGS "")
set(CUGRAPH_CUDA_FLAGS "")
@@ -145,11 +155,6 @@ if(USE_CUGRAPH_OPS)
include(cmake/thirdparty/get_libcugraphops.cmake)
endif()


- if (BUILD_CUGRAPH_MTMG_TESTS)
- include(cmake/thirdparty/get_ucp.cmake)
- endif()

if(BUILD_TESTS)
include(${rapids-cmake-dir}/cpm/gtest.cmake)
rapids_cpm_gtest(BUILD_STATIC)
@@ -283,9 +288,12 @@ set(CUGRAPH_SOURCES
src/structure/symmetrize_edgelist_mg.cu
src/community/triangle_count_sg.cu
src/community/triangle_count_mg.cu
+ src/community/approx_weighted_matching_sg.cu
+ src/community/approx_weighted_matching_mg.cu
src/traversal/k_hop_nbrs_sg.cu
src/traversal/k_hop_nbrs_mg.cu
src/mtmg/vertex_result.cu
+ src/mtmg/vertex_pairs_result.cu
)

if(USE_CUGRAPH_OPS)
35 changes: 0 additions & 35 deletions cpp/cmake/thirdparty/get_ucp.cmake

This file was deleted.

26 changes: 26 additions & 0 deletions cpp/include/cugraph/algorithms.hpp
@@ -2368,6 +2368,32 @@ rmm::device_uvector<vertex_t> vertex_coloring(
graph_view_t<vertex_t, edge_t, false, multi_gpu> const& graph_view,
raft::random::RngState& rng_state);

/**
* @brief Approximate Weighted Matching
*
* A matching in an undirected graph G = (V, E) is a pairing of adjacent vertices
* such that each vertex is matched with at most one other vertex, the objective
* being to match as many vertices as possible or to maximize the sum of the
* weights of the matched edges. Here we provide an implementation of an
* approximation algorithm for weighted maximum matching. See
* https://web.archive.org/web/20081031230449id_/http://www.ii.uib.no/~fredrikm/fredrik/papers/CP75.pdf
* for further information.
*
* @tparam vertex_t Type of vertex identifiers. Needs to be an integral type.
* @tparam edge_t Type of edge identifiers. Needs to be an integral type.
* @tparam weight_t Type of edge weights. Needs to be a floating point type.
* @tparam multi_gpu Flag indicating whether template instantiation should target single-GPU (false)
* or multi-GPU (true).
* @param[in] handle RAFT handle object to encapsulate resources (e.g. CUDA stream, communicator,
* and handles to various CUDA libraries) to run graph algorithms.
* @param[in] graph_view Graph view object.
* @param[in] edge_weight_view View object holding edge weights for @p graph_view.
* @return A tuple of the device vector of matched vertex IDs and the sum of the weights of the
* matched edges.
*/
template <typename vertex_t, typename edge_t, typename weight_t, bool multi_gpu>
std::tuple<rmm::device_uvector<vertex_t>, weight_t> approximate_weighted_matching(
raft::handle_t const& handle,
graph_view_t<vertex_t, edge_t, false, multi_gpu> const& graph_view,
edge_property_view_t<edge_t, weight_t const*> edge_weight_view);
} // namespace cugraph

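For illustration, a minimal sketch (not part of this diff) of calling the new entry point; the `match_example` wrapper, the `int32_t`/`float` instantiation, and the pre-built graph inputs are assumptions:

```cpp
#include <cugraph/algorithms.hpp>
#include <cugraph/graph_view.hpp>

#include <raft/core/handle.hpp>

// Assumes `graph_view` and `edge_weight_view` were constructed elsewhere
// (e.g. via cugraph::create_graph_from_edgelist).
void match_example(
  raft::handle_t const& handle,
  cugraph::graph_view_t<int32_t, int32_t, false, false> const& graph_view,
  cugraph::edge_property_view_t<int32_t, float const*> edge_weight_view)
{
  // partners[v] is the vertex matched with v; matching_weight is the sum of
  // the weights of the matched edges.
  auto [partners, matching_weight] =
    cugraph::approximate_weighted_matching<int32_t, int32_t, float, false>(
      handle, graph_view, edge_weight_view);
}
```

The trailing `false` template argument selects the single-GPU instantiation governed by the `multi_gpu` flag documented above.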
8 changes: 4 additions & 4 deletions cpp/include/cugraph/detail/shuffle_wrappers.hpp
@@ -213,12 +213,12 @@ shuffle_int_vertex_value_pairs_to_local_gpu_by_vertex_partitioning(
*
* @param[in] handle RAFT handle object to encapsulate resources (e.g. CUDA stream, communicator,
* and handles to various CUDA libraries) to run graph algorithms.
- * @param[in/out] d_edgelist_majors Vertex IDs for sources (if we are internally storing edges in
+ * @param[in,out] d_edgelist_majors Vertex IDs for sources (if we are internally storing edges in
* the sparse 2D matrix using sources as major indices) or destinations (otherwise)
- * @param[in/out] d_edgelist_minors Vertex IDs for destinations (if we are internally storing edges
+ * @param[in,out] d_edgelist_minors Vertex IDs for destinations (if we are internally storing edges
* in the sparse 2D matrix using sources as major indices) or sources (otherwise)
- * @param[in/out] d_edgelist_weights Optional edge weights
- * @param[in/out] d_edgelist_id_type_pairs Optional edge (ID, type) pairs
+ * @param[in,out] d_edgelist_weights Optional edge weights
+ * @param[in,out] d_edgelist_id_type_pairs Optional edge (ID, type) pairs
* @param[in] groupby_and_count_local_partition_by_minor If set to true, groupby and count edges
* based on (local partition ID, GPU ID) pairs (where GPU IDs are computed by applying the
* compute_gpu_id_from_vertex_t function to the minor vertex ID). If set to false, groupby and count
cpp/include/cugraph/mtmg/detail/device_shared_device_span.hpp
@@ -25,12 +25,7 @@ namespace mtmg {
namespace detail {

/**
- * @brief Wrap an object to be available for each GPU
- *
- * In the MTMG environment we need the ability to manage a collection of objects
- * that are associated with a particular GPU, and fetch the objects from an
- * arbitrary GPU thread. This object will wrap any object and allow it to be
- * accessed from different threads.
+ * @brief Manage device spans on each GPU
*/
template <typename T>
using device_shared_device_span_t = device_shared_wrapper_t<raft::device_span<T>>;
36 changes: 36 additions & 0 deletions cpp/include/cugraph/mtmg/detail/device_shared_device_span_tuple.hpp
@@ -0,0 +1,36 @@
/*
* Copyright (c) 2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#pragma once

#include <cugraph/mtmg/detail/device_shared_wrapper.hpp>

#include <raft/core/device_span.hpp>

namespace cugraph {
namespace mtmg {
namespace detail {

/**
* @brief Manage a tuple of device spans on each GPU
*/
template <typename... Ts>
using device_shared_device_span_tuple_t =
device_shared_wrapper_t<std::tuple<raft::device_span<Ts>...>>;

} // namespace detail
} // namespace mtmg
} // namespace cugraph
cpp/include/cugraph/mtmg/detail/device_shared_device_vector.hpp
@@ -25,12 +25,10 @@ namespace mtmg {
namespace detail {

/**
- * @brief Wrap an object to be available for each GPU
+ * @brief Manage a device vector on each GPU
*
- * In the MTMG environment we need the ability to manage a collection of objects
- * that are associated with a particular GPU, and fetch the objects from an
- * arbitrary GPU thread. This object will wrap any object and allow it to be
- * accessed from different threads.
+ * Uses the device_shared_wrapper to manage an rmm::device_uvector<T> on
+ * each GPU.
*/
template <typename T>
class device_shared_device_vector_t : public device_shared_wrapper_t<rmm::device_uvector<T>> {
72 changes: 72 additions & 0 deletions cpp/include/cugraph/mtmg/detail/device_shared_device_vector_tuple.hpp
@@ -0,0 +1,72 @@
/*
* Copyright (c) 2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#pragma once

#include <cugraph/mtmg/detail/device_shared_device_span_tuple.hpp>

#include <rmm/device_uvector.hpp>

#include <algorithm>
#include <mutex>
#include <tuple>
#include <utility>

namespace cugraph {
namespace mtmg {
namespace detail {

/**
* @brief Manage a tuple of device vectors on each GPU
*
* Uses the device_shared_wrapper to manage a tuple of rmm::device_uvector
* instances on each GPU.
*/
template <typename... Ts>
class device_shared_device_vector_tuple_t
: public device_shared_wrapper_t<std::tuple<rmm::device_uvector<Ts>...>> {
using parent_t = detail::device_shared_wrapper_t<std::tuple<rmm::device_uvector<Ts>...>>;

public:
/**
* @brief Create a device_shared_device_span_tuple (read-only view)
*/
auto view()
{
std::lock_guard<std::mutex> lock(parent_t::lock_);

device_shared_device_span_tuple_t<Ts...> result;

std::for_each(parent_t::objects_.begin(), parent_t::objects_.end(), [&result, this](auto& p) {
convert_to_span(std::index_sequence_for<Ts...>(), result, p);
});

return result;
}

private:
template <std::size_t... Is>
void convert_to_span(std::index_sequence<Is...>,
device_shared_device_span_tuple_t<Ts...>& result,
std::pair<int32_t const, std::tuple<rmm::device_uvector<Ts>...>>& p)
{
result.set(p.first,
std::make_tuple(raft::device_span<Ts>{std::get<Is>(p.second).data(),
std::get<Is>(p.second).size()}...));
}
};

} // namespace detail
} // namespace mtmg
} // namespace cugraph
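The `view()` above relies on `std::index_sequence` pack expansion: `convert_to_span` applies `std::get<Is>` to every tuple element in a single expression. A self-contained, host-only sketch of the same technique, with a plain `span` struct standing in for `raft::device_span` (an assumption for illustration):

```cpp
#include <cstddef>
#include <tuple>
#include <utility>
#include <vector>

// Non-owning view: a simplified stand-in for raft::device_span<T>.
template <typename T>
struct span {
  T* ptr;
  std::size_t len;
};

// Expanding Is... emits one std::get<Is> call per tuple element, turning a
// tuple of owning vectors into a tuple of non-owning spans in one expression.
template <typename... Ts, std::size_t... Is>
std::tuple<span<Ts>...> to_spans(std::tuple<std::vector<Ts>...>& vecs,
                                 std::index_sequence<Is...>)
{
  return std::make_tuple(span<Ts>{std::get<Is>(vecs).data(), std::get<Is>(vecs).size()}...);
}

template <typename... Ts>
std::tuple<span<Ts>...> to_spans(std::tuple<std::vector<Ts>...>& vecs)
{
  return to_spans(vecs, std::index_sequence_for<Ts...>{});
}

int main()
{
  std::tuple<std::vector<int>, std::vector<float>> vecs{
    std::vector<int>{1, 2, 3}, std::vector<float>{0.5f, 1.5f}};
  auto spans = to_spans(vecs);  // std::tuple<span<int>, span<float>>
  return static_cast<int>(std::get<0>(spans).len);  // 3
}
```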
41 changes: 41 additions & 0 deletions cpp/include/cugraph/mtmg/vertex_pair_result.hpp
@@ -0,0 +1,41 @@
/*
* Copyright (c) 2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#pragma once

#include <cugraph/mtmg/detail/device_shared_device_vector_tuple.hpp>
#include <cugraph/mtmg/vertex_pair_result_view.hpp>

namespace cugraph {
namespace mtmg {

/**
* @brief An MTMG device vector for storing vertex pair results
*/
template <typename vertex_t, typename result_t>
class vertex_pair_result_t
: public detail::device_shared_device_vector_tuple_t<vertex_t, vertex_t, result_t> {
using parent_t = detail::device_shared_device_vector_tuple_t<vertex_t, vertex_t, result_t>;

public:
/**
* @brief Create a vertex pair result view (read only)
*/
auto view() { return vertex_pair_result_view_t<vertex_t, result_t>(this->parent_t::view()); }
};

} // namespace mtmg
} // namespace cugraph
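A possible usage sketch (assumed, not shown in this diff); the `int32_t`/`float` instantiation is illustrative:

```cpp
#include <cugraph/mtmg/vertex_pair_result.hpp>

void example()
{
  // One (vertex, vertex, result) tuple of device vectors per GPU, e.g. to
  // hold per-GPU vertex pair output in an MTMG run.
  cugraph::mtmg::vertex_pair_result_t<int32_t, float> pair_results;

  // Read-only spans over each GPU's vectors, fetchable from any GPU thread.
  auto pair_view = pair_results.view();
}
```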
