
Commit 30954bb: Merge branch-24.06 into branch-24.08
nv-rliu committed May 31, 2024
2 parents ee4cbb7 + 1667f7a
Showing 185 changed files with 10,250 additions and 2,418 deletions.
5 changes: 5 additions & 0 deletions .devcontainer/Dockerfile
@@ -7,6 +7,11 @@ FROM ${BASE} as pip-base

ENV DEFAULT_VIRTUAL_ENV=rapids

RUN apt update -y \
&& DEBIAN_FRONTEND=noninteractive apt install -y \
libblas-dev liblapack-dev \
&& rm -rf /tmp/* /var/tmp/* /var/cache/apt/* /var/lib/apt/lists/*;

FROM ${BASE} as conda-base

ENV DEFAULT_CONDA_ENV=rapids
5 changes: 5 additions & 0 deletions .devcontainer/cuda11.8-conda/devcontainer.json
@@ -8,6 +8,11 @@
"BASE": "rapidsai/devcontainers:24.08-cpp-cuda11.8-mambaforge-ubuntu22.04"
}
},
"runArgs": [
"--rm",
"--name",
"${localEnv:USER:anon}-rapids-${localWorkspaceFolderBasename}-24.06-cuda11.8-conda"
],
"hostRequirements": {"gpu": "optional"},
"features": {
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils:24.8": {}
6 changes: 5 additions & 1 deletion .devcontainer/cuda11.8-pip/devcontainer.json
@@ -8,6 +8,11 @@
"BASE": "rapidsai/devcontainers:24.08-cpp-cuda11.8-ubuntu22.04"
}
},
"runArgs": [
"--rm",
"--name",
"${localEnv:USER:anon}-rapids-${localWorkspaceFolderBasename}-24.06-cuda11.8-pip"
],
"hostRequirements": {"gpu": "optional"},
"features": {
"ghcr.io/rapidsai/devcontainers/features/ucx:24.8": {
@@ -23,7 +28,6 @@
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils:24.8": {}
},
"overrideFeatureInstallOrder": [
"ghcr.io/rapidsai/devcontainers/features/ucx",
"ghcr.io/rapidsai/devcontainers/features/cuda",
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils"
],
5 changes: 5 additions & 0 deletions .devcontainer/cuda12.2-conda/devcontainer.json
@@ -8,6 +8,11 @@
"BASE": "rapidsai/devcontainers:24.08-cpp-mambaforge-ubuntu22.04"
}
},
"runArgs": [
"--rm",
"--name",
"${localEnv:USER:anon}-rapids-${localWorkspaceFolderBasename}-24.06-cuda12.2-conda"
],
"hostRequirements": {"gpu": "optional"},
"features": {
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils:24.8": {}
6 changes: 5 additions & 1 deletion .devcontainer/cuda12.2-pip/devcontainer.json
@@ -8,6 +8,11 @@
"BASE": "rapidsai/devcontainers:24.08-cpp-cuda12.2-ubuntu22.04"
}
},
"runArgs": [
"--rm",
"--name",
"${localEnv:USER:anon}-rapids-${localWorkspaceFolderBasename}-24.06-cuda12.2-pip"
],
"hostRequirements": {"gpu": "optional"},
"features": {
"ghcr.io/rapidsai/devcontainers/features/ucx:24.8": {
@@ -23,7 +28,6 @@
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils:24.8": {}
},
"overrideFeatureInstallOrder": [
"ghcr.io/rapidsai/devcontainers/features/ucx",
"ghcr.io/rapidsai/devcontainers/features/cuda",
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils"
],
2 changes: 1 addition & 1 deletion .github/workflows/pr.yaml
@@ -196,5 +196,5 @@ jobs:
extra-repo-deploy-key: CUGRAPH_OPS_SSH_PRIVATE_DEPLOY_KEY
build_command: |
sccache -z;
build-all --verbose -j$(nproc --ignore=1);
build-all --verbose -j$(nproc --ignore=1) -DBUILD_CUGRAPH_MG_TESTS=ON;
sccache -s;
@@ -344,7 +344,7 @@ def generate_rmat_dataset(
del label_df
gc.collect()

dask_label_df = dask_cudf.from_dask_dataframe(dask_label_df)
dask_label_df = dask_label_df.to_backend("cudf")

node_offsets = {"paper": 0}
edge_offsets = {("paper", "cites", "paper"): 0}
17 changes: 17 additions & 0 deletions benchmarks/nx-cugraph/pytest-based/bench_algos.py
@@ -848,6 +848,23 @@ def bench_weakly_connected_components(benchmark, graph_obj, backend_wrapper):
assert type(result) is list


def bench_ego_graph(benchmark, graph_obj, backend_wrapper):
G = get_graph_obj_for_benchmark(graph_obj, backend_wrapper)
node = get_highest_degree_node(graph_obj)
result = benchmark.pedantic(
target=backend_wrapper(nx.ego_graph),
args=(G,),
kwargs=dict(
n=node,
radius=100,
),
rounds=rounds,
iterations=iterations,
warmup_rounds=warmup_rounds,
)
assert isinstance(result, (nx.Graph, nxcg.Graph))


@pytest.mark.skip(reason="benchmark not implemented")
def bench_complete_bipartite_graph(benchmark, graph_obj, backend_wrapper):
pass
2 changes: 1 addition & 1 deletion ci/build_wheel.sh
@@ -40,7 +40,7 @@ if ! rapids-is-release-build; then
alpha_spec=',>=0.0.0a0'
fi

for dep in rmm cudf cugraph raft-dask pylibcugraph pylibcugraphops pylibraft ucx-py; do
for dep in rmm cudf cugraph raft-dask pylibcugraph pylibcugraphops pylibwholegraph pylibraft ucx-py; do
sed -r -i "s/${dep}==(.*)\"/${dep}${PACKAGE_CUDA_SUFFIX}==\1${alpha_spec}\"/g" ${pyproject_file}
done

1 change: 1 addition & 0 deletions ci/release/update-version.sh
@@ -105,6 +105,7 @@ find .devcontainer/ -type f -name devcontainer.json -print0 | while IFS= read -r
sed_runner "s@rapidsai/devcontainers/features/ucx:[0-9.]*@rapidsai/devcontainers/features/ucx:${NEXT_SHORT_TAG_PEP440}@" "${filename}"
sed_runner "s@rapidsai/devcontainers/features/cuda:[0-9.]*@rapidsai/devcontainers/features/cuda:${NEXT_SHORT_TAG_PEP440}@" "${filename}"
sed_runner "s@rapidsai/devcontainers/features/rapids-build-utils:[0-9.]*@rapidsai/devcontainers/features/rapids-build-utils:${NEXT_SHORT_TAG_PEP440}@" "${filename}"
sed_runner "s@rapids-\${localWorkspaceFolderBasename}-[0-9.]*@rapids-\${localWorkspaceFolderBasename}-${NEXT_SHORT_TAG}@g" "${filename}"
done

sed_runner "s/:[0-9][0-9]\.[0-9][0-9]/:${NEXT_SHORT_TAG}/" ./notebooks/README.md
5 changes: 4 additions & 1 deletion ci/run_cugraph_pyg_pytests.sh
@@ -6,7 +6,10 @@ set -euo pipefail
# Support invoking run_cugraph_pyg_pytests.sh outside the script directory
cd "$(dirname "$(realpath "${BASH_SOURCE[0]}")")"/../python/cugraph-pyg/cugraph_pyg

pytest --cache-clear --ignore=tests/mg "$@" .
pytest --cache-clear --benchmark-disable "$@" .

# Used to skip certain examples in CI due to memory limitations
export CI_RUN=1

# Test examples
for e in "$(pwd)"/examples/*.py; do
2 changes: 1 addition & 1 deletion ci/test.sh
@@ -103,7 +103,7 @@ if hasArg "--run-python-tests"; then
conda list
cd ${CUGRAPH_ROOT}/python/cugraph-pyg/cugraph_pyg
# rmat is not tested because of MG testing
pytest --cache-clear --junitxml=${CUGRAPH_ROOT}/junit-cugraph-pytests.xml -v --cov-config=.coveragerc --cov=cugraph_pyg --cov-report=xml:${WORKSPACE}/python/cugraph_pyg/cugraph-coverage.xml --cov-report term --ignore=raft --ignore=tests/mg --ignore=tests/int --ignore=tests/generators --benchmark-disable
pytest -sv -m sg --cache-clear --junitxml=${CUGRAPH_ROOT}/junit-cugraph-pytests.xml -v --cov-config=.coveragerc --cov=cugraph_pyg --cov-report=xml:${WORKSPACE}/python/cugraph_pyg/cugraph-coverage.xml --cov-report term --ignore=raft --benchmark-disable
echo "Ran Python pytest for cugraph_pyg : return code was: $?, test script exit code is now: $EXITCODE"

echo "Python pytest for cugraph-service (single-GPU only)..."
11 changes: 4 additions & 7 deletions ci/test_python.sh
@@ -3,10 +3,6 @@

set -euo pipefail

# TODO: Enable dask query planning (by default) once some bugs are fixed.
# xref: https://github.com/rapidsai/cudf/issues/15027
export DASK_DATAFRAME__QUERY_PLANNING=False

# Support invoking test_python.sh outside the script directory
cd "$(dirname "$(realpath "${BASH_SOURCE[0]}")")"/../

@@ -217,13 +213,14 @@ if [[ "${RAPIDS_CUDA_VERSION}" == "11.8.0" ]]; then

# Install pyg dependencies (which requires pip)

pip install ogb
pip install \
ogb \
tensordict

pip install \
pyg_lib \
torch_scatter \
torch_sparse \
torch_cluster \
torch_spline_conv \
-f ${PYG_URL}

rapids-print-env
4 changes: 0 additions & 4 deletions ci/test_wheel.sh
@@ -3,10 +3,6 @@

set -eoxu pipefail

# TODO: Enable dask query planning (by default) once some bugs are fixed.
# xref: https://github.com/rapidsai/cudf/issues/15027
export DASK_DATAFRAME__QUERY_PLANNING=False

package_name=$1
package_dir=$2

12 changes: 11 additions & 1 deletion ci/test_wheel_cugraph-dgl.sh
@@ -32,8 +32,18 @@ fi
PYTORCH_URL="https://download.pytorch.org/whl/cu${PYTORCH_CUDA_VER}"
DGL_URL="https://data.dgl.ai/wheels/cu${PYTORCH_CUDA_VER}/repo.html"

# Starting from 2.2, PyTorch wheels depend on the nvidia-nccl-cuXX>=2.19 wheel and
# dynamically link to NCCL. RAPIDS CUDA 11 CI images have an older NCCL version that
# might shadow the newer NCCL required by PyTorch during import (when importing
# `cupy` before `torch`).
if [[ "${NCCL_VERSION}" < "2.19" ]]; then
PYTORCH_VER="2.1.0"
else
PYTORCH_VER="2.3.0"
fi

rapids-logger "Installing PyTorch and DGL"
rapids-retry python -m pip install torch --index-url ${PYTORCH_URL}
rapids-retry python -m pip install "torch==${PYTORCH_VER}" --index-url ${PYTORCH_URL}
rapids-retry python -m pip install dgl==2.0.0 --find-links ${DGL_URL}

python -m pytest python/cugraph-dgl/tests
8 changes: 5 additions & 3 deletions ci/test_wheel_cugraph-pyg.sh
@@ -24,6 +24,9 @@ python -m pip install $(ls ./dist/${python_package_name}*.whl)[test]
# RAPIDS_DATASET_ROOT_DIR is used by test scripts
export RAPIDS_DATASET_ROOT_DIR="$(realpath datasets)"

# Used to skip certain examples in CI due to memory limitations
export CI_RUN=1

if [[ "${CUDA_VERSION}" == "11.8.0" ]]; then
PYTORCH_URL="https://download.pytorch.org/whl/cu118"
PYG_URL="https://data.pyg.org/whl/torch-2.1.0+cu118.html"
@@ -39,15 +42,14 @@ rapids-retry python -m pip install \
pyg_lib \
torch_scatter \
torch_sparse \
torch_cluster \
torch_spline_conv \
tensordict \
-f ${PYG_URL}

rapids-logger "pytest cugraph-pyg (single GPU)"
pushd python/cugraph-pyg/cugraph_pyg
python -m pytest \
--cache-clear \
--ignore=tests/mg \
--benchmark-disable \
tests
# Test examples
for e in "$(pwd)"/examples/*.py; do
1 change: 1 addition & 0 deletions conda/recipes/cugraph-pyg/meta.yaml
@@ -34,6 +34,7 @@ requirements:
- cupy >=12.0.0
- cugraph ={{ version }}
- pylibcugraphops ={{ minor_version }}
- tensordict >=0.1.2
- pyg >=2.5,<2.6

tests:
7 changes: 5 additions & 2 deletions cpp/CMakeLists.txt
@@ -92,14 +92,14 @@ set(CUGRAPH_CXX_FLAGS "")
set(CUGRAPH_CUDA_FLAGS "")

if(CMAKE_COMPILER_IS_GNUCXX)
list(APPEND CUGRAPH_CXX_FLAGS -Werror -Wno-error=deprecated-declarations)
list(APPEND CUGRAPH_CXX_FLAGS -Werror -Wno-error=deprecated-declarations -Wno-deprecated-declarations -DRAFT_HIDE_DEPRECATION_WARNINGS)
endif(CMAKE_COMPILER_IS_GNUCXX)


message("-- Building for GPU_ARCHS = ${CMAKE_CUDA_ARCHITECTURES}")

list(APPEND CUGRAPH_CUDA_FLAGS --expt-extended-lambda --expt-relaxed-constexpr)
list(APPEND CUGRAPH_CUDA_FLAGS -Werror=cross-execution-space-call -Wno-deprecated-declarations -Xptxas=--disable-warnings)
list(APPEND CUGRAPH_CUDA_FLAGS -Werror=cross-execution-space-call -Wno-deprecated-declarations -DRAFT_HIDE_DEPRECATION_WARNINGS -Xptxas=--disable-warnings)
list(APPEND CUGRAPH_CUDA_FLAGS -Xcompiler=-Wall,-Wno-error=sign-compare,-Wno-error=unused-but-set-variable)
list(APPEND CUGRAPH_CUDA_FLAGS -Xfatbin=-compress-all)

@@ -180,6 +180,7 @@ set(CUGRAPH_SOURCES
src/community/detail/refine_sg.cu
src/community/detail/refine_mg.cu
src/community/edge_triangle_count_sg.cu
src/community/edge_triangle_count_mg.cu
src/community/detail/maximal_independent_moves_sg.cu
src/community/detail/maximal_independent_moves_mg.cu
src/detail/utility_wrappers.cu
@@ -288,6 +289,8 @@ set(CUGRAPH_SOURCES
src/structure/symmetrize_edgelist_mg.cu
src/community/triangle_count_sg.cu
src/community/triangle_count_mg.cu
src/community/approx_weighted_matching_sg.cu
src/community/approx_weighted_matching_mg.cu
src/traversal/k_hop_nbrs_sg.cu
src/traversal/k_hop_nbrs_mg.cu
src/mtmg/vertex_result.cu
44 changes: 44 additions & 0 deletions cpp/include/cugraph/algorithms.hpp
@@ -2007,6 +2007,24 @@ void triangle_count(raft::handle_t const& handle,
raft::device_span<edge_t> counts,
bool do_expensive_check = false);

/*
* @brief Compute edge triangle counts.
*
* Compute edge triangle counts for the entire set of edges.
*
* @tparam vertex_t Type of vertex identifiers. Needs to be an integral type.
* @tparam edge_t Type of edge identifiers. Needs to be an integral type.
* @tparam multi_gpu Flag indicating whether template instantiation should target single-GPU (false)
* or multi-GPU (true).
* @param handle RAFT handle object to encapsulate resources (e.g. CUDA stream, communicator, and
* handles to various CUDA libraries) to run graph algorithms.
* @param graph_view Graph view object.
*
* @return edge_property_t containing the edge triangle count
*/
template <typename vertex_t, typename edge_t, bool multi_gpu>
edge_property_t<graph_view_t<vertex_t, edge_t, false, multi_gpu>, edge_t> edge_triangle_count(
raft::handle_t const& handle, graph_view_t<vertex_t, edge_t, false, multi_gpu> const& graph_view);
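For orientation, here is a minimal single-GPU calling sketch. It is an illustration added for this write-up, not part of the commit, and it assumes a `raft::handle_t` and a suitable `graph_view_t` have already been constructed elsewhere.

```cpp
// Illustrative sketch only (not from this commit). `handle` and `graph_view` are
// assumed to have been created elsewhere (e.g. from an edge list).
#include <cugraph/algorithms.hpp>
#include <cugraph/graph_view.hpp>
#include <raft/core/handle.hpp>

void example_edge_triangle_count(
  raft::handle_t const& handle,
  cugraph::graph_view_t<int32_t, int32_t, false, false> const& graph_view)
{
  // One count per edge: the number of triangles the corresponding edge participates in.
  auto edge_counts =
    cugraph::edge_triangle_count<int32_t, int32_t, false>(handle, graph_view);
  // edge_counts.view() can then be fed into other primitives (e.g. k-truss style filtering).
  static_cast<void>(edge_counts);
}
```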

/*
* @brief Compute K-Truss.
*
@@ -2368,6 +2386,32 @@ rmm::device_uvector<vertex_t> vertex_coloring(
graph_view_t<vertex_t, edge_t, false, multi_gpu> const& graph_view,
raft::random::RngState& rng_state);

/*
* @brief Approximate Weighted Matching
*
* A matching in an undirected graph G = (V, E) is a pairing of adjacent vertices
* such that each vertex is matched with at most one other vertex, the objective
* being to match as many vertices as possible or to maximise the sum of the
* weights of the matched edges. Here we provide an implementation of an
* approximation algorithm for maximum weight matching. See
* https://web.archive.org/web/20081031230449id_/http://www.ii.uib.no/~fredrikm/fredrik/papers/CP75.pdf
* for further information.
*
* @tparam vertex_t Type of vertex identifiers. Needs to be an integral type.
* @tparam edge_t Type of edge identifiers. Needs to be an integral type.
* @tparam multi_gpu Flag indicating whether template instantiation should target single-GPU (false)
* or multi-GPU (true).
* @param[in] handle RAFT handle object to encapsulate resources (e.g. CUDA stream, communicator,
* and handles to various CUDA libraries) to run graph algorithms.
* @param[in] graph_view Graph view object.
* @param[in] edge_weight_view View object holding edge weights for @p graph_view.
* @return A tuple of device vector of matched vertex ids and sum of the weights of the matched
* edges.
*/
template <typename vertex_t, typename edge_t, typename weight_t, bool multi_gpu>
std::tuple<rmm::device_uvector<vertex_t>, weight_t> approximate_weighted_matching(
raft::handle_t const& handle,
graph_view_t<vertex_t, edge_t, false, multi_gpu> const& graph_view,
edge_property_view_t<edge_t, weight_t const*> edge_weight_view);
} // namespace cugraph
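As with edge_triangle_count above, a hypothetical single-GPU usage sketch (not part of the commit), assuming the graph view and a matching edge weight view already exist:

```cpp
// Illustrative sketch only (not from this commit). The graph view and edge weight
// view are assumed to have been built elsewhere with matching vertex/edge/weight types.
#include <cugraph/algorithms.hpp>
#include <cugraph/edge_property.hpp>
#include <cugraph/graph_view.hpp>
#include <raft/core/handle.hpp>

void example_weighted_matching(
  raft::handle_t const& handle,
  cugraph::graph_view_t<int32_t, int32_t, false, false> const& graph_view,
  cugraph::edge_property_view_t<int32_t, float const*> edge_weight_view)
{
  // Per the documentation above, the result is a device vector of matched vertex ids
  // plus the sum of the weights of the matched edges.
  auto [matching, matched_weight_sum] =
    cugraph::approximate_weighted_matching<int32_t, int32_t, float, false>(
      handle, graph_view, edge_weight_view);
  static_cast<void>(matching);
  static_cast<void>(matched_weight_sum);
}
```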

/**