Unify dense and sparse tests #3

Open
wants to merge 27 commits into base: unify-dense-sparse-import
27 commits
6e12723
unified dense and sparse tests; test cases are almost entirely disjoint
levsnv Dec 2, 2021
32a5103
stray comment
levsnv Dec 2, 2021
70aface
Update to UCX-Py 0.24 (#4396)
pentschev Dec 4, 2021
c10ebf1
Add a warning to prefer LinearSVM over SVM(kernel='linear') (#4382)
achirkin Dec 4, 2021
c364d05
Hiding cusparse deprecation warnings (#4373)
cjnolet Dec 4, 2021
5820133
fix minor ASAN issues in UMAPAlgo::Optimize::find_params_ab() (#4405)
yitao-li Dec 6, 2021
7613d70
Simplify perplexity normalization in t-SNE (#4425)
zbjornson Dec 6, 2021
4215577
RF: code re-organization to enhance build parallelism (#4299)
venkywonka Dec 6, 2021
0a6916d
Remove comment numerical warning (#4408)
viclafargue Dec 6, 2021
3c36f62
Fix docstring for npermutations in PermutationExplainer (#4402)
hcho3 Dec 6, 2021
f678c2c
Using sparse public API functions from RAFT (#4389)
cjnolet Dec 7, 2021
4ce5bd6
Remove direct Scikit-learn imports (#4431)
dantegd Dec 8, 2021
0114c67
Merge pull request #4434 from rapidsai/branch-21.12
GPUtester Dec 8, 2021
9046f85
Integrating RAFT handle updates (#4313)
divyegala Dec 14, 2021
307196d
Update ucx-py version on release using rvc (#4411)
jjacobelli Dec 15, 2021
f94f272
Update CUDA 11.5 conda environment to use 22.02 pinnings. (#4450)
bdice Dec 16, 2021
fed3774
Use RAFT template instantiations for distances (#4302)
cjnolet Dec 16, 2021
e4913ad
Merge branch 'branch-22.02' of github.com:rapidsai/cuml into unify-tests
levsnv Dec 16, 2021
b9b97fc
Add missing imports tests (#4452)
jjacobelli Dec 16, 2021
7ac36f2
Merge branch 'unify-dense-sparse-import' into unify-tests
levsnv Dec 17, 2021
d9d888f
moved node_traits from common.cuh to internal.cuh to use in fil_tests.cu
levsnv Dec 17, 2021
f36304d
types
levsnv Dec 17, 2021
8de22be
ref -> val
levsnv Dec 17, 2021
d630156
Move NVTX range helpers to raft (#4445)
achirkin Dec 17, 2021
3ae44b9
Unify dense and sparse import in FIL (#4328)
levsnv Dec 17, 2021
d200341
Merge branch 'branch-22.02' into unify-tests
levsnv Dec 17, 2021
5679fe1
fix conflict resolution
levsnv Dec 17, 2021
3 changes: 2 additions & 1 deletion ci/checks/style.sh
@@ -15,7 +15,8 @@ cd "$WORKSPACE"

export GIT_DESCRIBE_TAG=`git describe --tags`
export MINOR_VERSION=`echo $GIT_DESCRIBE_TAG | grep -o -E '([0-9]+\.[0-9]+)'`
conda install "ucx-py=0.23.*" "ucx-proc=*=gpu"
export UCX_PY_VERSION='0.24.*'
conda install "ucx-py=${UCX_PY_VERSION}" "ucx-proc=*=gpu"

# Run flake8 and get results/return code
FLAKE=`flake8 --config=python/setup.cfg`
3 changes: 3 additions & 0 deletions ci/cpu/build.sh
@@ -31,6 +31,9 @@ if [ "${IS_STABLE_BUILD}" != "true" ] ; then
export VERSION_SUFFIX=`date +%y%m%d`
fi

# ucx-py version
export UCX_PY_VERSION='0.24.*'

################################################################################
# SETUP - Check environment
################################################################################
10 changes: 9 additions & 1 deletion ci/gpu/build.sh
@@ -31,6 +31,9 @@ cd $WORKSPACE
export GIT_DESCRIBE_TAG=`git describe --tags`
export MINOR_VERSION=`echo $GIT_DESCRIBE_TAG | grep -o -E '([0-9]+\.[0-9]+)'`

# ucx-py version
export UCX_PY_VERSION='0.24.*'

################################################################################
# SETUP - Check environment
################################################################################
@@ -53,7 +56,7 @@ gpuci_mamba_retry install -c conda-forge -c rapidsai -c rapidsai-nightly -c nvid
"libcumlprims=${MINOR_VERSION}" \
"dask-cudf=${MINOR_VERSION}" \
"dask-cuda=${MINOR_VERSION}" \
"ucx-py=0.23.*" \
"ucx-py=${UCX_PY_VERSION}" \
"ucx-proc=*=gpu" \
"xgboost=1.5.0dev.rapidsai${MINOR_VERSION}" \
"rapids-build-env=${MINOR_VERSION}.*" \
@@ -184,6 +187,11 @@ else
cd $LIBCUML_BUILD_DIR
chrpath -d ./test/ml
patchelf --replace-needed `patchelf --print-needed ./test/ml | grep faiss` libfaiss.so ./test/ml
cp _deps/raft-build/libraft_nn.so $PWD
patchelf --replace-needed `patchelf --print-needed libraft_nn.so | grep faiss` libfaiss.so libraft_nn.so
cp _deps/raft-build/libraft_distance.so $PWD

gpuci_logger "Running libcuml binaries"
GTEST_OUTPUT="xml:${WORKSPACE}/test-results/libcuml_cpp/" ./test/ml

CONDA_FILE=`find ${CONDA_ARTIFACT_PATH} -name "libcuml*.tar.bz2"`
7 changes: 7 additions & 0 deletions ci/release/update-version.sh
@@ -22,6 +22,7 @@ CURRENT_SHORT_TAG=${CURRENT_MAJOR}.${CURRENT_MINOR}
NEXT_MAJOR=$(echo $NEXT_FULL_TAG | awk '{split($0, a, "."); print a[1]}')
NEXT_MINOR=$(echo $NEXT_FULL_TAG | awk '{split($0, a, "."); print a[2]}')
NEXT_SHORT_TAG=${NEXT_MAJOR}.${NEXT_MINOR}
NEXT_UCX_PY_VERSION="$(curl -sL https://version.gpuci.io/rapids/${NEXT_SHORT_TAG}).*"

echo "Preparing release $CURRENT_TAG => $NEXT_FULL_TAG"

@@ -46,4 +47,10 @@ for FILE in conda/environments/*.yml; do
sed_runner "s/rapids-build-env=${CURRENT_SHORT_TAG}/rapids-build-env=${NEXT_SHORT_TAG}/g" ${FILE};
sed_runner "s/rapids-notebook-env=${CURRENT_SHORT_TAG}/rapids-notebook-env=${NEXT_SHORT_TAG}/g" ${FILE};
sed_runner "s/rapids-doc-env=${CURRENT_SHORT_TAG}/rapids-doc-env=${NEXT_SHORT_TAG}/g" ${FILE};
sed_runner "s/ucx-py=.*/ucx-py=${NEXT_UCX_PY_VERSION}/g" ${FILE};
done

# Update ucx-py version
sed_runner "s/export UCX_PY_VERSION=.*/export UCX_PY_VERSION='${NEXT_UCX_PY_VERSION}'/g" ci/checks/style.sh
sed_runner "s/export UCX_PY_VERSION=.*/export UCX_PY_VERSION='${NEXT_UCX_PY_VERSION}'/g" ci/cpu/build.sh
sed_runner "s/export UCX_PY_VERSION=.*/export UCX_PY_VERSION='${NEXT_UCX_PY_VERSION}'/g" ci/gpu/build.sh
2 changes: 1 addition & 1 deletion conda/environments/cuml_dev_cuda11.0.yml
@@ -14,7 +14,7 @@ dependencies:
- libcumlprims=22.02.*
- dask-cudf=22.02.*
- dask-cuda=22.02.*
- ucx-py=0.23
- ucx-py=0.24
- ucx-proc=*=gpu
- dask-ml
- doxygen>=1.8.20
2 changes: 1 addition & 1 deletion conda/environments/cuml_dev_cuda11.2.yml
@@ -14,7 +14,7 @@ dependencies:
- libcumlprims=22.02.*
- dask-cudf=22.02.*
- dask-cuda=22.02.*
- ucx-py=0.23
- ucx-py=0.24
- ucx-proc=*=gpu
- dask-ml
- doxygen>=1.8.20
2 changes: 1 addition & 1 deletion conda/environments/cuml_dev_cuda11.4.yml
@@ -14,7 +14,7 @@ dependencies:
- libcumlprims=22.02.*
- dask-cudf=22.02.*
- dask-cuda=22.02.*
- ucx-py=0.23
- ucx-py=0.24
- ucx-proc=*=gpu
- dask-ml
- doxygen>=1.8.20
18 changes: 9 additions & 9 deletions conda/environments/cuml_dev_cuda11.5.yml
@@ -6,15 +6,15 @@ channels:
- conda-forge
dependencies:
- cudatoolkit=11.5
- rapids-build-env=21.12.*
- rapids-notebook-env=21.12.*
- rapids-doc-env=21.12.*
- cudf=21.12.*
- rmm=21.12.*
- libcumlprims=21.12.*
- dask-cudf=21.12.*
- dask-cuda=21.12.*
- ucx-py=0.23
- rapids-build-env=22.02.*
- rapids-notebook-env=22.02.*
- rapids-doc-env=22.02.*
- cudf=22.02.*
- rmm=22.02.*
- libcumlprims=22.02.*
- dask-cudf=22.02.*
- dask-cuda=22.02.*
- ucx-py=0.24
- ucx-proc=*=gpu
- dask-ml
- doxygen>=1.8.20
11 changes: 9 additions & 2 deletions conda/recipes/cuml/meta.yaml
@@ -7,6 +7,7 @@
{% set cuda_version='.'.join(environ.get('CUDA', 'unknown').split('.')[:2]) %}
{% set cuda_major=cuda_version.split('.')[0] %}
{% set py_version=environ.get('CONDA_PY', 36) %}
{% set ucx_py_version=environ.get('UCX_PY_VERSION') %}

package:
name: cuml
@@ -34,7 +35,7 @@ requirements:
- libcuml={{ version }}
- libcumlprims {{ minor_version }}
- cudatoolkit {{ cuda_version }}.*
- ucx-py 0.23
- ucx-py {{ ucx_py_version }}
- ucx-proc=*=gpu
run:
- python x.x
@@ -45,13 +46,19 @@ requirements:
- cupy>=7.8.0,<10.0.0a0
- treelite=2.1.0
- nccl>=2.9.9
- ucx-py 0.23
- ucx-py {{ ucx_py_version }}
- ucx-proc=*=gpu
- dask>=2021.11.1,<=2021.11.2
- distributed>=2021.11.1,<=2021.11.2
- joblib >=0.11
- {{ pin_compatible('cudatoolkit', max_pin='x', min_pin='x') }}

tests: # [linux64]
requirements: # [linux64]
- cudatoolkit {{ cuda_version }}.* # [linux64]
imports: # [linux64]
- cuml # [linux64]

about:
home: http://rapids.ai/
license: Apache-2.0
5 changes: 3 additions & 2 deletions conda/recipes/libcuml/meta.yaml
@@ -6,6 +6,7 @@
{% set minor_version = version.split('.')[0] + '.' + version.split('.')[1] %}
{% set cuda_version='.'.join(environ.get('CUDA', '9.2').split('.')[:2]) %}
{% set cuda_major=cuda_version.split('.')[0] %}
{% set ucx_py_version=environ.get('UCX_PY_VERSION') %}
package:
name: libcuml
version: {{ version }}
@@ -38,7 +39,7 @@ requirements:
- nccl>=2.9.9
- cudf {{ minor_version }}
- cudatoolkit {{ cuda_version }}.*
- ucx-py 0.23
- ucx-py {{ ucx_py_version }}
- ucx-proc=*=gpu
- libcumlprims {{ minor_version }}
- lapack
@@ -51,7 +52,7 @@ requirements:
- libcumlprims {{ minor_version }}
- cudf {{ minor_version }}
- nccl>=2.9.9
- ucx-py 0.23
- ucx-py {{ ucx_py_version }}
- ucx-proc=*=gpu
- {{ pin_compatible('cudatoolkit', max_pin='x', min_pin='x') }}
- treelite=2.1.0
21 changes: 16 additions & 5 deletions cpp/CMakeLists.txt
@@ -241,9 +241,7 @@ if(BUILD_CUML_CPP_LIBRARY)

# common components
add_library(${CUML_CPP_TARGET} SHARED
src/common/logger.cpp
src/common/nvtx.cu)

src/common/logger.cpp)

# FIL components
target_sources(${CUML_CPP_TARGET}
@@ -262,6 +260,19 @@
src/datasets/make_blobs.cu
src/datasets/make_regression.cu
src/dbscan/dbscan.cu
src/decisiontree/batched-levelalgo/kernels/entropy-double.cu
src/decisiontree/batched-levelalgo/kernels/entropy-float.cu
src/decisiontree/batched-levelalgo/kernels/gamma-double.cu
src/decisiontree/batched-levelalgo/kernels/gamma-float.cu
src/decisiontree/batched-levelalgo/kernels/gini-double.cu
src/decisiontree/batched-levelalgo/kernels/gini-float.cu
src/decisiontree/batched-levelalgo/kernels/inverse_gaussian-double.cu
src/decisiontree/batched-levelalgo/kernels/inverse_gaussian-float.cu
src/decisiontree/batched-levelalgo/kernels/mse-double.cu
src/decisiontree/batched-levelalgo/kernels/mse-float.cu
src/decisiontree/batched-levelalgo/kernels/poisson-double.cu
src/decisiontree/batched-levelalgo/kernels/poisson-float.cu
src/decisiontree/batched-levelalgo/kernels/quantiles.cu
src/decisiontree/decisiontree.cu
src/explainer/kernel_shap.cu
src/explainer/permutation_shap.cu
@@ -355,7 +366,6 @@

target_compile_definitions(${CUML_CPP_TARGET}
PUBLIC
$<$<BOOL:${NVTX}>:NVTX_ENABLED>
DISABLE_CUSPARSE_DEPRECATED
PRIVATE
CUML_CPP_API
@@ -385,6 +395,8 @@ if(BUILD_CUML_CPP_LIBRARY)
rmm::rmm
cuml::Thrust
raft::raft
raft::raft_nn
raft::raft_distance
PRIVATE
CUDA::cublas
CUDA::cufft
Expand All @@ -393,7 +405,6 @@ if(BUILD_CUML_CPP_LIBRARY)
CUDA::cudart
CUDA::cusparse
GPUTreeShap::GPUTreeShap
$<$<BOOL:${NVTX}>:CUDA::nvToolsExt>
$<$<BOOL:${LINK_FAISS}>:FAISS::FAISS>
$<IF:$<BOOL:${Treelite_ADDED}>,treelite::treelite_static,treelite::treelite>
$<IF:$<BOOL:${Treelite_ADDED}>,treelite::treelite_runtime_static,treelite::treelite_runtime>
4 changes: 4 additions & 0 deletions cpp/bench/CMakeLists.txt
@@ -45,6 +45,8 @@ if(BUILD_CUML_BENCH)
cuml::${CUML_CPP_TARGET}
benchmark::benchmark
raft::raft
raft::raft_nn
raft::raft_distance
$<IF:$<BOOL:${Treelite_ADDED}>,treelite::treelite_static,treelite::treelite>
$<IF:$<BOOL:${Treelite_ADDED}>,treelite::treelite_runtime_static,treelite::treelite_runtime>
)
@@ -87,6 +89,8 @@ if(BUILD_CUML_PRIMS_BENCH)
CUDA::cublas
benchmark::benchmark
raft::raft
raft::raft_nn
raft::raft_distance
$<IF:$<BOOL:${Treelite_ADDED}>,treelite::treelite_static,treelite::treelite>
$<IF:$<BOOL:${Treelite_ADDED}>,treelite::treelite_runtime_static,treelite::treelite_runtime>
)
1 change: 1 addition & 0 deletions cpp/bench/prims/distance_common.cuh
@@ -17,6 +17,7 @@
#include <raft/cudart_utils.h>
#include <common/ml_benchmark.hpp>
#include <raft/distance/distance.hpp>
#include <raft/distance/specializations.hpp>
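
The added include pulls in RAFT's precompiled distance specializations, so this benchmark can link against kernels already built into libraft_distance (see the raft::raft_distance link targets added above) rather than re-instantiating the templates in this translation unit. Illustrative only — not RAFT's actual declarations — the general mechanism is the explicit-instantiation declaration:

// Hypothetical stand-in for a RAFT distance template; all names below are illustrative.
template <typename T>
void pairwise_distance_sketch(const T* x, const T* y, T* out, int m, int n, int k);

// Explicit-instantiation declarations: this TU will not instantiate the template;
// the definitions are expected to come from a prebuilt library at link time.
extern template void pairwise_distance_sketch<float>(const float*, const float*, float*, int, int, int);
extern template void pairwise_distance_sketch<double>(const double*, const double*, double*, int, int, int);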

namespace MLCommon {
namespace Bench {
3 changes: 1 addition & 2 deletions cpp/bench/prims/fused_l2_nn.cu
@@ -44,8 +44,7 @@ struct FusedL2NN : public Fixture {
alloc(out, params.m);
alloc(workspace, params.m);
raft::random::Rng r(123456ULL);
raft::handle_t handle;
handle.set_stream(stream);
raft::handle_t handle{stream};

r.uniform(x, params.m * params.k, T(-1.0), T(1.0), stream);
r.uniform(y, params.n * params.k, T(-1.0), T(1.0), stream);
4 changes: 2 additions & 2 deletions cpp/bench/sg/benchmark.cuh
@@ -37,9 +37,9 @@ class Fixture : public MLCommon::Bench::Fixture {

void SetUp(const ::benchmark::State& state) override
{
handle.reset(new raft::handle_t(NumStreams));
auto stream_pool = std::make_shared<rmm::cuda_stream_pool>(NumStreams);
handle.reset(new raft::handle_t{stream, stream_pool});
MLCommon::Bench::Fixture::SetUp(state);
handle->set_stream(stream);
}
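
The hunk above shows the pattern used throughout this PR: instead of calling handle_t::set_stream() after construction, the user stream and an optional rmm::cuda_stream_pool (for the handle's internal worker streams) are passed to the raft::handle_t constructor. A minimal stand-alone sketch, not part of this diff, with header paths assumed from the RAFT/RMM versions pinned on this branch:

#include <cuda_runtime.h>
#include <raft/handle.hpp>
#include <rmm/cuda_stream_pool.hpp>

#include <memory>

void make_handle_example(cudaStream_t user_stream)
{
  // Optional pool of internal worker streams; the pool size here is an arbitrary choice.
  auto stream_pool = std::make_shared<rmm::cuda_stream_pool>(4);

  // The stream (and pool) are bound at construction time; there is no set_stream() call.
  raft::handle_t handle{user_stream, stream_pool};

  // ... work issued through `handle` is ordered on `user_stream` ...
}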

void TearDown(const ::benchmark::State& state) override
1 change: 1 addition & 0 deletions cpp/cmake/thirdparty/get_raft.cmake
@@ -35,6 +35,7 @@ function(find_and_configure_raft)
SOURCE_SUBDIR cpp
OPTIONS
"BUILD_TESTS OFF"
"NVTX ${NVTX}"
)

if(raft_ADDED)
4 changes: 1 addition & 3 deletions cpp/examples/dbscan/dbscan_example.cpp
@@ -136,8 +136,6 @@ int main(int argc, char* argv[])
}
}

raft::handle_t handle;

std::vector<float> h_inputData;

if (input == "") {
@@ -177,7 +175,7 @@

cudaStream_t stream;
CUDA_RT_CALL(cudaStreamCreate(&stream));
handle.set_stream(stream);
raft::handle_t handle{stream};

std::vector<int> h_labels(nRows);
int* d_labels = nullptr;
4 changes: 1 addition & 3 deletions cpp/examples/kmeans/kmeans_example.cpp
@@ -127,11 +127,9 @@ int main(int argc, char* argv[])
std::cout << "Run KMeans with k=" << params.n_clusters << ", max_iterations=" << params.max_iter
<< std::endl;

raft::handle_t handle;

cudaStream_t stream;
CUDA_RT_CALL(cudaStreamCreate(&stream));
handle.set_stream(stream);
raft::handle_t handle{stream};

// srcdata size n_samples * n_features
double* d_srcdata = nullptr;
5 changes: 1 addition & 4 deletions cpp/examples/symreg/symreg_example.cpp
@@ -198,12 +198,10 @@ int main(int argc, char* argv[])

/* ======================= Begin GPU memory allocation ======================= */
std::cout << "***************************************" << std::endl;
raft::handle_t handle;
std::shared_ptr<raft::mr::device::allocator> allocator(new raft::mr::device::default_allocator());

cudaStream_t stream;
CUDA_RT_CALL(cudaStreamCreate(&stream));
handle.set_stream(stream);
raft::handle_t handle{stream};

// Begin recording time
cudaEventRecord(start, stream);
@@ -342,6 +340,5 @@
raft::deallocate(d_finalprogs, stream);
CUDA_RT_CALL(cudaEventDestroy(start));
CUDA_RT_CALL(cudaEventDestroy(stop));
CUDA_RT_CALL(cudaStreamDestroy(stream));
return 0;
}
11 changes: 2 additions & 9 deletions cpp/include/cuml/cuml_api.h
@@ -53,9 +53,10 @@ const char* cumlGetErrorString(cumlError_t error);
* @brief Creates a cumlHandle_t
*
* @param[inout] handle pointer to the handle to create.
* @param[in] stream the stream to which cuML work should be ordered.
* @return CUML_SUCCESS on success, @todo: add more error codes
*/
cumlError_t cumlCreate(cumlHandle_t* handle);
cumlError_t cumlCreate(cumlHandle_t* handle, cudaStream_t stream);

/**
* @brief sets the stream to which all cuML work issued via the passed handle should be ordered.
@@ -64,14 +65,6 @@ cumlError_t cumlCreate(cumlHandle_t* handle);
* @param[in] stream the stream to which cuML work should be ordered.
* @return CUML_SUCCESS on success, @todo: add more error codes
*/
cumlError_t cumlSetStream(cumlHandle_t handle, cudaStream_t stream);
/**
* @brief gets the stream to which all cuML work issued via the passed handle should be ordered.
*
* @param[inout] handle handle to get the stream of.
* @param[out] stream pointer to the stream to which cuML work should be ordered.
* @return CUML_SUCCESS on success, @todo: add more error codes
*/
cumlError_t cumlGetStream(cumlHandle_t handle, cudaStream_t* stream);
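
With this change a stream is supplied at handle-creation time and the separate cumlSetStream/cumlGetStream entry points are dropped. A minimal usage sketch of the revised C API — not part of this diff, and assuming cumlDestroy remains available in cuml_api.h:

#include <cuda_runtime.h>
#include <cuml/cuml_api.h>

int create_handle_example(void)
{
  cudaStream_t stream;
  if (cudaStreamCreate(&stream) != cudaSuccess) return 1;

  cumlHandle_t handle;
  /* The stream is bound when the handle is created; no cumlSetStream() call. */
  if (cumlCreate(&handle, stream) != CUML_SUCCESS) return 1;

  /* ... cuML work issued through `handle` is ordered on `stream` ... */

  cumlDestroy(handle);      /* assumed unchanged by this PR */
  cudaStreamDestroy(stream);
  return 0;
}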

6 changes: 2 additions & 4 deletions cpp/include/cuml/manifold/umap.hpp
@@ -16,16 +16,14 @@

#pragma once

#include <raft/sparse/coo.hpp>

#include <cstddef>
#include <cstdint>
#include <memory>

namespace raft {
class handle_t;
namespace sparse {
template <typename T, typename Index_Type>
class COO;
};
} // namespace raft

namespace ML {