Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add libcuml-tests package #4635

Merged
merged 1 commit into from
Apr 4, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 5 additions & 1 deletion build.sh
Original file line number Diff line number Diff line change
Expand Up @@ -268,7 +268,11 @@ fi
# If `./build.sh cuml` is called, don't build C/C++ components
if completeBuild || hasArg libcuml || hasArg prims || hasArg bench || hasArg cpp-mgtests; then
cd ${LIBCUML_BUILD_DIR}
cmake --build ${LIBCUML_BUILD_DIR} -j${PARALLEL_LEVEL} ${build_args} --target ${INSTALL_TARGET} ${VERBOSE_FLAG}
if [ -n "${INSTALL_TARGET}" ]; then
cmake --build ${LIBCUML_BUILD_DIR} -j${PARALLEL_LEVEL} ${build_args} --target ${INSTALL_TARGET} ${VERBOSE_FLAG}
else
cmake --build ${LIBCUML_BUILD_DIR} -j${PARALLEL_LEVEL} ${build_args} ${VERBOSE_FLAG}
fi
fi

if hasArg cppdocs; then
Expand Down
4 changes: 0 additions & 4 deletions ci/cpu/build.sh
Original file line number Diff line number Diff line change
Expand Up @@ -34,10 +34,6 @@ fi
# ucx-py version
export UCX_PY_VERSION='0.26.*'

export CMAKE_CUDA_COMPILER_LAUNCHER="sccache"
export CMAKE_CXX_COMPILER_LAUNCHER="sccache"
export CMAKE_C_COMPILER_LAUNCHER="sccache"

################################################################################
# SETUP - Check environment
################################################################################
Expand Down
21 changes: 5 additions & 16 deletions ci/cpu/upload.sh
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
#!/bin/bash
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
# Copyright (c) 2018-2022, NVIDIA CORPORATION.

set -e

Expand All @@ -22,32 +22,21 @@ if [[ -z "$MY_UPLOAD_KEY" ]]; then
return 0
fi

################################################################################
# SETUP - Get conda file output locations
################################################################################

gpuci_logger "Get conda file output locations"

export LIBCUML_FILE=`conda build --no-build-id --croot ${CONDA_BLD_DIR} conda/recipes/libcuml --output`
export CUML_FILE=`conda build --croot ${CONDA_BLD_DIR} conda/recipes/cuml --python=$PYTHON --output`

################################################################################
# UPLOAD - Conda packages
################################################################################

gpuci_logger "Starting conda uploads"

if [[ "$BUILD_LIBCUML" == "1" && "$UPLOAD_LIBCUML" == "1" ]]; then
test -e ${LIBCUML_FILE}
LIBCUML_FILES=$(conda build --no-build-id --croot ${CONDA_BLD_DIR} conda/recipes/libcuml --output)
echo "Upload libcuml"
echo ${LIBCUML_FILE}
gpuci_retry anaconda -t ${MY_UPLOAD_KEY} upload -u ${CONDA_USERNAME:-rapidsai} ${LABEL_OPTION} --skip-existing ${LIBCUML_FILE} --no-progress
gpuci_retry anaconda -t ${MY_UPLOAD_KEY} upload -u ${CONDA_USERNAME:-rapidsai} ${LABEL_OPTION} --skip-existing --no-progress ${LIBCUML_FILES}
fi

if [[ "$BUILD_CUML" == "1" && "$UPLOAD_CUML" == "1" ]]; then
CUML_FILE=$(conda build --croot ${CONDA_BLD_DIR} conda/recipes/cuml --python=$PYTHON --output)
test -e ${CUML_FILE}
echo "Upload cuml"
echo ${CUML_FILE}
echo "Upload cuml: ${CUML_FILE}"
gpuci_retry anaconda -t ${MY_UPLOAD_KEY} upload -u ${CONDA_USERNAME:-rapidsai} ${LABEL_OPTION} --skip-existing ${CUML_FILE} --no-progress
fi

41 changes: 4 additions & 37 deletions ci/gpu/build.sh
Original file line number Diff line number Diff line change
Expand Up @@ -34,10 +34,6 @@ export MINOR_VERSION=`echo $GIT_DESCRIBE_TAG | grep -o -E '([0-9]+\.[0-9]+)'`
# ucx-py version
export UCX_PY_VERSION='0.26.*'

export CMAKE_CUDA_COMPILER_LAUNCHER="sccache"
export CMAKE_CXX_COMPILER_LAUNCHER="sccache"
export CMAKE_C_COMPILER_LAUNCHER="sccache"

################################################################################
# SETUP - Check environment
################################################################################
Expand Down Expand Up @@ -90,11 +86,6 @@ conda info
conda config --show-sources
conda list --show-channel-urls

gpuci_logger "Adding ${CONDA_PREFIX}/lib to LD_LIBRARY_PATH"

export LD_LIBRARY_PATH_CACHED=$LD_LIBRARY_PATH
export LD_LIBRARY_PATH=$CONDA_PREFIX/lib:$LD_LIBRARY_PATH

if [[ -z "$PROJECT_FLASH" || "$PROJECT_FLASH" == "0" ]]; then
gpuci_logger "Building doxygen C++ docs"
$WORKSPACE/build.sh cppdocs -v
Expand All @@ -106,8 +97,6 @@ if [[ -z "$PROJECT_FLASH" || "$PROJECT_FLASH" == "0" ]]; then
gpuci_logger "Build from source"
$WORKSPACE/build.sh clean libcuml cuml prims bench -v --codecov

gpuci_logger "Resetting LD_LIBRARY_PATH"

cd $WORKSPACE

################################################################################
Expand Down Expand Up @@ -172,10 +161,6 @@ if [[ -z "$PROJECT_FLASH" || "$PROJECT_FLASH" == "0" ]]; then
python ../scripts/cuda-memcheck.py -tool memcheck -exe ./test/prims
fi
else
#Project Flash
export LIBCUML_BUILD_DIR="$WORKSPACE/ci/artifacts/cuml/cpu/conda_work/cpp/build"
export LD_LIBRARY_PATH="$LIBCUML_BUILD_DIR:$LD_LIBRARY_PATH"

if hasArg --skip-tests; then
gpuci_logger "Skipping Tests"
exit 0
Expand All @@ -184,31 +169,17 @@ else
gpuci_logger "Check GPU usage"
nvidia-smi

gpuci_logger "Update binaries"
cd $LIBCUML_BUILD_DIR
chrpath -d libcuml.so
chrpath -d libcuml++.so
patchelf --replace-needed `patchelf --print-needed libcuml++.so | grep faiss` libfaiss.so libcuml++.so
gpuci_mamba_retry install -y -c ${CONDA_ARTIFACT_PATH} libcuml libcuml-tests

gpuci_logger "Running libcuml binaries"
gpuci_logger "Running libcuml test binaries"
GTEST_ARGS="xml:${WORKSPACE}/test-results/libcuml_cpp/"
for gt in $(find ./test -name "*_TEST" | grep -v "PRIMS_" || true); do
for gt in "$CONDA_PREFIX/bin/gtests/libcuml/"*; do
test_name=$(basename $gt)
echo "Patching gtest $test_name"
chrpath -d ${gt}
patchelf --replace-needed `patchelf --print-needed ${gt} | grep faiss` libfaiss.so ${gt}
echo "Running gtest $test_name"
${gt} ${GTEST_ARGS}
echo "Ran gtest $test_name : return code was: $?, test script exit code is now: $EXITCODE"
done


CONDA_FILE=`find ${CONDA_ARTIFACT_PATH} -name "libcuml*.tar.bz2"`
CONDA_FILE=`basename "$CONDA_FILE" .tar.bz2` #get filename without extension
CONDA_FILE=${CONDA_FILE//-/=} #convert to conda install
gpuci_logger "Installing $CONDA_FILE"
gpuci_mamba_retry install -c ${CONDA_ARTIFACT_PATH} "$CONDA_FILE"

# FIXME: Project FLASH only builds for python version 3.8 which is the one used in
# the CUDA 11.0 job, need to change all versions to project flash
if [ "$CUDA_REL" == "11.0" ];then
Expand Down Expand Up @@ -260,13 +231,9 @@ else
################################################################################

gpuci_logger "Run ml-prims test"
cd $LIBCUML_BUILD_DIR
GTEST_ARGS="xml:${WORKSPACE}/test-results/prims/"
for gt in $(find ./test -name "*_TEST" | grep -v "SG_\|MG_" || true); do
for gt in "$CONDA_PREFIX/bin/gtests/libcuml_prims/"*; do
test_name=$(basename $gt)
echo "Patching gtest $test_name"
chrpath -d ${gt}
patchelf --replace-needed `patchelf --print-needed ${gt} | grep faiss` libfaiss.so ${gt}
echo "Running gtest $test_name"
${gt} ${GTEST_ARGS}
echo "Ran gtest $test_name : return code was: $?, test script exit code is now: $EXITCODE"
Expand Down
8 changes: 3 additions & 5 deletions conda/recipes/libcuml/build.sh
Original file line number Diff line number Diff line change
@@ -1,12 +1,10 @@
#!/bin/bash
# Copyright (c) 2018-2022, NVIDIA CORPORATION.

if [ -n "$MACOSX_DEPLOYMENT_TARGET" ]; then
# C++11 requires 10.9
# but cudatoolkit 8 is build for 10.11
export MACOSX_DEPLOYMENT_TARGET=10.11
fi

if [[ -z "$PROJECT_FLASH" || "$PROJECT_FLASH" == "0" ]]; then
./build.sh clean libcuml -v --allgpuarch
else
./build.sh clean libcuml prims -v --allgpuarch
fi
./build.sh -n clean libcuml prims -v --allgpuarch
17 changes: 17 additions & 0 deletions conda/recipes/libcuml/conda_build_config.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
cmake_version:
- ">=3.20.1,<3.23"

nccl_version:
- ">=2.9.9"

treelite_version:
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Quick question for the future: the versions in this file could potentially also be used by the cuml conda recipe, no? Treelite in particular, for example — is there a way to put this file in conda/recipes so that it could be shared?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, absolutely. We can use the --variant-config-files/-m flag in the conda build command to point to a particular file that has all of the version specifications. This is what's done in the integration repo's versions.yaml file. Here's the official conda docs on that file for reference:

We can implement this as a future enhancement since we have some additional CI changes coming soon.

- "2.3.0"

gtest_version:
- "1.10.0"

libfaiss_version:
- "1.7.0"

libcusolver_version:
- ">=11.2.1"
4 changes: 4 additions & 0 deletions conda/recipes/libcuml/install_libcuml.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
#!/bin/bash
# Copyright (c) 2018-2022, NVIDIA CORPORATION.

# conda-build output script for the `libcuml` package: installs the default
# CMake component(s) from the C++ build tree into the conda prefix.
# Assumes cpp/build was already configured/built by the parent recipe's
# build step — TODO confirm against conda/recipes/libcuml/build.sh.
cmake --install cpp/build
4 changes: 4 additions & 0 deletions conda/recipes/libcuml/install_libcuml_tests.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
#!/bin/bash
# Copyright (c) 2018-2022, NVIDIA CORPORATION.

# conda-build output script for the `libcuml-tests` package: installs only
# the "testing" CMake component (gtest/benchmark executables) from the
# already-built C++ build tree into the conda prefix.
cmake --install cpp/build --component testing
88 changes: 57 additions & 31 deletions conda/recipes/libcuml/meta.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -3,20 +3,19 @@
# Usage:
# conda build . -c conda-forge -c nvidia -c rapidsai -c pytorch
{% set version = environ.get('GIT_DESCRIBE_TAG', '0.0.0.dev').lstrip('v') + environ.get('VERSION_SUFFIX', '') %}
{% set minor_version = version.split('.')[0] + '.' + version.split('.')[1] %}
{% set cuda_version='.'.join(environ.get('CUDA', '9.2').split('.')[:2]) %}
{% set cuda_major=cuda_version.split('.')[0] %}
{% set minor_version = version.split('.')[0] + '.' + version.split('.')[1] %}
{% set cuda_version = '.'.join(environ.get('CUDA', '9.2').split('.')[:2]) %}
{% set cuda_major = cuda_version.split('.')[0] %}
{% set cuda_spec = ">=" + cuda_major ~ ",<" + (cuda_major | int + 1) ~ ".0a0" %} # i.e. >=11,<12.0a0
{% set ucx_py_version=environ.get('UCX_PY_VERSION') %}

package:
name: libcuml
version: {{ version }}
name: libcuml-split

source:
git_url: ../../..

build:
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
script_env:
- CC
- CXX
Expand All @@ -36,9 +35,9 @@ build:

requirements:
build:
- cmake>=3.20.1,<3.23
- cmake {{ cmake_version }}
host:
- nccl>=2.9.9
- nccl {{ nccl_version }}
- cudf {{ minor_version }}
- cudatoolkit {{ cuda_version }}.*
- ucx-py {{ ucx_py_version }}
Expand All @@ -48,28 +47,55 @@ requirements:
- libraft-distance {{ minor_version }}
- libraft-nn {{ minor_version }}
- lapack
- treelite=2.3.0
- treelite {{ treelite_version }}
- faiss-proc=*=cuda
- gtest=1.10.0
- gtest {{ gtest_version }}
- gmock
- libfaiss 1.7.0 *_cuda
run:
- libcumlprims {{ minor_version }}
- libraft-headers {{ minor_version }}
- libraft-distance {{ minor_version }}
- libraft-nn {{ minor_version }}
- cudf {{ minor_version }}
- nccl>=2.9.9
- ucx-py {{ ucx_py_version }}
- ucx-proc=*=gpu
- {{ pin_compatible('cudatoolkit', max_pin='x', min_pin='x') }}
- treelite=2.3.0
- faiss-proc=*=cuda
- libfaiss 1.7.0 *_cuda
- libcusolver>=11.2.1
- libfaiss {{ libfaiss_version }} *_cuda

about:
home: http://rapids.ai/
license: Apache-2.0
# license_file: LICENSE
summary: libcuml library
outputs:
- name: libcuml
version: {{ version }}
script: install_libcuml.sh
build:
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
requirements:
build:
- cmake {{ cmake_version }}
run:
- cudatoolkit {{ cuda_spec }}
- libcumlprims {{ minor_version }}
- libraft-headers {{ minor_version }}
- libraft-distance {{ minor_version }}
- libraft-nn {{ minor_version }}
- cudf {{ minor_version }}
- nccl {{ nccl_version }}
- ucx-py {{ ucx_py_version }}
- ucx-proc=*=gpu
- treelite {{ treelite_version }}
- faiss-proc=*=cuda
- libfaiss {{ libfaiss_version }} *_cuda
- libcusolver {{ libcusolver_version }}
about:
home: http://rapids.ai/
license: Apache-2.0
summary: libcuml library
- name: libcuml-tests
version: {{ version }}
script: install_libcuml_tests.sh
build:
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
requirements:
build:
- cmake {{ cmake_version }}
run:
- cudatoolkit {{ cuda_spec }}
- {{ pin_subpackage('libcuml', exact=True) }}
jjacobelli marked this conversation as resolved.
Show resolved Hide resolved
- gtest {{ gtest_version }}
- gmock {{ gtest_version }}
about:
home: http://rapids.ai/
license: Apache-2.0
summary: libcuml test & benchmark executables