Eurohack 23: playing with ci #1404

Merged 16 commits on Sep 22, 2023
46 changes: 46 additions & 0 deletions ci/docker/Dockerfile.build
@@ -0,0 +1,46 @@
FROM docker.io/nvidia/cuda:11.8.0-devel-ubuntu22.04

ARG DEBIAN_FRONTEND=noninteractive

RUN apt-get update -qq && apt-get install -qq -y --no-install-recommends \
        build-essential \
        cmake \
        wget \
        ninja-build && \
    rm -rf /var/lib/apt/lists/*

ARG MPICH_VERSION=3.3.2
ARG MPICH_PATH=/usr/local/mpich
RUN wget -q https://www.mpich.org/static/downloads/${MPICH_VERSION}/mpich-${MPICH_VERSION}.tar.gz && \
    tar -xzf mpich-${MPICH_VERSION}.tar.gz && \
    cd mpich-${MPICH_VERSION} && \
    ./configure \
        --disable-fortran \
        --prefix=${MPICH_PATH} && \
    make install -j$(nproc) && \
    cd .. && \
    rm -rf mpich-${MPICH_VERSION}.tar.gz mpich-${MPICH_VERSION}

RUN echo "${MPICH_PATH}/lib" >> /etc/ld.so.conf.d/cscs.conf && ldconfig

COPY . /quda/src

RUN cmake -S /quda/src \
        -DCMAKE_CUDA_COMPILER=nvcc \
        -DCMAKE_CXX_COMPILER=${MPICH_PATH}/bin/mpicxx \
        -DCMAKE_C_COMPILER=${MPICH_PATH}/bin/mpicc \
        -DQUDA_GPU_ARCH=sm_60 \
        -DQUDA_MULTIGRID=ON \
        -DQUDA_MULTIGRID_NVEC_LIST=24 \
        -DQUDA_MDW_FUSED_LS_LIST=4 \
        -DQUDA_MPI=ON \
        -DQUDA_DIRAC_DEFAULT_OFF=ON \
        -DQUDA_DIRAC_WILSON=ON -DQUDA_DIRAC_CLOVER=ON -DQUDA_DIRAC_STAGGERED=ON \
        -GNinja \
        -B /quda/build

RUN cmake --build /quda/build

RUN cmake --install /quda/build

ENV QUDA_TEST_GRID_SIZE="1 1 1 2"
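
For reference, a minimal sketch of building this image outside CI, run from the repository root so that COPY . /quda/src picks up the sources. The tag quda-ci-build is a placeholder, and the second command shows how the MPICH_VERSION build argument above could be overridden (3.4.3 is only an example release):

docker build -f ci/docker/Dockerfile.build -t quda-ci-build .
docker build -f ci/docker/Dockerfile.build --build-arg MPICH_VERSION=3.4.3 -t quda-ci-build .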

30 changes: 30 additions & 0 deletions ci/pipeline.yml
@@ -0,0 +1,30 @@
include:
  - remote: "https://gitlab.com/cscs-ci/recipes/-/raw/master/templates/v2/.ci-ext.yml"

stages:
  - build
  - test

variables:
  PERSIST_IMAGE_NAME: $CSCS_REGISTRY_PATH/quda/public/build:$CI_COMMIT_SHORT_SHA

build_job:
  stage: build
  extends: .container-builder
  variables:
    DOCKERFILE: ci/docker/Dockerfile.build

test_job:
  stage: test
  extends: .container-runner-daint-gpu
  image: $PERSIST_IMAGE_NAME
  script:
    - ctest --test-dir /quda/build/ --output-on-failure
  variables:
    CRAY_CUDA_MPS: 0
    SLURM_JOB_NUM_NODES: 2
    SLURM_PARTITION: normal
    SLURM_TIMELIMIT: "0:30:00"
    USE_MPI: "YES"
    QUDA_ENABLE_TUNING: 0
    QUDA_RESOURCE_PATH: .
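
The test stage reuses the image produced by the build stage and runs the test suite that was configured into /quda/build, with tuning disabled for reproducibility. A rough single-node equivalent outside the CSCS runner, assuming the placeholder tag quda-ci-build from above and the NVIDIA container toolkit (the 2-node SLURM layout is not reproduced here):

docker run --rm --gpus all -e QUDA_ENABLE_TUNING=0 -e QUDA_RESOURCE_PATH=. quda-ci-build \
    ctest --test-dir /quda/build/ --output-on-failure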
26 changes: 13 additions & 13 deletions tests/CMakeLists.txt
@@ -15,7 +15,7 @@ endif()

add_subdirectory(utils)
add_subdirectory(host_reference)
enable_language(Fortran)
# enable_language(Fortran)

if(QUDA_NVSHMEM AND QUDA_DOWNLOAD_NVSHMEM)
add_dependencies(quda_test NVSHMEM)
@@ -247,18 +247,18 @@ target_link_libraries(heatbath_test ${TEST_LIBS})
quda_checkbuildtest(heatbath_test QUDA_BUILD_ALL_TESTS)
install(TARGETS heatbath_test ${QUDA_EXCLUDE_FROM_INSTALL} DESTINATION ${CMAKE_INSTALL_BINDIR})

if(QUDA_MPI OR QUDA_QMP)
if(DEFINED ENV{QUDA_TEST_NUMPROCS})
# user is setting number of processes to use through the QUDA_TEST_NUMPROCS env
set(QUDA_CTEST_LAUNCH ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG}
$ENV{QUDA_TEST_NUMPROCS} ${MPIEXEC_PREFLAGS})
else()
# use FindMPI variables for QUDA_CTEST_LAUNCH set MPIEXEC_MAX_NUMPROCS to the
# number of ranks you want to launch
set(QUDA_CTEST_LAUNCH ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG}
${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_PREFLAGS})
endif()
endif()
#if(QUDA_MPI OR QUDA_QMP)
# if(DEFINED ENV{QUDA_TEST_NUMPROCS})
# # user is setting number of processes to use through the QUDA_TEST_NUMPROCS env
# set(QUDA_CTEST_LAUNCH ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG}
# $ENV{QUDA_TEST_NUMPROCS} ${MPIEXEC_PREFLAGS})
# else()
# # use FindMPI variables for QUDA_CTEST_LAUNCH set MPIEXEC_MAX_NUMPROCS to the
# # number of ranks you want to launch
# set(QUDA_CTEST_LAUNCH ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG}
# ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_PREFLAGS})
# endif()
#endif()
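
Commenting out this block removes the mpiexec prefix that was previously baked into every registered test, so ctest now invokes the test binaries directly and process placement is left to the CI runner. For orientation, the two branches of the old block correspond roughly to launch lines like these (dslash_test stands in for any test binary; <MPIEXEC_MAX_NUMPROCS> is FindMPI's detected rank count):

mpiexec -n $QUDA_TEST_NUMPROCS ./dslash_test       # if QUDA_TEST_NUMPROCS was set at configure time
mpiexec -n <MPIEXEC_MAX_NUMPROCS> ./dslash_test    # otherwise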

# BLAS tests
if(QUDA_DIRAC_WILSON