Add exactn cpp binding #1377

name: Testing::x86_64::GPU::Python
on:
  workflow_call:
    inputs:
      lightning-version:
        type: string
        required: true
        description: The version of Lightning to use. Valid values are 'release' (most recent release candidate), 'stable' (most recent git tag) or 'latest' (most recent commit from master)
      pennylane-version:
        type: string
        required: true
        description: The version of PennyLane to use. Valid values are 'release' (most recent release candidate), 'stable' (most recent git tag) or 'latest' (most recent commit from master)
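  # Hypothetical caller sketch (the file name and input values below are illustrative only):
  #
  #   jobs:
  #     gpu-python-tests:
  #       uses: ./.github/workflows/tests_gpu_python.yml
  #       with:
  #         lightning-version: latest
  #         pennylane-version: latest
  #       secrets: inherit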
  pull_request:
    types:
      - opened
      - reopened
      - synchronize
      - ready_for_review
  push:
    branches:
      - master

env:
  CI_CUDA_ARCH: 86
  COVERAGE_FLAGS: "--cov=pennylane_lightning --cov-report=term-missing --cov-report=xml:./coverage.xml --no-flaky-report -p no:warnings --tb=native"
  GCC_VERSION: 11

concurrency:
  group: tests_gpu_python-${{ github.ref }}-${{ github.event_name }}-${{ inputs.lightning-version }}-${{ inputs.pennylane-version }}
  cancel-in-progress: true

jobs:
  builddeps:
    if: ${{ github.event_name != 'pull_request' || (github.event.pull_request.draft == false && contains(github.event.pull_request.labels.*.name, 'ci:use-gpu-runner')) }}
    runs-on:
      - self-hosted
      - ubuntu-22.04
      - gpu
    strategy:
      max-parallel: 1
      matrix:
        cuda_version: ["12"]
    steps:
      - name: Validate GPU version and installed compiler
        run: |
          source /etc/profile.d/modules.sh
          module use /opt/modules
          module load cuda/${{ matrix.cuda_version }}
          echo "${PATH}" >> $GITHUB_PATH
          echo "LD_LIBRARY_PATH=${LD_LIBRARY_PATH}" >> $GITHUB_ENV
          nvcc --version
          nvidia-smi
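      # The $GITHUB_PATH / $GITHUB_ENV writes above persist the CUDA module's PATH and
      # LD_LIBRARY_PATH for all subsequent steps in this job.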

  pythontestswithGPU:
    needs: [builddeps]
    strategy:
      matrix:
        pl_backend: ["lightning_gpu", "lightning_tensor"]
        cuda_version: ["12"]
    name: Python Tests (${{ matrix.pl_backend }}, cuda-${{ matrix.cuda_version }})
    runs-on:
      - ubuntu-22.04
      - self-hosted
      - gpu
    steps:
      - name: Validate GPU version and installed compiler
        run: |
          source /etc/profile.d/modules.sh
          module use /opt/modules
          module load cuda/${{ matrix.cuda_version }}
          echo "${PATH}" >> $GITHUB_PATH
          echo "LD_LIBRARY_PATH=${LD_LIBRARY_PATH}" >> $GITHUB_ENV
          nvcc --version
          nvidia-smi
      - name: Checkout PennyLane-Lightning
        uses: actions/checkout@v4
        with:
          fetch-tags: true
      - name: Switch to release build of Lightning
        if: inputs.lightning-version == 'release' && matrix.pl_backend == 'lightning.qubit'
        run: |
          git fetch --all
          git checkout $(git branch -a --list "origin/v0.*rc*" | sort | tail -1)
      - name: Switch to stable build of Lightning
        if: inputs.lightning-version == 'stable' && matrix.pl_backend == 'lightning.qubit'
        run: |
          git fetch --tags --force
          git checkout latest_release
      - uses: actions/setup-python@v5
        name: Install Python
        id: setup_python
        with:
          python-version: '3.10'
      - name: Setup Python virtual environment
        id: setup_venv
        env:
          VENV_NAME: ${{ github.workspace }}/venv_${{ steps.setup_python.outputs.python-version }}_${{ github.sha }}_${{ matrix.pl_backend }}
        run: |
          # Clear any pre-existing venvs
          rm -rf venv_*
          # Create new venv for this workflow_run
          python --version
          python -m venv ${{ env.VENV_NAME }}
          # Add the venv to PATH for subsequent steps
          echo ${{ env.VENV_NAME }}/bin >> $GITHUB_PATH
          # Adding venv name as an output for subsequent steps to reference if needed
          source ${{ env.VENV_NAME }}/bin/activate
          echo "venv_name=${{ env.VENV_NAME }}" >> $GITHUB_OUTPUT
          echo "Python_ROOT_DIR=${{ env.VENV_NAME }}" >> $GITHUB_ENV
          echo "Python3_ROOT_DIR=${{ env.VENV_NAME }}" >> $GITHUB_ENV
          # Adding venv site-packages to output for subsequent steps to reference if needed
          echo "site_packages_dir=$(${{ env.VENV_NAME }}/bin/python -c 'import sysconfig; print(sysconfig.get_path("platlib"))')" >> $GITHUB_OUTPUT
      - name: Display Python-Path
        id: python_path
        run: |
          py_path=$(which python)
          echo "Python Interpreter Path => $py_path"
          echo "python=$py_path" >> $GITHUB_OUTPUT
          pip_path=$(which pip)
          echo "PIP Path => $pip_path"
          echo "pip=$pip_path" >> $GITHUB_OUTPUT
      - name: Install required packages
        run: |
          python -m pip install -r requirements-dev.txt
          python -m pip install cmake openfermionpyscf
      - name: Install required lightning_gpu only packages
        if: matrix.pl_backend == 'lightning_gpu'
        run: |
          python -m pip install custatevec-cu${{ matrix.cuda_version }}
      - name: Install required lightning_tensor only packages
        if: matrix.pl_backend == 'lightning_tensor'
        run: |
          python -m pip install cutensornet-cu${{ matrix.cuda_version }}
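      # custatevec / cutensornet are the cuQuantum libraries backing lightning_gpu
      # (state vector) and lightning_tensor (tensor network), respectively.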
      - name: Checkout PennyLane for release or latest build
        if: inputs.pennylane-version == 'release' || inputs.pennylane-version == 'latest'
        uses: actions/checkout@v4
        with:
          path: pennylane
          repository: PennyLaneAI/pennylane
      - name: Switch to latest build of PennyLane
        if: inputs.pennylane-version == 'latest'
        run: |
          cd pennylane
          git checkout master
          python -m pip uninstall -y pennylane && python -m pip install . -vv --no-deps
      - name: Switch to release build of PennyLane
        if: inputs.pennylane-version == 'release'
        run: |
          cd pennylane
          git fetch --all
          git checkout $(git branch -a --list "origin/v0.*rc*" | sort | tail -1)
          python -m pip uninstall -y pennylane && python -m pip install . -vv --no-deps
      - name: Install Stable PennyLane
        if: inputs.pennylane-version == 'stable'
        run: |
          python -m pip uninstall -y pennylane && python -m pip install -U pennylane
      - name: Build and install package
        env:
          CUQUANTUM_SDK: $(python -c "import site; print( f'{site.getsitepackages()[0]}/cuquantum')")
        run: |
          rm -rf build
          PL_BACKEND=lightning_qubit python scripts/configure_pyproject_toml.py || true
          PL_BACKEND=lightning_qubit SKIP_COMPILATION=True python -m pip install . -vv
          rm -rf build
          PL_BACKEND=${{ matrix.pl_backend }} python scripts/configure_pyproject_toml.py || true
          PL_BACKEND=${{ matrix.pl_backend }} python -m pip install . -vv
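      # The base lightning_qubit package is installed Python-only (SKIP_COMPILATION=True);
      # the selected matrix backend is then configured and compiled in a second pip install.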
      - name: Run PennyLane unit tests
        env:
          OMP_NUM_THREADS: 1
          OMP_PROC_BIND: false
        if: matrix.pl_backend == 'lightning_gpu'
        run: |
          DEVICENAME=`echo ${{ matrix.pl_backend }} | sed "s/_/./g"`
          pl-device-test --device ${DEVICENAME} --skip-ops --shots=20000 $COVERAGE_FLAGS --cov-append
          pl-device-test --device ${DEVICENAME} --shots=None --skip-ops $COVERAGE_FLAGS --cov-append
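      # DEVICENAME maps the backend name to the PennyLane device name,
      # e.g. lightning_gpu -> lightning.gpu; the same substitution is reused below.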
      - name: Run PennyLane Lightning unit tests
        env:
          OMP_NUM_THREADS: 1
          OMP_PROC_BIND: false
        run: |
          DEVICENAME=`echo ${{ matrix.pl_backend }} | sed "s/_/./g"`
          PL_DEVICE=${DEVICENAME} python -m pytest tests/ $COVERAGE_FLAGS
      - name: Test wheels for Lightning-GPU
        if: matrix.pl_backend == 'lightning_gpu'
        run: |
          python -m pip install -r requirements-dev.txt
          PL_BACKEND=lightning_qubit python scripts/configure_pyproject_toml.py
          SKIP_COMPILATION=True python -m pip install . -vv
          DEVICENAME=`echo ${{ matrix.pl_backend }} | sed "s/_/./g"`
          PL_BACKEND=${DEVICENAME} python scripts/configure_pyproject_toml.py
          python -m build
          python -m pip install dist/*.whl --force-reinstall --no-deps
          PL_DEVICE=${DEVICENAME} python -m pytest tests/test_device.py $COVERAGE_FLAGS
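      # This step rebuilds the package as a wheel with `python -m build`, force-reinstalls
      # it without dependencies, and re-runs the device tests against the installed wheel.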
      - name: Move coverage file
        run: |
          mv coverage.xml coverage-${{ github.job }}-${{ matrix.pl_backend }}.xml
      - name: Upload code coverage results
        uses: actions/upload-artifact@v4
        with:
          name: ubuntu-codecov-results-python-${{ matrix.pl_backend }}
          path: coverage-${{ github.job }}-${{ matrix.pl_backend }}.xml
          retention-days: 1
          if-no-files-found: error
          include-hidden-files: true

  upload-to-codecov-linux-python:
    needs: [pythontestswithGPU]
    name: Upload coverage data to codecov
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Download coverage reports
        uses: actions/download-artifact@v4
        with:
          pattern: ubuntu-codecov-*
          merge-multiple: true
      - name: Upload to Codecov
        uses: codecov/codecov-action@v4
        with:
          fail_ci_if_error: true
          verbose: true
          token: ${{ secrets.CODECOV_TOKEN }}
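      # CODECOV_TOKEN must be available to this workflow; when it is invoked via
      # workflow_call, the caller has to forward secrets (for example with `secrets: inherit`).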