[pre-commit.ci] pre-commit autoupdate #975

Workflow file for this run

name: Run all checks

on:
  pull_request:
    branches:
      - main
  workflow_dispatch:
    inputs:
      ref:
        description: 'The git ref to build the package for'
        required: false
        default: ''
        type: string
      use_lkg:
        description: 'Whether to use the last known good versions of dependencies'
        required: false
        default: True
        type: boolean
  # nightly
  schedule:
    - cron: '0 0 * * *'

# Only run once per PR, canceling any previous runs
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true
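# For example: on a pull request, github.head_ref is the PR's source branch, so pushes
# to the same PR all land in one group (e.g. 'Run all checks-my-feature') and the older
# run is canceled; on schedule/dispatch runs head_ref is empty, so the unique run_id
# gives every run its own group and nothing is canceled.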

# Precompute the ref if the workflow was triggered by a workflow dispatch rather than copying this logic repeatedly
env:
  ref: ${{ github.event_name == 'workflow_dispatch' && inputs.ref || null }}
  # we want to use the LKG if that is explicitly requested, or if we're in a PR, but not a nightly run
  # the final `|| ''` is because env vars are always converted to strings and the string 'false' is truthy (!!)
  # (see https://github.com/orgs/community/discussions/25645)
  use_lkg: ${{ (github.event_name == 'workflow_dispatch' && inputs.use_lkg) || github.event_name == 'pull_request' || '' }}
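  # Worked evaluation of the expression above (GitHub's || returns its first truthy operand):
  #   pull_request:                         ... || true || '' -> env.use_lkg = 'true' (truthy)
  #   workflow_dispatch with use_lkg=true:  true              -> env.use_lkg = 'true' (truthy)
  #   schedule (nightly):                   false || false || '' -> env.use_lkg = ''  (falsy)
  # Without the trailing || '', the nightly case would store the *string* 'false', which
  # later truthiness checks like ${{ env.use_lkg && ... }} would treat as true.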

jobs:
  eval:
    name: Evaluate changes
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          ref: ${{ env.ref }}
          fetch-depth: 2
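          # depth 2 so that HEAD^ (the first parent of the PR merge commit,
          # i.e. the base branch) exists locally for the diff below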
      # We want to enforce the following rules for PRs:
      # * if all modifications are to README.md
      #   no testing is needed
      # * if there are modifications to docs/* or to any code
      #   then docs need to be built to verify consistency
      # * if there are modifications to notebooks/* or to any code
      #   then notebooks need to be run to verify consistency
      # * for any code changes (or changes to metadata files)
      #   linting and testing should be run
      # For a PR build, HEAD will be the merge commit, and we want to diff against the base branch,
      # which will be the first parent: HEAD^
      # (For non-PR changes, we will always perform all CI tasks)
      # Note that GitHub Actions provides path filters, but they operate at the workflow level, not the job level
      - name: Determine type of code change
        run: |
          if ($env:GITHUB_EVENT_NAME -eq 'pull_request') {
            $editedFiles = git diff HEAD^ --name-only
            $editedFiles # echo edited files to enable easier debugging
            $codeChanges = $false
            $docChanges = $false
            $nbChanges = $false
            $changeType = "none"
            foreach ($file in $editedFiles) {
              switch -Wildcard ($file) {
                "README.md" { Continue }
                ".gitignore" { Continue }
                "econml/_version.py" { Continue }
                "prototypes/*" { Continue }
                "images/*" { Continue }
                "doc/*" { $docChanges = $true; Continue }
                "notebooks/*" { $nbChanges = $true; Continue }
                default { $codeChanges = $true; Continue }
              }
            }
          }
          echo "buildDocs=$(($env:GITHUB_EVENT_NAME -ne 'pull_request') -or ($docChanges -or $codeChanges))" >> $env:GITHUB_OUTPUT
          echo "buildNbs=$(($env:GITHUB_EVENT_NAME -ne 'pull_request') -or ($nbChanges -or $codeChanges))" >> $env:GITHUB_OUTPUT
          echo "testCode=$(($env:GITHUB_EVENT_NAME -ne 'pull_request') -or $codeChanges)" >> $env:GITHUB_OUTPUT
        shell: pwsh
        id: eval
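        # Note: PowerShell stringifies booleans as 'True'/'False' (capitalized), so the
        # outputs written above are those strings; downstream jobs therefore compare
        # against 'True' rather than the lowercase 'true' GitHub expressions produce.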
    outputs:
      buildDocs: ${{ steps.eval.outputs.buildDocs }}
      buildNbs: ${{ steps.eval.outputs.buildNbs }}
      testCode: ${{ steps.eval.outputs.testCode }}
  lint:
    name: Lint code
    needs: [eval]
    if: ${{ needs.eval.outputs.testCode == 'True' }}
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          ref: ${{ env.ref }}
      - name: Setup Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.9'
      - name: Ensure latest pip and setuptools
        run: python -m pip install --upgrade pip && pip install --upgrade setuptools
      - name: Run Ruff
        run: 'pip install ruff && ruff check'
  notebooks:
    name: Run notebooks
    needs: [eval]
    if: ${{ needs.eval.outputs.buildNbs == 'True' }}
    runs-on: ubuntu-latest
    strategy:
      matrix:
        kind: [except-customer-scenarios, customer-scenarios]
        include:
          - kind: "except-customer-scenarios"
            extras: "[tf,plt,ray]"
            pattern: "(?!CustomerScenarios)"
            install_graphviz: true
            version: '3.8' # no supported version of tensorflow for 3.9
          - kind: "customer-scenarios"
            extras: "[plt,dowhy]"
            pattern: "CustomerScenarios"
            version: '3.9'
            install_graphviz: false
      fail-fast: false
    env:
      id_string: ${{ matrix.kind }}-${{ matrix.version }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          ref: ${{ env.ref }}
      - name: Setup Python
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.version }}
      - name: Install uv
        # check if we're running on windows
        run: ${{ runner.os == 'Windows' && 'irm https://astral.sh/uv/install.ps1 | iex' || 'curl -LsSf https://astral.sh/uv/install.sh | sh' }}
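      # The run line above uses the GitHub-expressions ternary idiom: `cond && a || b`
      # yields `a` when cond is true (and `a` is truthy), else `b`, so Windows runners
      # get the PowerShell installer and all others get the shell one.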
      - name: Install graphviz
        run: sudo apt-get -yq install graphviz
        if: ${{ matrix.install_graphviz }}
      # Add verbose flag to pip installation if in debug mode
      - name: Install econml
        run: uv pip install --system -e .${{ matrix.extras }} ${{ fromJSON('["","-v"]')[runner.debug] }} ${{ env.use_lkg && '-r lkg-notebook.txt' }}
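      # In the step above, fromJSON('["","-v"]')[runner.debug] indexes a two-element
      # array: runner.debug is 1 when debug logging is enabled (selecting '-v') and
      # unset otherwise (selecting ''); env.use_lkg && '-r lkg-notebook.txt' appends
      # the pinned LKG requirements only when use_lkg is non-empty.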
      # Install notebook requirements (if not already done as part of lkg)
      - name: Install notebook requirements
        run: uv pip install --system jupyter jupyter-client nbconvert nbformat seaborn xgboost tqdm
        if: ${{ !env.use_lkg }}
      - name: Save installed packages
        run: pip freeze --exclude-editable > notebooks-${{ env.id_string }}-requirements.txt
      - name: Upload installed packages
        uses: actions/upload-artifact@v4
        with:
          name: requirements-${{ env.id_string }}
          path: notebooks-${{ env.id_string }}-requirements.txt
      - name: Install pytest
        run: uv pip install --system pytest pytest-xdist pytest-cov coverage[toml]
      - name: Run notebook tests
        run: python -m pytest
        id: run_tests
        env:
          PYTEST_ADDOPTS: '-m "notebook"'
          NOTEBOOK_DIR_PATTERN: ${{ matrix.pattern }}
          COVERAGE_PROCESS_START: 'pyproject.toml'
      - name: Make coverage filename unique
        run: mv .coverage .coverage.${{ env.id_string }}
        # Run whether or not the tests passed, but only if they ran at all
        if: success() || failure() && contains(fromJSON('["success", "failure"]'), steps.run_tests.outcome)
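        # Since && binds tighter than ||, this condition reads as
        # success() || (failure() && <run_tests completed>): these follow-up steps run
        # whether the tests passed or failed, but are skipped if an earlier step
        # (e.g. installation) failed before the tests could run at all.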
      - name: Upload coverage report
        uses: actions/upload-artifact@v4
        if: success() || failure() && contains(fromJSON('["success", "failure"]'), steps.run_tests.outcome)
        with:
          name: coverage-${{ env.id_string }}
          path: .coverage.${{ env.id_string }}
          # need to include hidden files since path starts with .
          include-hidden-files: true
      - name: Make test XML filename unique
        run: mv junit/test-results.xml ${{ env.id_string }}-test-results.xml
        # Run whether or not the tests passed, but only if they ran at all
        if: success() || failure() && contains(fromJSON('["success", "failure"]'), steps.run_tests.outcome)
      - name: Upload test XML files
        uses: actions/upload-artifact@v4
        if: success() || failure() && contains(fromJSON('["success", "failure"]'), steps.run_tests.outcome)
        with:
          name: tests-${{ env.id_string }}
          path: ${{ env.id_string }}-test-results.xml
      - name: Upload notebook outputs
        uses: actions/upload-artifact@v4
        if: success() || failure() && contains(fromJSON('["success", "failure"]'), steps.run_tests.outcome)
        with:
          name: notebooks-${{ env.id_string }}
          path: notebooks/output/
  tests:
    name: "Run tests"
    needs: [eval]
    if: ${{ needs.eval.outputs.testCode == 'True' }}
    strategy:
      matrix:
        os: [ubuntu-latest, windows-latest, macos-12]
        python-version: ['3.8', '3.9', '3.10', '3.11', '3.12']
        kind: [serial, other, dml, main, treatment, ray]
        exclude:
          # Serial tests fail randomly on mac sometimes, so we don't run them there
          - os: macos-12
            kind: serial
          # Ray tests run out of memory on Windows
          - os: windows-latest
            kind: ray
          # Ray doesn't currently support Python 3.12
          - python-version: '3.12'
            kind: ray
        # Assign the correct package and testing options for each kind of test
        include:
          - kind: serial
            opts: '-m "serial and not ray" -n 1'
            extras: ""
          - kind: other
            opts: '-m "cate_api and not ray" -n auto'
            extras: "[tf,plt]"
          - kind: dml
            opts: '-m "dml and not ray"'
            extras: "[tf,plt]"
          - kind: main
            opts: '-m "not (notebook or automl or dml or serial or cate_api or treatment_featurization or ray)" -n 2'
            extras: "[tf,plt,dowhy]"
          - kind: treatment
            opts: '-m "treatment_featurization and not ray" -n auto'
            extras: "[tf,plt]"
          - kind: ray
            opts: '-m "ray"'
            extras: "[ray]"
      fail-fast: false
    runs-on: ${{ matrix.os }}
    env:
      id_string: ${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.kind }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          ref: ${{ env.ref }}
      - name: Setup Python
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install uv
        # check if we're running on windows
        run: ${{ runner.os == 'Windows' && 'irm https://astral.sh/uv/install.ps1 | iex' || 'curl -LsSf https://astral.sh/uv/install.sh | sh' }}
      # Add verbose flag to pip installation if in debug mode
      - name: Install econml
        run: uv pip install --system -e .${{ matrix.extras }} ${{ fromJSON('["","-v"]')[runner.debug] }} ${{ env.use_lkg && '-r lkg.txt' }}
      - name: Save installed packages
        run: pip freeze --exclude-editable > tests-${{ env.id_string }}-requirements.txt
      - name: Upload installed packages
        uses: actions/upload-artifact@v4
        with:
          name: requirements-${{ env.id_string }}
          path: tests-${{ env.id_string }}-requirements.txt
      - name: Install pytest
        run: uv pip install --system pytest pytest-xdist pytest-cov coverage[toml]
      - name: Run tests
        run: python -m pytest
        id: run_tests
        env:
          PYTEST_ADDOPTS: ${{ matrix.opts }}
          COVERAGE_PROCESS_START: 'pyproject.toml'
      - name: Make coverage filename unique
        run: mv .coverage .coverage.${{ env.id_string }}
        # Run whether or not the tests passed, but only if they ran at all
        if: success() || failure() && contains(fromJSON('["success", "failure"]'), steps.run_tests.outcome)
      - name: Upload coverage report
        uses: actions/upload-artifact@v4
        if: success() || failure() && contains(fromJSON('["success", "failure"]'), steps.run_tests.outcome)
        with:
          name: coverage-${{ env.id_string }}
          path: .coverage.${{ env.id_string }}
          # need to include hidden files since path starts with .
          include-hidden-files: true
      - name: Make test XML filename unique
        run: mv junit/test-results.xml ${{ env.id_string }}-test-results.xml
        # Run whether or not the tests passed, but only if they ran at all
        if: success() || failure() && contains(fromJSON('["success", "failure"]'), steps.run_tests.outcome)
      - name: Upload test XML files
        uses: actions/upload-artifact@v4
        if: success() || failure() && contains(fromJSON('["success", "failure"]'), steps.run_tests.outcome)
        with:
          name: tests-${{ env.id_string }}
          path: ${{ env.id_string }}-test-results.xml
  coverage-report:
    name: "Coverage report"
    needs: [tests, notebooks]
    if: success() || failure()
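    # success() || failure() runs this job even when some test jobs failed (unlike
    # a plain success() dependency), but still skips it if the workflow was
    # cancelled (unlike always()).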
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          ref: ${{ env.ref }}
      - name: Get coverage reports
        uses: actions/download-artifact@v4
        with:
          pattern: coverage-*
          path: coverage
          merge-multiple: true
      - name: Setup Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.8'
      - name: Install coverage
        run: pip install coverage[toml]
      - name: Combine coverage reports
        run: coverage combine coverage/
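      # coverage combine merges the per-job .coverage.<id_string> data files
      # downloaded into coverage/ above into a single .coverage database, which
      # the report and html steps below then read.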
      - name: Generate coverage report
        run: coverage report -m --format=markdown > $GITHUB_STEP_SUMMARY
      - name: Generate coverage html
        run: coverage html --fail-under=86
      - name: Upload coverage report
        uses: actions/upload-artifact@v4
        with:
          name: coverage-report
          path: htmlcov
  merge-artifacts:
    name: "Merge artifacts"
    needs: [coverage-report, tests, notebooks]
    if: success() || failure()
    strategy:
      matrix:
        artifact: [requirements, tests, coverage, notebooks]
    runs-on: ubuntu-latest
    steps:
      - name: "Merge artifacts"
        uses: actions/upload-artifact/merge@v4
        with:
          name: ${{ matrix.artifact }}
          pattern: "${{ matrix.artifact }}-*"
          delete-merged: true
        # if we are re-running a job in a subsequent attempt, some of the other artifacts may not
        # exist in this attempt (e.g. notebooks, if only non-notebook tests failed)
        # Unlike with plain upload-artifact, there's no way to ignore the situation where no files
        # are found when using the v4 merge action
        # (see https://github.com/actions/upload-artifact/issues/520), so just continue on error instead
        continue-on-error: true
  build:
    name: Build package
    needs: [eval]
    if: ${{ needs.eval.outputs.testCode == 'True' }}
    uses: ./.github/workflows/publish-package.yml
    with:
      publish: false
      environment: test
      # the env context isn't available to reusable workflow inputs, so recompute the ref here
      ref: ${{ github.event_name == 'workflow_dispatch' && inputs.ref || null }}
      # can't use env context here so need to duplicate expression, but these are true boolean values so don't need extra string logic
      use_lkg: ${{ (github.event_name == 'workflow_dispatch' && inputs.use_lkg) || github.event_name == 'pull_request' }}
  docs:
    name: Build documentation
    needs: [eval]
    if: ${{ needs.eval.outputs.buildDocs == 'True' }}
    uses: ./.github/workflows/publish-documentation.yml
    with:
      publish: false
      environment: test
      # the env context isn't available to reusable workflow inputs, so recompute the ref here
      ref: ${{ github.event_name == 'workflow_dispatch' && inputs.ref || null }}
      # can't use env context here so need to duplicate expression, but these are true boolean values so don't need extra string logic
      use_lkg: ${{ (github.event_name == 'workflow_dispatch' && inputs.use_lkg) || github.event_name == 'pull_request' }}
  verify:
    name: Verify CI checks
    needs: [lint, notebooks, tests, build, docs]
    if: always()
    runs-on: ubuntu-latest
    steps:
      - name: At least one check failed or was cancelled
        run: exit 1
        if: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') }}
      - name: All checks passed
        run: exit 0
        if: ${{ !(contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled')) }}
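      # needs.*.result is an object filter yielding the array of result strings for
      # every job in needs, e.g. ['success', 'skipped', 'success', ...]; jobs skipped
      # by the eval gating count as neither failure nor cancellation, so this check
      # can still pass on docs-only or README-only PRs.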