name: Tests

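# Run on every pull request, on pushes to the nightly, main, and release/* branches,
# and on manual workflow_dispatch triggers.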
on:
  pull_request:
  push:
    branches:
      - nightly
      - main
      - release/*
  workflow_dispatch:

jobs:
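  # Linux unit tests: CPU runs for Python 3.9-3.12 on linux.12xlarge, plus one
  # CUDA 11.8 run on Python 3.9 using the g5 GPU runner. Delegates to the reusable
  # linux_job_v2.yml workflow from pytorch/test-infra.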
  unittests-linux:
    strategy:
      matrix:
        python-version:
          - "3.9"
          - "3.10"
          - "3.11"
          - "3.12"
        runner: ["linux.12xlarge"]
        gpu-arch-type: ["cpu"]
        include:
          - python-version: "3.9"
            runner: linux.g5.4xlarge.nvidia.gpu
            gpu-arch-type: cuda
            gpu-arch-version: "11.8"
      fail-fast: false
    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
    with:
      repository: pytorch/vision
      runner: ${{ matrix.runner }}
      gpu-arch-type: ${{ matrix.gpu-arch-type }}
      gpu-arch-version: ${{ matrix.gpu-arch-version }}
      timeout: 120
      test-infra-ref: main
      script: |
        set -euo pipefail

        export PYTHON_VERSION=${{ matrix.python-version }}
        export GPU_ARCH_TYPE=${{ matrix.gpu-arch-type }}
        export GPU_ARCH_VERSION=${{ matrix.gpu-arch-version }}

        ./.github/scripts/unittest.sh

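  # macOS unit tests: CPU-only runs for Python 3.9-3.12 on Apple Silicon
  # (macos-m1-stable) runners, via the reusable macos_job.yml workflow.
  # ${CONDA_RUN} is provided by that reusable workflow and runs the command
  # inside its conda environment.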
  unittests-macos:
    strategy:
      matrix:
        python-version:
          - "3.9"
          - "3.10"
          - "3.11"
          - "3.12"
        runner: ["macos-m1-stable"]
      fail-fast: false
    uses: pytorch/test-infra/.github/workflows/macos_job.yml@main
    with:
      repository: pytorch/vision
      timeout: 240
      runner: ${{ matrix.runner }}
      test-infra-ref: main
      script: |
        set -euo pipefail

        export PYTHON_VERSION=${{ matrix.python-version }}
        export GPU_ARCH_TYPE=cpu
        export GPU_ARCH_VERSION=''

        ${CONDA_RUN} ./.github/scripts/unittest.sh

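  # Windows unit tests: CPU runs for Python 3.9-3.12 on windows.4xlarge, plus one
  # CUDA 11.8 run on Python 3.9. VC_YEAR and VSDEVCMD_ARGS are exported for the
  # unittest script, which presumably uses them to select the MSVC 2019 toolchain.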
  unittests-windows:
    strategy:
      matrix:
        python-version:
          - "3.9"
          - "3.10"
          - "3.11"
          - "3.12"
        runner: ["windows.4xlarge"]
        gpu-arch-type: ["cpu"]
        include:
          - python-version: "3.9"
            runner: windows.g5.4xlarge.nvidia.gpu
            gpu-arch-type: cuda
            gpu-arch-version: "11.8"
      fail-fast: false
    uses: pytorch/test-infra/.github/workflows/windows_job.yml@main
    with:
      repository: pytorch/vision
      runner: ${{ matrix.runner }}
      gpu-arch-type: ${{ matrix.gpu-arch-type }}
      gpu-arch-version: ${{ matrix.gpu-arch-version }}
      timeout: 120
      test-infra-ref: main
      script: |
        set -euxo pipefail

        export PYTHON_VERSION=${{ matrix.python-version }}
        export VC_YEAR=2019
        export VSDEVCMD_ARGS=""
        export GPU_ARCH_TYPE=${{ matrix.gpu-arch-type }}
        export GPU_ARCH_VERSION=${{ matrix.gpu-arch-version }}

        ./.github/scripts/unittest.sh

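  # ONNX tests: a single CPU-only run on Python 3.10 that installs onnx and
  # onnxruntime into the CI conda environment and runs test/test_onnx.py.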
  onnx:
    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
    with:
      repository: pytorch/vision
      test-infra-ref: main
      script: |
        set -euo pipefail

        export PYTHON_VERSION=3.10
        export GPU_ARCH_TYPE=cpu
        export GPU_ARCH_VERSION=''

        ./.github/scripts/setup-env.sh

        # Prepare conda
        CONDA_PATH=$(which conda)
        eval "$(${CONDA_PATH} shell.bash hook)"
        conda activate ci

        echo '::group::Install ONNX'
        pip install --progress-bar=off onnx onnxruntime
        echo '::endgroup::'

        echo '::group::Install testing utilities'
        pip install --progress-bar=off pytest "numpy<2"
        echo '::endgroup::'

        echo '::group::Run ONNX tests'
        pytest --junit-xml="${RUNNER_TEST_RESULTS_DIR}/test-results.xml" -v --durations=25 test/test_onnx.py
        echo '::endgroup::'

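  # Extended unit tests: run only when the pull request carries the 'run-extended'
  # label. Pre-downloads model weight URLs, then runs test/test_extended_*.py with
  # PYTORCH_TEST_WITH_EXTENDED=1.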
  unittests-extended:
    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
    if: contains(github.event.pull_request.labels.*.name, 'run-extended')
    with:
      repository: pytorch/vision
      test-infra-ref: main
      script: |
        set -euo pipefail

        export PYTHON_VERSION=3.9
        export GPU_ARCH_TYPE=cpu
        export GPU_ARCH_VERSION=''

        ./.github/scripts/setup-env.sh

        # Prepare conda
        CONDA_PATH=$(which conda)
        eval "$(${CONDA_PATH} shell.bash hook)"
        conda activate ci

        echo '::group::Pre-download model weights'
        pip install --progress-bar=off aiohttp aiofiles tqdm
        python scripts/download_model_urls.py
        echo '::endgroup::'

        echo '::group::Install testing utilities'
        # TODO: remove the <8 constraint on pytest when https://github.com/pytorch/vision/issues/8238 is closed
        pip install --progress-bar=off "pytest<8"
        echo '::endgroup::'

        echo '::group::Run extended unittests'
        export PYTORCH_TEST_WITH_EXTENDED=1
        pytest --junit-xml="${RUNNER_TEST_RESULTS_DIR}/test-results.xml" -v --durations=25 test/test_extended_*.py
        echo '::endgroup::'