Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

bump: testing with next PyTorch 2.6 #2836

Merged
merged 19 commits into from
Dec 19, 2024
Merged
Show file tree
Hide file tree
Changes from 14 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions .azure/gpu-unittests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,9 @@ jobs:
"PyTorch | 2.X stable":
docker-image: "ubuntu22.04-cuda12.1.1-py3.11-torch2.5"
torch-ver: "2.5"
"PyTorch | 2.X future":
docker-image: "ubuntu22.04-cuda12.4.1-py3.11-torch2.6"
torch-ver: "2.6"
# how long to run the job before automatically cancelling
timeoutInMinutes: "180"
# how much time to give 'run always even if cancelled tasks' before stopping them
Expand Down
12 changes: 6 additions & 6 deletions .github/workflows/ci-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -54,9 +54,9 @@ jobs:
- { os: "windows-2022", python-version: "3.10", pytorch-version: "2.0.1" }
- { os: "windows-2022", python-version: "3.12", pytorch-version: "2.5.0" }
# Future released version
#- { os: "ubuntu-22.04", python-version: "3.11", pytorch-version: "2.5.0" }
#- { os: "macOS-14", python-version: "3.11", pytorch-version: "2.5.0" }
#- { os: "windows-2022", python-version: "3.11", pytorch-version: "2.5.0" }
- { os: "ubuntu-22.04", python-version: "3.11", pytorch-version: "2.6.0" }
- { os: "macOS-14", python-version: "3.11", pytorch-version: "2.6.0" }
- { os: "windows-2022", python-version: "3.11", pytorch-version: "2.6.0" }
env:
FREEZE_REQUIREMENTS: ${{ ! (github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/heads/release/')) }}
PYPI_CACHE_DIR: "_ci-cache_PyPI"
Expand Down Expand Up @@ -99,9 +99,9 @@ jobs:
pytorch-version: ${{ matrix.pytorch-version }}
pypi-dir: ${{ env.PYPI_CACHE_DIR }}

#- name: Switch to PT test URL
# if: ${{ matrix.pytorch-version == '2.X.0' }}
# run: echo 'PIP_EXTRA_INDEX_URL=--extra-index-url https://download.pytorch.org/whl/test/cpu/' >> $GITHUB_ENV
- name: Switch to PT test URL
if: ${{ matrix.pytorch-version == '2.6.0' }}
run: echo 'PIP_EXTRA_INDEX_URL=--extra-index-url https://download.pytorch.org/whl/test/cpu/' >> $GITHUB_ENV
- name: Install pkg
timeout-minutes: 25
run: |
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/docker-build.yml
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,7 @@ jobs:
- { python: "3.11", pytorch: "2.4.1", cuda: "12.1.1", ubuntu: "22.04" }
- { python: "3.11", pytorch: "2.5.0", cuda: "12.1.1", ubuntu: "22.04" }
# the future version - test or RC version
#- { python: "3.11", pytorch: "2.6", cuda: "12.1.1", ubuntu: "22.04" }
- { python: "3.11", pytorch: "2.6.0", cuda: "12.4.1", ubuntu: "22.04" }
steps:
- uses: actions/checkout@v4

Expand Down
2 changes: 1 addition & 1 deletion requirements/audio.txt
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
pesq >=0.0.4, <0.0.5
numpy <2.0 # strict, for compatibility reasons
pystoi >=0.4.0, <0.5.0
torchaudio >=2.0.1, <2.6.0
torchaudio >=2.0.1, <2.7.0
gammatone >=1.0.0, <1.1.0
librosa >=0.10.0, <0.11.0
onnxruntime >=1.12.0, <1.21 # installing onnxruntime_gpu-gpu failed on macos
Expand Down
2 changes: 1 addition & 1 deletion requirements/base.txt
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,6 @@

numpy >1.20.0
packaging >17.1
torch >=2.0.0, <2.6.0
torch >=2.0.0, <2.7.0
typing-extensions; python_version < '3.9'
lightning-utilities >=0.8.0, <0.12.0
2 changes: 1 addition & 1 deletion requirements/detection.txt
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
# NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package
# in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment

torchvision >=0.15.1, <0.21.0
torchvision >=0.15.1, <0.22.0
pycocotools >2.0.0, <2.1.0
2 changes: 1 addition & 1 deletion requirements/image.txt
Original file line number Diff line number Diff line change
Expand Up @@ -2,5 +2,5 @@
# in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment

scipy >1.0.0, <1.15.0
torchvision >=0.15.1, <0.21.0
torchvision >=0.15.1, <0.22.0
torch-fidelity <=0.4.0 # bumping to allow install version from master, now used in testing
12 changes: 7 additions & 5 deletions src/torchmetrics/utilities/data.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@
from torch import Tensor

from torchmetrics.utilities.exceptions import TorchMetricsUserWarning
from torchmetrics.utilities.imports import _XLA_AVAILABLE
from torchmetrics.utilities.imports import _TORCH_LESS_THAN_2_6, _XLA_AVAILABLE
from torchmetrics.utilities.prints import rank_zero_warn

METRIC_EPS = 1e-6
Expand Down Expand Up @@ -207,11 +207,13 @@ def _bincount(x: Tensor, minlength: Optional[int] = None) -> Tensor:


def _cumsum(x: Tensor, dim: Optional[int] = 0, dtype: Optional[torch.dtype] = None) -> Tensor:
if torch.are_deterministic_algorithms_enabled() and x.is_cuda and x.is_floating_point() and sys.platform != "win32":
"""Implement custom cumulative summation for Torch versions which does not support it natively."""
is_cuda_fp_deterministic = torch.are_deterministic_algorithms_enabled() and x.is_cuda and x.is_floating_point()
if _TORCH_LESS_THAN_2_6 and is_cuda_fp_deterministic and sys.platform != "win32":
rank_zero_warn(
"You are trying to use a metric in deterministic mode on GPU that uses `torch.cumsum`, which is currently "
"not supported. The tensor will be copied to the CPU memory to compute it and then copied back to GPU. "
"Expect some slowdowns.",
"You are trying to use a metric in deterministic mode on GPU that uses `torch.cumsum`, which is currently"
" not supported. The tensor will be copied to the CPU memory to compute it and then copied back to GPU."
" Expect some slowdowns.",
TorchMetricsUserWarning,
)
return x.cpu().cumsum(dim=dim, dtype=dtype).to(x.device)
Expand Down
1 change: 1 addition & 0 deletions src/torchmetrics/utilities/imports.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
_TORCH_GREATER_EQUAL_2_1 = RequirementCache("torch>=2.1.0")
_TORCH_GREATER_EQUAL_2_2 = RequirementCache("torch>=2.2.0")
_TORCH_GREATER_EQUAL_2_5 = RequirementCache("torch>=2.5.0")
_TORCH_LESS_THAN_2_6 = RequirementCache("torch<2.6.0")
_TORCHMETRICS_GREATER_EQUAL_1_6 = RequirementCache("torchmetrics>=1.7.0")

_NLTK_AVAILABLE = RequirementCache("nltk")
Expand Down
3 changes: 2 additions & 1 deletion tests/unittests/image/test_uqi.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@
from torch import Tensor
from torchmetrics.functional.image.uqi import universal_image_quality_index
from torchmetrics.image.uqi import UniversalImageQualityIndex
from torchmetrics.utilities.imports import _TORCH_LESS_THAN_2_6

from unittests import BATCH_SIZE, NUM_BATCHES
from unittests._helpers import seed_all
Expand Down Expand Up @@ -109,7 +110,7 @@ def test_uqi_functional(self, preds, target, multichannel, kernel_size):
)

# UQI half + cpu does not work due to missing support in torch.log
@pytest.mark.xfail(reason="UQI metric does not support cpu + half precision")
@pytest.mark.xfail(condition=_TORCH_LESS_THAN_2_6, reason="UQI metric does not support cpu + half precision")
def test_uqi_half_cpu(self, preds, target, multichannel, kernel_size):
"""Test dtype support of the metric on CPU."""
self.run_precision_test_cpu(
Expand Down
6 changes: 3 additions & 3 deletions tests/unittests/utilities/test_utilities.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@
)
from torchmetrics.utilities.distributed import class_reduce, reduce
from torchmetrics.utilities.exceptions import TorchMetricsUserWarning
from torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_2_2
from torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_2_2, _TORCH_LESS_THAN_2_6


def test_prints():
Expand Down Expand Up @@ -171,9 +171,9 @@ def test_recursive_allclose(inputs, expected):


@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU")
@pytest.mark.xfail(sys.platform == "win32", reason="test will only fail on non-windows systems")
@pytest.mark.xfail(sys.platform == "win32" or _TORCH_LESS_THAN_2_6, reason="test will only fail on non-windows systems")
def test_cumsum_still_not_supported(use_deterministic_algorithms):
"""Make sure that cumsum on gpu and deterministic mode still fails.
"""Make sure that cumsum on GPU and deterministic mode still fails.

If this test begins to pass, it means newer PyTorch versions support this and we can drop internal support.

Expand Down
Loading