More robust metric state comparison #1022

Merged · 5 commits · May 13, 2022
Changes from 2 commits
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -58,6 +58,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

- Fixed non-empty state dict for a few metrics ([#1012](https://github.com/PyTorchLightning/metrics/pull/1012))

- Fixed bug when comparing states while finding compute groups ([#1022](https://github.com/PyTorchLightning/metrics/pull/1022))


## [0.8.2] - 2022-05-06

30 changes: 29 additions & 1 deletion tests/bases/test_collections.py
@@ -20,7 +20,17 @@

from tests.helpers import seed_all
from tests.helpers.testers import DummyMetricDiff, DummyMetricSum
from torchmetrics import Accuracy, CohenKappa, ConfusionMatrix, F1Score, Metric, MetricCollection, Precision, Recall
from torchmetrics import (
Accuracy,
CohenKappa,
ConfusionMatrix,
F1Score,
MatthewsCorrCoef,
Metric,
MetricCollection,
Precision,
Recall,
)

seed_all(42)

@@ -406,6 +416,24 @@ def test_compute_group_define_by_user():
    assert m.compute()


def test_compute_on_different_dtype():
    """Check that extraction of compute groups is robust to differences in dtype."""
    m = MetricCollection(
        [
            ConfusionMatrix(num_classes=3),
            MatthewsCorrCoef(num_classes=3),
        ]
    )
    assert not m._groups_checked
    assert m.compute_groups == {0: ["ConfusionMatrix"], 1: ["MatthewsCorrCoef"]}
    preds = torch.randn(10, 3).softmax(dim=-1)
    target = torch.randint(3, (10,))
    for _ in range(2):
        m.update(preds, target)
    assert m.compute_groups == {0: ["ConfusionMatrix", "MatthewsCorrCoef"]}
    assert m.compute()


def test_error_on_wrong_specified_compute_groups():
    """Test that an error is raised if the user mis-specifies the compute groups."""
    with pytest.raises(ValueError, match="Input Accuracy in `compute_groups`.*"):
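The new `test_compute_on_different_dtype` above pins down the regression: the two metrics start in separate compute groups, and after a couple of updates the collection must be able to compare their states and merge them even though the states do not share a dtype. Below is a minimal standalone sketch of the underlying failure mode, outside of torchmetrics; the concrete dtypes are illustrative assumptions, not read off the diff.

```python
import torch

# Two accumulated states that agree in shape and value but not in dtype.
state1 = torch.zeros(3, 3)                    # float32
state2 = torch.zeros(3, 3, dtype=torch.long)  # int64

# torch.allclose expects matching dtypes; on many PyTorch versions this raises
# a RuntimeError instead of returning False, which is what previously broke
# compute-group detection.
try:
    print("states equal:", torch.allclose(state1, state2))
except RuntimeError as err:
    print("comparison raised:", err)
```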
6 changes: 3 additions & 3 deletions torchmetrics/collections.py
@@ -20,7 +20,7 @@

from torchmetrics.metric import Metric
from torchmetrics.utilities import rank_zero_warn
from torchmetrics.utilities.data import _flatten_dict
from torchmetrics.utilities.data import _flatten_dict, allclose

# this is just a bypass for this module name collision with built-in one
from torchmetrics.utilities.imports import OrderedDict
@@ -231,10 +231,10 @@ def _equal_metric_states(metric1: Metric, metric2: Metric) -> bool:
return False

if isinstance(state1, Tensor) and isinstance(state2, Tensor):
return state1.shape == state2.shape and torch.allclose(state1, state2)
return state1.shape == state2.shape and allclose(state1, state2)

if isinstance(state1, list) and isinstance(state2, list):
return all(s1.shape == s2.shape and torch.allclose(s1, s2) for s1, s2 in zip(state1, state2))
return all(s1.shape == s2.shape and allclose(s1, s2) for s1, s2 in zip(state1, state2))

return True

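For context on what `_equal_metric_states` feeds into: when all states of two metrics compare equal, `MetricCollection` merges them into one compute group so they can share state rather than accumulate the same statistics twice. The sketch below shows that behaviour at the user level, assuming the 0.8-era classification API (`num_classes` argument) and following the two-update pattern used in the tests above; the printed group layout is indicative, not asserted from this diff.

```python
import torch
from torchmetrics import MetricCollection, Precision, Recall

# Precision and Recall accumulate the same underlying statistics, so their
# states compare equal and the collection can fold them into one compute group.
mc = MetricCollection([Precision(num_classes=3), Recall(num_classes=3)])

preds = torch.randint(3, (10,))
target = torch.randint(3, (10,))
for _ in range(2):
    mc.update(preds, target)

print(mc.compute_groups)  # e.g. {0: ['Precision', 'Recall']}
print(mc.compute())       # {'Precision': tensor(...), 'Recall': tensor(...)}
```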
7 changes: 7 additions & 0 deletions torchmetrics/utilities/data.py
@@ -248,3 +248,10 @@ def _bincount(x: Tensor, minlength: Optional[int] = None) -> Tensor:
        return output
    else:
        return torch.bincount(x, minlength=minlength)


def allclose(tensor1: Tensor, tensor2: Tensor) -> bool:
    """Wrapper of torch.allclose that is robust to dtype differences."""
    if tensor1.dtype != tensor2.dtype:
        tensor2 = tensor2.to(dtype=tensor1.dtype)
    return torch.allclose(tensor1, tensor2)
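A small usage sketch of the new helper; the tensor values are illustrative. Note the design choice visible in the code: the comparison is carried out in `tensor1`'s dtype, so comparing an integer tensor against a float tensor happens on truncated values.

```python
import torch
from torchmetrics.utilities.data import allclose

float_state = torch.tensor([1.0, 2.0, 3.0])
long_state = torch.tensor([1, 2, 3])  # int64

# Plain torch.allclose may refuse this dtype mix; the wrapper first casts the
# second tensor to the first tensor's dtype, then compares values.
print(allclose(float_state, long_state))  # True

# Casting direction matters: with an integer tensor first, 1.4 truncates to 1.
print(allclose(torch.tensor([1]), torch.tensor([1.4])))  # True
```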