Removed Deprecated compute_on_step from Classification (#962)
* Removed deprecated `compute_on_step` from Classification
* chlog

Co-authored-by: Jirka <[email protected]>
tanmoyio and Borda authored Apr 20, 2022
1 parent 439d205 commit 1a62754
Showing 20 changed files with 22 additions and 177 deletions.
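
The change across the classification metrics below is mechanical: the deprecated `compute_on_step` argument is dropped from each constructor signature and is no longer forwarded to `Metric.__init__`. The sketch below is not part of the diff; assuming the torchmetrics 0.9-era classification API, it illustrates how user code that previously passed `compute_on_step=False` can rely on explicit `update()`/`compute()` calls instead.

```python
import torch
from torchmetrics import Accuracy

# Before (<= v0.8): Accuracy(compute_on_step=False) made forward() return None.
# After (>= v0.9): construct without the argument and accumulate explicitly.
metric = Accuracy()

for _ in range(3):
    preds = torch.randint(0, 2, (8,))
    target = torch.randint(0, 2, (8,))
    metric.update(preds, target)   # accumulate state only, no per-step value

print(metric.compute())            # accuracy over all accumulated batches
metric.reset()                     # clear state, e.g. at epoch end
```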
2 changes: 1 addition & 1 deletion CHANGELOG.md
@@ -35,7 +35,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

### Removed

-
- Removed deprecated `compute_on_step` argument ([#962](https://github.com/PyTorchLightning/metrics/pull/962))


-
8 changes: 0 additions & 8 deletions torchmetrics/classification/accuracy.py
@@ -126,12 +126,6 @@ class Accuracy(StatScores):
``preds = preds.flatten()`` and same for ``target``). Note that the ``top_k`` parameter
still applies in both cases, if set.
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False.
.. deprecated:: v0.8
Argument has no use anymore and will be removed v0.9.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
@@ -175,7 +169,6 @@ def __init__(
top_k: Optional[int] = None,
multiclass: Optional[bool] = None,
subset_accuracy: bool = False,
compute_on_step: Optional[bool] = None,
**kwargs: Dict[str, Any],
) -> None:
allowed_average = ["micro", "macro", "weighted", "samples", "none", None]
@@ -190,7 +183,6 @@
num_classes=num_classes,
multiclass=multiclass,
ignore_index=ignore_index,
compute_on_step=compute_on_step,
**kwargs,
)

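A minimal sketch (not from the diff; shapes and settings are illustrative) of the trimmed `Accuracy` constructor: `forward` returns the value for the current batch, while `compute()` returns the value accumulated over all batches seen.

```python
import torch
from torchmetrics import Accuracy

acc = Accuracy(num_classes=3)                     # no compute_on_step argument
preds = torch.softmax(torch.randn(8, 3), dim=-1)  # per-class probabilities
target = torch.randint(0, 3, (8,))

batch_acc = acc(preds, target)                    # metric on this batch (forward)
total_acc = acc.compute()                         # metric over everything seen so far
print(batch_acc, total_acc)
```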
10 changes: 2 additions & 8 deletions torchmetrics/classification/auc.py
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Optional
from typing import Any, Dict, List

from torch import Tensor

@@ -32,11 +32,6 @@ class AUC(Metric):
reorder: AUC expects its first input to be sorted. If this is not the case,
setting this argument to ``True`` will use a stable sorting algorithm to
sort the input in descending order
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False.
.. deprecated:: v0.8
Argument has no use anymore and will be removed v0.9.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
"""
@@ -47,10 +42,9 @@
def __init__(
self,
reorder: bool = False,
compute_on_step: Optional[bool] = None,
**kwargs: Dict[str, Any],
) -> None:
super().__init__(compute_on_step=compute_on_step, **kwargs)
super().__init__(**kwargs)

self.reorder = reorder

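A short sketch of the updated `AUC` constructor call; the input values are made up for illustration.

```python
import torch
from torchmetrics import AUC

auc = AUC(reorder=True)            # compute_on_step is no longer accepted
x = torch.tensor([0.1, 0.3, 0.5, 0.8])
y = torch.tensor([0.2, 0.4, 0.6, 0.9])
print(auc(x, y))                   # area under the x-y curve
```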
8 changes: 1 addition & 7 deletions torchmetrics/classification/auroc.py
@@ -63,11 +63,6 @@ class AUROC(Metric):
max_fpr:
If not ``None``, calculates standardized partial AUC over the
range ``[0, max_fpr]``. Should be a float between 0 and 1.
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False.
.. deprecated:: v0.8
Argument has no use anymore and will be removed v0.9.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
@@ -113,10 +108,9 @@ def __init__(
pos_label: Optional[int] = None,
average: Optional[str] = "macro",
max_fpr: Optional[float] = None,
compute_on_step: Optional[bool] = None,
**kwargs: Dict[str, Any],
) -> None:
super().__init__(compute_on_step=compute_on_step, **kwargs)
super().__init__(**kwargs)

self.num_classes = num_classes
self.pos_label = pos_label
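A sketch of binary `AUROC` after the change, assuming positive-class probabilities and `{0, 1}` targets; the `max_fpr` value is purely illustrative.

```python
import torch
from torchmetrics import AUROC

auroc = AUROC(pos_label=1, max_fpr=0.8)   # signature without compute_on_step
preds = torch.tensor([0.13, 0.26, 0.08, 0.91])
target = torch.tensor([0, 0, 1, 1])
print(auroc(preds, target))
```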
9 changes: 1 addition & 8 deletions torchmetrics/classification/avg_precision.py
@@ -57,12 +57,6 @@ class AveragePrecision(Metric):
- ``'none'`` or ``None``: Calculate the metric for each class separately, and return
the metric for every class.
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False.
.. deprecated:: v0.8
Argument has no use anymore and will be removed v0.9.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example (binary case):
@@ -93,10 +87,9 @@ def __init__(
num_classes: Optional[int] = None,
pos_label: Optional[int] = None,
average: Optional[str] = "macro",
compute_on_step: Optional[bool] = None,
**kwargs: Dict[str, Any],
) -> None:
super().__init__(compute_on_step=compute_on_step, **kwargs)
super().__init__(**kwargs)

self.num_classes = num_classes
self.pos_label = pos_label
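A sketch of multiclass `AveragePrecision` with per-class output (`average=None`); the shapes and class count are assumptions for illustration.

```python
import torch
from torchmetrics import AveragePrecision

ap = AveragePrecision(num_classes=3, average=None)
preds = torch.softmax(torch.randn(10, 3), dim=-1)
target = torch.randint(0, 3, (10,))
print(ap(preds, target))           # one average-precision value per class
```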
23 changes: 3 additions & 20 deletions torchmetrics/classification/binned_precision_recall.py
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Optional, Tuple, Union
from typing import Any, Dict, List, Tuple, Union

import torch
from torch import Tensor
@@ -61,11 +61,6 @@ class BinnedPrecisionRecallCurve(Metric):
thresholds: list or tensor with specific thresholds, or a number of bins for linear sampling.
More thresholds will lead to a more detailed curve and more accurate estimates,
but will be slower and consume more memory.
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False.
.. deprecated:: v0.8
Argument has no use anymore and will be removed v0.9.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
@@ -122,10 +117,9 @@ def __init__(
self,
num_classes: int,
thresholds: Union[int, Tensor, List[float], None] = None,
compute_on_step: Optional[bool] = None,
**kwargs: Dict[str, Any],
) -> None:
super().__init__(compute_on_step=compute_on_step, **kwargs)
super().__init__(**kwargs)

self.num_classes = num_classes
if isinstance(thresholds, int):
@@ -203,11 +197,6 @@ class BinnedAveragePrecision(BinnedPrecisionRecallCurve):
thresholds: list or tensor with specific thresholds, or a number of bins for linear sampling.
More thresholds will lead to a more detailed curve and more accurate estimates,
but will be slower and consume more memory.
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False.
.. deprecated:: v0.8
Argument has no use anymore and will be removed v0.9.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
@@ -258,11 +247,6 @@ class BinnedRecallAtFixedPrecision(BinnedPrecisionRecallCurve):
thresholds: list or tensor with specific thresholds, or a number of bins for linear sampling.
More thresholds will lead to a more detailed curve and more accurate estimates,
but will be slower and consume more memory.
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False.
.. deprecated:: v0.8
Argument has no use anymore and will be removed v0.9.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
@@ -295,10 +279,9 @@ def __init__(
num_classes: int,
min_precision: float,
thresholds: Union[int, Tensor, List[float], None] = None,
compute_on_step: Optional[bool] = None,
**kwargs: Dict[str, Any],
) -> None:
super().__init__(num_classes=num_classes, thresholds=thresholds, compute_on_step=compute_on_step, **kwargs)
super().__init__(num_classes=num_classes, thresholds=thresholds, **kwargs)
self.min_precision = min_precision

def compute(self) -> Tuple[Tensor, Tensor]: # type: ignore
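A sketch of the binned variants after the change, with `thresholds` given as a bin count; all values are assumptions for illustration.

```python
import torch
from torchmetrics import BinnedPrecisionRecallCurve, BinnedRecallAtFixedPrecision

preds = torch.softmax(torch.randn(20, 4), dim=-1)
target = torch.randint(0, 4, (20,))

curve = BinnedPrecisionRecallCurve(num_classes=4, thresholds=50)
precision, recall, thresholds = curve(preds, target)

recall_at_p = BinnedRecallAtFixedPrecision(num_classes=4, min_precision=0.5, thresholds=50)
print(recall_at_p(preds, target))  # per-class (recall, threshold) at >= 50% precision
```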
10 changes: 2 additions & 8 deletions torchmetrics/classification/calibration_error.py
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Optional
from typing import Any, Dict, List

import torch
from torch import Tensor
@@ -52,11 +52,6 @@ class CalibrationError(Metric):
norm: Norm used to compare empirical and expected probability bins.
Defaults to "l1", or Expected Calibration Error.
debias: Applies debiasing term, only implemented for l2 norm. Defaults to True.
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False.
.. deprecated:: v0.8
Argument has no use anymore and will be removed v0.9.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
"""
@@ -69,11 +64,10 @@ def __init__(
self,
n_bins: int = 15,
norm: str = "l1",
compute_on_step: Optional[bool] = None,
**kwargs: Dict[str, Any],
):

super().__init__(compute_on_step=compute_on_step, **kwargs)
super().__init__(**kwargs)

if norm not in self.DISTANCES:
raise ValueError(f"Norm {norm} is not supported. Please select from l1, l2, or max. ")
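A sketch of `CalibrationError` (expected calibration error, `l1` norm) on binary confidence scores; the data is random and purely illustrative.

```python
import torch
from torchmetrics import CalibrationError

ece = CalibrationError(n_bins=10, norm="l1")   # constructor without compute_on_step
preds = torch.rand(100)                        # predicted positive-class probabilities
target = torch.randint(0, 2, (100,))           # binary ground truth
print(ece(preds, target))
```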
9 changes: 1 addition & 8 deletions torchmetrics/classification/cohen_kappa.py
@@ -56,12 +56,6 @@ class labels.
Threshold for transforming probability or logit predictions to binary ``(0,1)`` predictions, in the case
of binary or multi-label inputs. Default value of ``0.5`` corresponds to input being probabilities.
compute_on_step:
Forward only calls ``update()`` and returns ``None`` if this is set to ``False``.
.. deprecated:: v0.8
Argument has no use anymore and will be removed v0.9.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
@@ -82,10 +76,9 @@ def __init__(
num_classes: int,
weights: Optional[str] = None,
threshold: float = 0.5,
compute_on_step: Optional[bool] = None,
**kwargs: Dict[str, Any],
) -> None:
super().__init__(compute_on_step=compute_on_step, **kwargs)
super().__init__(**kwargs)
self.num_classes = num_classes
self.weights = weights
self.threshold = threshold
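A sketch of `CohenKappa` with the trimmed signature; the labels are made up.

```python
import torch
from torchmetrics import CohenKappa

kappa = CohenKappa(num_classes=2)
preds = torch.tensor([1, 1, 0, 1])
target = torch.tensor([1, 0, 0, 1])
print(kappa(preds, target))
```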
8 changes: 1 addition & 7 deletions torchmetrics/classification/confusion_matrix.py
@@ -54,11 +54,6 @@ class ConfusionMatrix(Metric):
of binary or multi-label inputs. Default value of ``0.5`` corresponds to input being probabilities.
multilabel: determines if data is multilabel or not.
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False.
.. deprecated:: v0.8
Argument has no use anymore and will be removed v0.9.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
@@ -99,10 +94,9 @@ def __init__(
normalize: Optional[str] = None,
threshold: float = 0.5,
multilabel: bool = False,
compute_on_step: Optional[bool] = None,
**kwargs: Dict[str, Any],
) -> None:
super().__init__(compute_on_step=compute_on_step, **kwargs)
super().__init__(**kwargs)
self.num_classes = num_classes
self.normalize = normalize
self.threshold = threshold
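A sketch of `ConfusionMatrix`; `normalize="true"` (normalization over the true labels) is chosen here only to illustrate the `normalize` option.

```python
import torch
from torchmetrics import ConfusionMatrix

confmat = ConfusionMatrix(num_classes=3, normalize="true")
preds = torch.tensor([0, 2, 1, 1, 0, 2])
target = torch.tensor([0, 1, 1, 2, 0, 2])
print(confmat(preds, target))      # 3x3 matrix, rows normalized over true labels
```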
16 changes: 0 additions & 16 deletions torchmetrics/classification/f_beta.py
@@ -102,12 +102,6 @@ class FBetaScore(StatScores):
:ref:`documentation section <pages/classification:using the multiclass parameter>`
for a more detailed explanation and examples.
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False.
.. deprecated:: v0.8
Argument has no use anymore and will be removed v0.9.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
@@ -135,7 +129,6 @@ def __init__(
ignore_index: Optional[int] = None,
top_k: Optional[int] = None,
multiclass: Optional[bool] = None,
compute_on_step: Optional[bool] = None,
**kwargs: Dict[str, Any],
) -> None:
self.beta = beta
@@ -151,7 +144,6 @@ def __init__(
num_classes=num_classes,
multiclass=multiclass,
ignore_index=ignore_index,
compute_on_step=compute_on_step,
**kwargs,
)

@@ -237,12 +229,6 @@ class F1Score(FBetaScore):
:ref:`documentation section <pages/classification:using the multiclass parameter>`
for a more detailed explanation and examples.
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False.
.. deprecated:: v0.8
Argument has no use anymore and will be removed v0.9.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
@@ -268,7 +254,6 @@ def __init__(
ignore_index: Optional[int] = None,
top_k: Optional[int] = None,
multiclass: Optional[bool] = None,
compute_on_step: Optional[bool] = None,
**kwargs: Dict[str, Any],
) -> None:
super().__init__(
@@ -280,6 +265,5 @@ def __init__(
ignore_index=ignore_index,
top_k=top_k,
multiclass=multiclass,
compute_on_step=compute_on_step,
**kwargs,
)
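
A sketch of the trimmed `FBetaScore` and `F1Score` constructors; the class count, `beta`, and labels are illustrative.

```python
import torch
from torchmetrics import F1Score, FBetaScore

preds = torch.tensor([0, 2, 1, 0, 0, 1])
target = torch.tensor([0, 1, 2, 0, 1, 2])

f2 = FBetaScore(num_classes=3, beta=2.0)   # no compute_on_step to pass or forward
f1 = F1Score(num_classes=3)
print(f2(preds, target), f1(preds, target))
```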
10 changes: 2 additions & 8 deletions torchmetrics/classification/hamming.py
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, Optional
from typing import Any, Dict

import torch
from torch import Tensor, tensor
@@ -40,11 +40,6 @@ class HammingDistance(Metric):
threshold:
Threshold for transforming probability or logit predictions to binary ``(0,1)`` predictions, in the case
of binary or multi-label inputs. Default value of ``0.5`` corresponds to input being probabilities.
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False.
.. deprecated:: v0.8
Argument has no use anymore and will be removed v0.9.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
@@ -69,10 +64,9 @@ class HammingDistance(Metric):
def __init__(
self,
threshold: float = 0.5,
compute_on_step: Optional[bool] = None,
**kwargs: Dict[str, Any],
) -> None:
super().__init__(compute_on_step=compute_on_step, **kwargs)
super().__init__(**kwargs)

self.add_state("correct", default=tensor(0), dist_reduce_fx="sum")
self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
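A sketch of `HammingDistance` on multilabel probabilities with the default threshold; the inputs are made up.

```python
import torch
from torchmetrics import HammingDistance

hamming = HammingDistance(threshold=0.5)
preds = torch.tensor([[0.9, 0.2, 0.7], [0.3, 0.8, 0.1]])
target = torch.tensor([[1, 0, 1], [0, 1, 1]])
print(hamming(preds, target))      # fraction of individual labels predicted wrong
```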
8 changes: 1 addition & 7 deletions torchmetrics/classification/hinge.py
@@ -54,11 +54,6 @@ class HingeLoss(Metric):
Which approach to use for multi-class inputs (has no effect in the binary case). ``None`` (default),
``MulticlassMode.CRAMMER_SINGER`` or ``"crammer-singer"``, uses the Crammer Singer multi-class hinge loss.
``MulticlassMode.ONE_VS_ALL`` or ``"one-vs-all"`` computes the hinge loss in a one-vs-all fashion.
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False.
.. deprecated:: v0.8
Argument has no use anymore and will be removed v0.9.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
@@ -101,10 +96,9 @@ def __init__(
self,
squared: bool = False,
multiclass_mode: Optional[Union[str, MulticlassMode]] = None,
compute_on_step: Optional[bool] = None,
**kwargs: Dict[str, Any],
) -> None:
super().__init__(compute_on_step=compute_on_step, **kwargs)
super().__init__(**kwargs)

self.add_state("measure", default=tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
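A sketch of binary `HingeLoss` with raw decision scores and `{0, 1}` targets; the numbers are illustrative and `multiclass_mode` is left at its default.

```python
import torch
from torchmetrics import HingeLoss

hinge = HingeLoss(squared=False)
preds = torch.tensor([-2.2, 2.4, 0.1])
target = torch.tensor([0, 1, 1])
print(hinge(preds, target))
```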
(Diffs for the remaining changed files are not shown.)