diff --git a/CHANGELOG.md b/CHANGELOG.md index c9818244755..847fdee0dd1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,7 +35,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Removed -- +- Removed deprecated `compute_on_step` argument ([#962](https://github.com/PyTorchLightning/metrics/pull/962)) - diff --git a/torchmetrics/classification/accuracy.py b/torchmetrics/classification/accuracy.py index 6ed0957838d..5af1f0c5787 100644 --- a/torchmetrics/classification/accuracy.py +++ b/torchmetrics/classification/accuracy.py @@ -126,12 +126,6 @@ class Accuracy(StatScores): ``preds = preds.flatten()`` and same for ``target``). Note that the ``top_k`` parameter still applies in both cases, if set. - compute_on_step: - Forward only calls ``update()`` and returns None if this is set to False. - - .. deprecated:: v0.8 - Argument has no use anymore and will be removed v0.9. - kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. Raises: @@ -175,7 +169,6 @@ def __init__( top_k: Optional[int] = None, multiclass: Optional[bool] = None, subset_accuracy: bool = False, - compute_on_step: Optional[bool] = None, **kwargs: Dict[str, Any], ) -> None: allowed_average = ["micro", "macro", "weighted", "samples", "none", None] @@ -190,7 +183,6 @@ def __init__( num_classes=num_classes, multiclass=multiclass, ignore_index=ignore_index, - compute_on_step=compute_on_step, **kwargs, ) diff --git a/torchmetrics/classification/auc.py b/torchmetrics/classification/auc.py index 0490de54e22..fb43bf525f0 100644 --- a/torchmetrics/classification/auc.py +++ b/torchmetrics/classification/auc.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List from torch import Tensor @@ -32,11 +32,6 @@ class AUC(Metric): reorder: AUC expects its first input to be sorted. If this is not the case, setting this argument to ``True`` will use a stable sorting algorithm to sort the input in descending order - compute_on_step: - Forward only calls ``update()`` and returns None if this is set to False. - - .. deprecated:: v0.8 - Argument has no use anymore and will be removed v0.9. kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. """ @@ -47,10 +42,9 @@ class AUC(Metric): def __init__( self, reorder: bool = False, - compute_on_step: Optional[bool] = None, **kwargs: Dict[str, Any], ) -> None: - super().__init__(compute_on_step=compute_on_step, **kwargs) + super().__init__(**kwargs) self.reorder = reorder diff --git a/torchmetrics/classification/auroc.py b/torchmetrics/classification/auroc.py index 1f0ff2438cd..5f8e2ce59c3 100644 --- a/torchmetrics/classification/auroc.py +++ b/torchmetrics/classification/auroc.py @@ -63,11 +63,6 @@ class AUROC(Metric): max_fpr: If not ``None``, calculates standardized partial AUC over the range ``[0, max_fpr]``. Should be a float between 0 and 1. - compute_on_step: - Forward only calls ``update()`` and returns None if this is set to False. - - .. deprecated:: v0.8 - Argument has no use anymore and will be removed v0.9. kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. 
@@ -113,10 +108,9 @@ def __init__( pos_label: Optional[int] = None, average: Optional[str] = "macro", max_fpr: Optional[float] = None, - compute_on_step: Optional[bool] = None, **kwargs: Dict[str, Any], ) -> None: - super().__init__(compute_on_step=compute_on_step, **kwargs) + super().__init__(**kwargs) self.num_classes = num_classes self.pos_label = pos_label diff --git a/torchmetrics/classification/avg_precision.py b/torchmetrics/classification/avg_precision.py index c57f8d0d96f..464d31b3865 100644 --- a/torchmetrics/classification/avg_precision.py +++ b/torchmetrics/classification/avg_precision.py @@ -57,12 +57,6 @@ class AveragePrecision(Metric): - ``'none'`` or ``None``: Calculate the metric for each class separately, and return the metric for every class. - compute_on_step: - Forward only calls ``update()`` and returns None if this is set to False. - - .. deprecated:: v0.8 - Argument has no use anymore and will be removed v0.9. - kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. Example (binary case): @@ -93,10 +87,9 @@ def __init__( num_classes: Optional[int] = None, pos_label: Optional[int] = None, average: Optional[str] = "macro", - compute_on_step: Optional[bool] = None, **kwargs: Dict[str, Any], ) -> None: - super().__init__(compute_on_step=compute_on_step, **kwargs) + super().__init__(**kwargs) self.num_classes = num_classes self.pos_label = pos_label diff --git a/torchmetrics/classification/binned_precision_recall.py b/torchmetrics/classification/binned_precision_recall.py index 142d548efa9..29dddefea96 100644 --- a/torchmetrics/classification/binned_precision_recall.py +++ b/torchmetrics/classification/binned_precision_recall.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Dict, List, Tuple, Union import torch from torch import Tensor @@ -61,11 +61,6 @@ class BinnedPrecisionRecallCurve(Metric): thresholds: list or tensor with specific thresholds or a number of bins from linear sampling. It is used for computation will lead to more detailed curve and accurate estimates, but will be slower and consume more memory. - compute_on_step: - Forward only calls ``update()`` and returns None if this is set to False. - - .. deprecated:: v0.8 - Argument has no use anymore and will be removed v0.9. kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. @@ -122,10 +117,9 @@ def __init__( self, num_classes: int, thresholds: Union[int, Tensor, List[float], None] = None, - compute_on_step: Optional[bool] = None, **kwargs: Dict[str, Any], ) -> None: - super().__init__(compute_on_step=compute_on_step, **kwargs) + super().__init__(**kwargs) self.num_classes = num_classes if isinstance(thresholds, int): @@ -203,11 +197,6 @@ class BinnedAveragePrecision(BinnedPrecisionRecallCurve): thresholds: list or tensor with specific thresholds or a number of bins from linear sampling. It is used for computation will lead to more detailed curve and accurate estimates, but will be slower and consume more memory - compute_on_step: - Forward only calls ``update()`` and returns None if this is set to False. - - .. deprecated:: v0.8 - Argument has no use anymore and will be removed v0.9. kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. 
@@ -258,11 +247,6 @@ class BinnedRecallAtFixedPrecision(BinnedPrecisionRecallCurve): thresholds: list or tensor with specific thresholds or a number of bins from linear sampling. It is used for computation will lead to more detailed curve and accurate estimates, but will be slower and consume more memory - compute_on_step: - Forward only calls ``update()`` and returns None if this is set to False. - - .. deprecated:: v0.8 - Argument has no use anymore and will be removed v0.9. kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. @@ -295,10 +279,9 @@ def __init__( num_classes: int, min_precision: float, thresholds: Union[int, Tensor, List[float], None] = None, - compute_on_step: Optional[bool] = None, **kwargs: Dict[str, Any], ) -> None: - super().__init__(num_classes=num_classes, thresholds=thresholds, compute_on_step=compute_on_step, **kwargs) + super().__init__(num_classes=num_classes, thresholds=thresholds, **kwargs) self.min_precision = min_precision def compute(self) -> Tuple[Tensor, Tensor]: # type: ignore diff --git a/torchmetrics/classification/calibration_error.py b/torchmetrics/classification/calibration_error.py index 3dd2a54b0ef..a6349ba4cb1 100644 --- a/torchmetrics/classification/calibration_error.py +++ b/torchmetrics/classification/calibration_error.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List import torch from torch import Tensor @@ -52,11 +52,6 @@ class CalibrationError(Metric): norm: Norm used to compare empirical and expected probability bins. Defaults to "l1", or Expected Calibration Error. debias: Applies debiasing term, only implemented for l2 norm. Defaults to True. - compute_on_step: - Forward only calls ``update()`` and returns None if this is set to False. - - .. deprecated:: v0.8 - Argument has no use anymore and will be removed v0.9. kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. """ @@ -69,11 +64,10 @@ def __init__( self, n_bins: int = 15, norm: str = "l1", - compute_on_step: Optional[bool] = None, **kwargs: Dict[str, Any], ): - super().__init__(compute_on_step=compute_on_step, **kwargs) + super().__init__(**kwargs) if norm not in self.DISTANCES: raise ValueError(f"Norm {norm} is not supported. Please select from l1, l2, or max. ") diff --git a/torchmetrics/classification/cohen_kappa.py b/torchmetrics/classification/cohen_kappa.py index 7dcfd8dda3d..2370fb37e20 100644 --- a/torchmetrics/classification/cohen_kappa.py +++ b/torchmetrics/classification/cohen_kappa.py @@ -56,12 +56,6 @@ class labels. Threshold for transforming probability or logit predictions to binary ``(0,1)`` predictions, in the case of binary or multi-label inputs. Default value of ``0.5`` corresponds to input being probabilities. - compute_on_step: - Forward only calls ``update()`` and returns ``None`` if this is set to ``False``. - - .. deprecated:: v0.8 - Argument has no use anymore and will be removed v0.9. - kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. 
Example: @@ -82,10 +76,9 @@ def __init__( num_classes: int, weights: Optional[str] = None, threshold: float = 0.5, - compute_on_step: Optional[bool] = None, **kwargs: Dict[str, Any], ) -> None: - super().__init__(compute_on_step=compute_on_step, **kwargs) + super().__init__(**kwargs) self.num_classes = num_classes self.weights = weights self.threshold = threshold diff --git a/torchmetrics/classification/confusion_matrix.py b/torchmetrics/classification/confusion_matrix.py index 36c56f0eafe..52fabc344de 100644 --- a/torchmetrics/classification/confusion_matrix.py +++ b/torchmetrics/classification/confusion_matrix.py @@ -54,11 +54,6 @@ class ConfusionMatrix(Metric): of binary or multi-label inputs. Default value of ``0.5`` corresponds to input being probabilities. multilabel: determines if data is multilabel or not. - compute_on_step: - Forward only calls ``update()`` and returns None if this is set to False. - - .. deprecated:: v0.8 - Argument has no use anymore and will be removed v0.9. kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. @@ -99,10 +94,9 @@ def __init__( normalize: Optional[str] = None, threshold: float = 0.5, multilabel: bool = False, - compute_on_step: Optional[bool] = None, **kwargs: Dict[str, Any], ) -> None: - super().__init__(compute_on_step=compute_on_step, **kwargs) + super().__init__(**kwargs) self.num_classes = num_classes self.normalize = normalize self.threshold = threshold diff --git a/torchmetrics/classification/f_beta.py b/torchmetrics/classification/f_beta.py index 32a98b7d912..4d1b5b799a0 100644 --- a/torchmetrics/classification/f_beta.py +++ b/torchmetrics/classification/f_beta.py @@ -102,12 +102,6 @@ class FBetaScore(StatScores): :ref:`documentation section ` for a more detailed explanation and examples. - compute_on_step: - Forward only calls ``update()`` and returns None if this is set to False. - - .. deprecated:: v0.8 - Argument has no use anymore and will be removed v0.9. - kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. Raises: @@ -135,7 +129,6 @@ def __init__( ignore_index: Optional[int] = None, top_k: Optional[int] = None, multiclass: Optional[bool] = None, - compute_on_step: Optional[bool] = None, **kwargs: Dict[str, Any], ) -> None: self.beta = beta @@ -151,7 +144,6 @@ def __init__( num_classes=num_classes, multiclass=multiclass, ignore_index=ignore_index, - compute_on_step=compute_on_step, **kwargs, ) @@ -237,12 +229,6 @@ class F1Score(FBetaScore): :ref:`documentation section ` for a more detailed explanation and examples. - compute_on_step: - Forward only calls ``update()`` and returns None if this is set to False. - - .. deprecated:: v0.8 - Argument has no use anymore and will be removed v0.9. - kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. @@ -268,7 +254,6 @@ def __init__( ignore_index: Optional[int] = None, top_k: Optional[int] = None, multiclass: Optional[bool] = None, - compute_on_step: Optional[bool] = None, **kwargs: Dict[str, Any], ) -> None: super().__init__( @@ -280,6 +265,5 @@ def __init__( ignore_index=ignore_index, top_k=top_k, multiclass=multiclass, - compute_on_step=compute_on_step, **kwargs, ) diff --git a/torchmetrics/classification/hamming.py b/torchmetrics/classification/hamming.py index 999315ab7fe..58c4975a35d 100644 --- a/torchmetrics/classification/hamming.py +++ b/torchmetrics/classification/hamming.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Dict, Optional +from typing import Any, Dict import torch from torch import Tensor, tensor @@ -40,11 +40,6 @@ class HammingDistance(Metric): threshold: Threshold for transforming probability or logit predictions to binary ``(0,1)`` predictions, in the case of binary or multi-label inputs. Default value of ``0.5`` corresponds to input being probabilities. - compute_on_step: - Forward only calls ``update()`` and returns None if this is set to False. - - .. deprecated:: v0.8 - Argument has no use anymore and will be removed v0.9. kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. @@ -69,10 +64,9 @@ class HammingDistance(Metric): def __init__( self, threshold: float = 0.5, - compute_on_step: Optional[bool] = None, **kwargs: Dict[str, Any], ) -> None: - super().__init__(compute_on_step=compute_on_step, **kwargs) + super().__init__(**kwargs) self.add_state("correct", default=tensor(0), dist_reduce_fx="sum") self.add_state("total", default=tensor(0), dist_reduce_fx="sum") diff --git a/torchmetrics/classification/hinge.py b/torchmetrics/classification/hinge.py index d374aaf3af9..53bfd5f7c6f 100644 --- a/torchmetrics/classification/hinge.py +++ b/torchmetrics/classification/hinge.py @@ -54,11 +54,6 @@ class HingeLoss(Metric): Which approach to use for multi-class inputs (has no effect in the binary case). ``None`` (default), ``MulticlassMode.CRAMMER_SINGER`` or ``"crammer-singer"``, uses the Crammer Singer multi-class hinge loss. ``MulticlassMode.ONE_VS_ALL`` or ``"one-vs-all"`` computes the hinge loss in a one-vs-all fashion. - compute_on_step: - Forward only calls ``update()`` and returns None if this is set to False. - - .. deprecated:: v0.8 - Argument has no use anymore and will be removed v0.9. kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. @@ -101,10 +96,9 @@ def __init__( self, squared: bool = False, multiclass_mode: Optional[Union[str, MulticlassMode]] = None, - compute_on_step: Optional[bool] = None, **kwargs: Dict[str, Any], ) -> None: - super().__init__(compute_on_step=compute_on_step, **kwargs) + super().__init__(**kwargs) self.add_state("measure", default=tensor(0.0), dist_reduce_fx="sum") self.add_state("total", default=tensor(0), dist_reduce_fx="sum") diff --git a/torchmetrics/classification/jaccard.py b/torchmetrics/classification/jaccard.py index c3a278e64ef..6a9074ab8bf 100644 --- a/torchmetrics/classification/jaccard.py +++ b/torchmetrics/classification/jaccard.py @@ -59,12 +59,6 @@ class JaccardIndex(ConfusionMatrix): - ``'sum'``: takes the sum - ``'none'``: no reduction will be applied - compute_on_step: - Forward only calls ``update()`` and returns None if this is set to False. - - .. deprecated:: v0.8 - Argument has no use anymore and will be removed v0.9. - kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. 
Example: @@ -88,7 +82,6 @@ def __init__( threshold: float = 0.5, multilabel: bool = False, reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean", - compute_on_step: Optional[bool] = None, **kwargs: Dict[str, Any], ) -> None: super().__init__( @@ -96,7 +89,6 @@ def __init__( normalize=None, threshold=threshold, multilabel=multilabel, - compute_on_step=compute_on_step, **kwargs, ) self.reduction = reduction diff --git a/torchmetrics/classification/kl_divergence.py b/torchmetrics/classification/kl_divergence.py index f04d1da796f..10877b7a2c2 100644 --- a/torchmetrics/classification/kl_divergence.py +++ b/torchmetrics/classification/kl_divergence.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Dict, Optional +from typing import Any, Dict import torch from torch import Tensor @@ -44,12 +44,6 @@ class KLDivergence(Metric): - ``'sum'``: Sum score across samples - ``'none'`` or ``None``: Returns score per sample - compute_on_step: - Forward only calls ``update()`` and returns None if this is set to False. - - .. deprecated:: v0.8 - Argument has no use anymore and will be removed v0.9. - kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. @@ -81,10 +75,9 @@ def __init__( self, log_prob: bool = False, reduction: Literal["mean", "sum", "none", None] = "mean", - compute_on_step: Optional[bool] = None, **kwargs: Dict[str, Any], ) -> None: - super().__init__(compute_on_step=compute_on_step, **kwargs) + super().__init__(**kwargs) if not isinstance(log_prob, bool): raise TypeError(f"Expected argument `log_prob` to be bool but got {log_prob}") self.log_prob = log_prob diff --git a/torchmetrics/classification/matthews_corrcoef.py b/torchmetrics/classification/matthews_corrcoef.py index 5e36e1d5acc..15b2c4b6c8a 100644 --- a/torchmetrics/classification/matthews_corrcoef.py +++ b/torchmetrics/classification/matthews_corrcoef.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Dict, Optional +from typing import Any, Dict import torch from torch import Tensor @@ -51,11 +51,6 @@ class MatthewsCorrCoef(Metric): Args: num_classes: Number of classes in the dataset. threshold: Threshold value for binary or multi-label probabilites. - compute_on_step: - Forward only calls ``update()`` and returns None if this is set to False. - - .. deprecated:: v0.8 - Argument has no use anymore and will be removed v0.9. kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. @@ -76,10 +71,9 @@ def __init__( self, num_classes: int, threshold: float = 0.5, - compute_on_step: Optional[bool] = None, **kwargs: Dict[str, Any], ) -> None: - super().__init__(compute_on_step=compute_on_step, **kwargs) + super().__init__(**kwargs) self.num_classes = num_classes self.threshold = threshold diff --git a/torchmetrics/classification/precision_recall.py b/torchmetrics/classification/precision_recall.py index 86ce9f88e03..0c2dc9a9c71 100644 --- a/torchmetrics/classification/precision_recall.py +++ b/torchmetrics/classification/precision_recall.py @@ -89,12 +89,6 @@ class Precision(StatScores): :ref:`documentation section ` for a more detailed explanation and examples. 
- compute_on_step: - Forward only calls ``update()`` and returns None if this is set to False. - - .. deprecated:: v0.8 - Argument has no use anymore and will be removed v0.9. - kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. Raises: @@ -126,7 +120,6 @@ def __init__( ignore_index: Optional[int] = None, top_k: Optional[int] = None, multiclass: Optional[bool] = None, - compute_on_step: Optional[bool] = None, **kwargs: Dict[str, Any], ) -> None: allowed_average = ["micro", "macro", "weighted", "samples", "none", None] @@ -141,7 +134,6 @@ def __init__( num_classes=num_classes, multiclass=multiclass, ignore_index=ignore_index, - compute_on_step=compute_on_step, **kwargs, ) @@ -231,12 +223,6 @@ class Recall(StatScores): :ref:`documentation section ` for a more detailed explanation and examples. - compute_on_step: - Forward only calls ``update()`` and returns None if this is set to False. - - .. deprecated:: v0.8 - Argument has no use anymore and will be removed v0.9. - kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. Raises: @@ -268,7 +254,6 @@ def __init__( ignore_index: Optional[int] = None, top_k: Optional[int] = None, multiclass: Optional[bool] = None, - compute_on_step: Optional[bool] = None, **kwargs: Dict[str, Any], ) -> None: allowed_average = ["micro", "macro", "weighted", "samples", "none", None] @@ -283,7 +268,6 @@ def __init__( num_classes=num_classes, multiclass=multiclass, ignore_index=ignore_index, - compute_on_step=compute_on_step, **kwargs, ) diff --git a/torchmetrics/classification/precision_recall_curve.py b/torchmetrics/classification/precision_recall_curve.py index e9dd0c82a85..2ec1b7a9717 100644 --- a/torchmetrics/classification/precision_recall_curve.py +++ b/torchmetrics/classification/precision_recall_curve.py @@ -42,11 +42,6 @@ class PrecisionRecallCurve(Metric): pos_label: integer determining the positive class. Default is ``None`` which for binary problem is translated to 1. For multiclass problems this argument should not be set as we iteratively change it in the range ``[0, num_classes-1]`` - compute_on_step: - Forward only calls ``update()`` and returns None if this is set to False. - - .. deprecated:: v0.8 - Argument has no use anymore and will be removed v0.9. kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. @@ -88,10 +83,9 @@ def __init__( self, num_classes: Optional[int] = None, pos_label: Optional[int] = None, - compute_on_step: Optional[bool] = None, **kwargs: Dict[str, Any], ) -> None: - super().__init__(compute_on_step=compute_on_step, **kwargs) + super().__init__(**kwargs) self.num_classes = num_classes self.pos_label = pos_label diff --git a/torchmetrics/classification/roc.py b/torchmetrics/classification/roc.py index fd5e163a7c0..f8fcc4c1da6 100644 --- a/torchmetrics/classification/roc.py +++ b/torchmetrics/classification/roc.py @@ -43,11 +43,6 @@ class ROC(Metric): pos_label: integer determining the positive class. Default is ``None`` which for binary problem is translated to 1. For multiclass problems this argument should not be set as we iteratively change it in the range ``[0,num_classes-1]`` - compute_on_step: - Forward only calls ``update()`` and returns None if this is set to False. - - .. deprecated:: v0.8 - Argument has no use anymore and will be removed v0.9. kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. 
@@ -112,10 +107,9 @@ def __init__( self, num_classes: Optional[int] = None, pos_label: Optional[int] = None, - compute_on_step: Optional[bool] = None, **kwargs: Dict[str, Any], ) -> None: - super().__init__(compute_on_step=compute_on_step, **kwargs) + super().__init__(**kwargs) self.num_classes = num_classes self.pos_label = pos_label diff --git a/torchmetrics/classification/specificity.py b/torchmetrics/classification/specificity.py index f2e4dfe80aa..4de7400b2cb 100644 --- a/torchmetrics/classification/specificity.py +++ b/torchmetrics/classification/specificity.py @@ -92,12 +92,6 @@ class Specificity(StatScores): :ref:`documentation section ` for a more detailed explanation and examples. - compute_on_step: - Forward only calls ``update()`` and returns None if this is set to False. - - .. deprecated:: v0.8 - Argument has no use anymore and will be removed v0.9. - kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. Raises: @@ -128,7 +122,6 @@ def __init__( ignore_index: Optional[int] = None, top_k: Optional[int] = None, multiclass: Optional[bool] = None, - compute_on_step: Optional[bool] = None, **kwargs: Dict[str, Any], ) -> None: allowed_average = ["micro", "macro", "weighted", "samples", "none", None] @@ -143,7 +136,6 @@ def __init__( num_classes=num_classes, multiclass=multiclass, ignore_index=ignore_index, - compute_on_step=compute_on_step, **kwargs, ) diff --git a/torchmetrics/classification/stat_scores.py b/torchmetrics/classification/stat_scores.py index 272ad104ab5..d9866671089 100644 --- a/torchmetrics/classification/stat_scores.py +++ b/torchmetrics/classification/stat_scores.py @@ -83,12 +83,6 @@ class StatScores(Metric): :ref:`documentation section ` for a more detailed explanation and examples. - compute_on_step: - Forward only calls ``update()`` and returns None if this is set to False. - - .. deprecated:: v0.8 - Argument has no use anymore and will be removed v0.9. - kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. Raises: @@ -132,10 +126,9 @@ def __init__( ignore_index: Optional[int] = None, mdmc_reduce: Optional[str] = None, multiclass: Optional[bool] = None, - compute_on_step: Optional[bool] = None, **kwargs: Dict[str, Any], ) -> None: - super().__init__(compute_on_step=compute_on_step, **kwargs) + super().__init__(**kwargs) self.reduce = reduce self.mdmc_reduce = mdmc_reduce
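
Migration note (not part of the patch above): since every constructor in this diff simply drops `compute_on_step`, user code that still passes it will now raise a `TypeError`. Below is a minimal, hypothetical sketch of the update path; the tensors and the `Accuracy` metric are only illustrative, while `update()` and `compute()` are the existing `torchmetrics` API that the removed docstrings referred to ("Forward only calls ``update()`` and returns None").

```python
import torch
from torchmetrics import Accuracy

preds = torch.tensor([0, 1, 1, 0])
target = torch.tensor([0, 1, 0, 0])

# Before (<= v0.8, deprecated): Accuracy(compute_on_step=False)
# After this change: construct the metric without the argument.
metric = Accuracy()

# To mimic the old ``compute_on_step=False`` behaviour (accumulate state
# without returning a per-step value), call ``update()`` instead of forward:
metric.update(preds, target)

# The accumulated result is still obtained via ``compute()``:
acc = metric.compute()
print(acc)  # tensor(0.7500)
```

The same substitution applies to every metric touched in this patch, since the argument was forwarded unchanged to the base `Metric` constructor in all of them.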