add deprecation warnings to modules
SkafteNicki authored and Borda committed Sep 13, 2022
1 parent acda9ff commit b16607c
Showing 16 changed files with 337 additions and 48 deletions.
17 changes: 17 additions & 0 deletions src/torchmetrics/classification/accuracy.py
@@ -28,6 +28,7 @@
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.enums import AverageMethod, DataType
from torchmetrics.utilities.prints import rank_zero_warn

from torchmetrics.classification.stat_scores import ( # isort:skip
StatScores,
@@ -325,6 +326,13 @@ def compute(self) -> Tensor:

class Accuracy(StatScores):
r"""
.. note::
From v0.10, `'binary_*'`, `'multiclass_*'`, and `'multilabel_*'` versions of each classification
metric exist. Moving forward, we recommend using these versions. This base metric will still work
as it did prior to v0.10, until v0.11. From v0.11, the `task` argument introduced in this metric
will be required, and the general order of arguments may change, such that this metric will
function as a single entrypoint that calls the three specialized versions.
Computes Accuracy_:
.. math::
@@ -488,6 +496,15 @@ def __new__(
raise ValueError(
f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
)
else:
rank_zero_warn(
"From v0.10 an `'Binary*'`, `'Multiclass*', `'Multilabel*'` version now exist of each classification"
" metric. Moving forward we recommend using these versions. This base metric will still work as it did"
" prior to v0.10 until v0.11. From v0.11 the `task` argument introduced in this metric will be required"
" and the general order of arguments may change, such that this metric will just function as an single"
" entrypoint to calling the three specialized versions.",
DeprecationWarning,
)
return super().__new__(cls)

def __init__(
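Every file in this commit applies the same pattern shown above for `Accuracy`: the class's `__new__` dispatches on the new `task` argument, and a call without `task` emits a `DeprecationWarning` through `rank_zero_warn`. A minimal, self-contained sketch of that dispatch-and-deprecate idea (the classes and the `rank_zero_warn` stand-in here are simplified placeholders, not the actual torchmetrics internals):

```python
# Sketch of the dispatch-and-deprecate pattern; simplified stand-ins only.
import warnings
from typing import Any, Optional


def rank_zero_warn(message: str, category: type = UserWarning) -> None:
    # Stand-in: the real helper emits the warning on rank 0 only
    # when running distributed.
    warnings.warn(message, category, stacklevel=2)


class BinaryAccuracy:
    """Placeholder for the specialized binary metric."""


class MulticlassAccuracy:
    """Placeholder for the specialized multiclass metric."""

    def __init__(self, num_classes: int) -> None:
        self.num_classes = num_classes


class Accuracy:
    def __new__(cls, task: Optional[str] = None, **kwargs: Any) -> Any:
        # New-style call: `task` routes construction to a specialized class,
        # so Accuracy(task="multiclass", num_classes=3) returns a
        # MulticlassAccuracy instance.
        if task == "binary":
            return BinaryAccuracy()
        if task == "multiclass":
            return MulticlassAccuracy(**kwargs)
        if task is not None:
            raise ValueError(f"Unexpected argument `task`: {task!r}")
        # Legacy call (no `task`): keep working, but warn about v0.11.
        rank_zero_warn(
            "From v0.11 the `task` argument will be required.",
            DeprecationWarning,
        )
        return super().__new__(cls)
```

Because `__new__` returns an object that is not an instance of `Accuracy` whenever `task` is given, Python skips `Accuracy.__init__` for new-style calls, which is what lets one entrypoint construct three different specialized classes.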
20 changes: 19 additions & 1 deletion src/torchmetrics/classification/auroc.py
@@ -324,7 +324,16 @@ def compute(self) -> Tensor:


class AUROC(Metric):
r"""Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_).
r"""
.. note::
From v0.10, `'binary_*'`, `'multiclass_*'`, and `'multilabel_*'` versions of each classification
metric exist. Moving forward, we recommend using these versions. This base metric will still work
as it did prior to v0.10, until v0.11. From v0.11, the `task` argument introduced in this metric
will be required, and the general order of arguments may change, such that this metric will
function as a single entrypoint that calls the three specialized versions.
Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_).
Works with binary, multiclass, and multilabel problems. In the case of
multiclass, the values will be calculated based on a one-vs-the-rest approach.
@@ -428,6 +437,15 @@ def __new__(
raise ValueError(
f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
)
else:
rank_zero_warn(
"From v0.10 an `'Binary*'`, `'Multiclass*', `'Multilabel*'` version now exist of each classification"
" metric. Moving forward we recommend using these versions. This base metric will still work as it did"
" prior to v0.10 until v0.11. From v0.11 the `task` argument introduced in this metric will be required"
" and the general order of arguments may change, such that this metric will just function as an single"
" entrypoint to calling the three specialized versions.",
DeprecationWarning,
)
return super().__new__(cls)

def __init__(
19 changes: 18 additions & 1 deletion src/torchmetrics/classification/average_precision.py
@@ -323,7 +323,15 @@ def compute(self) -> Tensor:


class AveragePrecision(Metric):
"""Computes the average precision score, which summarises the precision recall curve into one number. Works for
r"""
.. note::
From v0.10, `'binary_*'`, `'multiclass_*'`, and `'multilabel_*'` versions of each classification
metric exist. Moving forward, we recommend using these versions. This base metric will still work
as it did prior to v0.10, until v0.11. From v0.11, the `task` argument introduced in this metric
will be required, and the general order of arguments may change, such that this metric will
function as a single entrypoint that calls the three specialized versions.
Computes the average precision score, which summarises the precision-recall curve into one number. Works for
both binary and multiclass problems. In the case of multiclass, the values will be calculated based on a one-
vs-the-rest approach.
@@ -406,6 +414,15 @@ def __new__(
raise ValueError(
f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
)
else:
rank_zero_warn(
"From v0.10 an `'Binary*'`, `'Multiclass*', `'Multilabel*'` version now exist of each classification"
" metric. Moving forward we recommend using these versions. This base metric will still work as it did"
" prior to v0.10 until v0.11. From v0.11 the `task` argument introduced in this metric will be required"
" and the general order of arguments may change, such that this metric will just function as an single"
" entrypoint to calling the three specialized versions.",
DeprecationWarning,
)
return super().__new__(cls)

def __init__(
20 changes: 19 additions & 1 deletion src/torchmetrics/classification/calibration_error.py
@@ -31,6 +31,7 @@
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.prints import rank_zero_warn


class BinaryCalibrationError(Metric):
@@ -222,7 +223,15 @@ def compute(self) -> Tensor:


class CalibrationError(Metric):
r"""`Computes the Top-label Calibration Error`_
r"""
.. note::
From v0.10, `'binary_*'`, `'multiclass_*'`, and `'multilabel_*'` versions of each classification
metric exist. Moving forward, we recommend using these versions. This base metric will still work
as it did prior to v0.10, until v0.11. From v0.11, the `task` argument introduced in this metric
will be required, and the general order of arguments may change, such that this metric will
function as a single entrypoint that calls the three specialized versions.
`Computes the Top-label Calibration Error`_
Three different norms are implemented, each corresponding to variations on the calibration error metric.
L1 norm (Expected Calibration Error)
@@ -282,6 +291,15 @@ def __new__(
raise ValueError(
f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
)
else:
rank_zero_warn(
"From v0.10 an `'Binary*'`, `'Multiclass*', `'Multilabel*'` version now exist of each classification"
" metric. Moving forward we recommend using these versions. This base metric will still work as it did"
" prior to v0.10 until v0.11. From v0.11 the `task` argument introduced in this metric will be required"
" and the general order of arguments may change, such that this metric will just function as an single"
" entrypoint to calling the three specialized versions.",
DeprecationWarning,
)
return super().__new__(cls)

def __init__(
20 changes: 19 additions & 1 deletion src/torchmetrics/classification/cohen_kappa.py
@@ -26,6 +26,7 @@
_multiclass_cohen_kappa_arg_validation,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.prints import rank_zero_warn


class BinaryCohenKappa(BinaryConfusionMatrix):
@@ -182,7 +183,15 @@ def compute(self) -> Tensor:


class CohenKappa(Metric):
r"""Calculates `Cohen's kappa score`_ that measures inter-annotator agreement. It is defined as
r"""
.. note::
From v0.10, `'binary_*'`, `'multiclass_*'`, and `'multilabel_*'` versions of each classification
metric exist. Moving forward, we recommend using these versions. This base metric will still work
as it did prior to v0.10, until v0.11. From v0.11, the `task` argument introduced in this metric
will be required, and the general order of arguments may change, such that this metric will
function as a single entrypoint that calls the three specialized versions.
Calculates `Cohen's kappa score`_ that measures inter-annotator agreement. It is defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
@@ -253,6 +262,15 @@ def __new__(
raise ValueError(
f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
)
else:
rank_zero_warn(
"From v0.10 an `'Binary*'`, `'Multiclass*', `'Multilabel*'` version now exist of each classification"
" metric. Moving forward we recommend using these versions. This base metric will still work as it did"
" prior to v0.10 until v0.11. From v0.11 the `task` argument introduced in this metric will be required"
" and the general order of arguments may change, such that this metric will just function as an single"
" entrypoint to calling the three specialized versions.",
DeprecationWarning,
)
return super().__new__(cls)

def __init__(
20 changes: 19 additions & 1 deletion src/torchmetrics/classification/confusion_matrix.py
@@ -37,6 +37,7 @@
_multilabel_confusion_matrix_update,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.prints import rank_zero_warn


class BinaryConfusionMatrix(Metric):
@@ -318,7 +319,15 @@ def compute(self) -> Tensor:


class ConfusionMatrix(Metric):
r"""Computes the `confusion matrix`_.
r"""
.. note::
From v0.10, `'binary_*'`, `'multiclass_*'`, and `'multilabel_*'` versions of each classification
metric exist. Moving forward, we recommend using these versions. This base metric will still work
as it did prior to v0.10, until v0.11. From v0.11, the `task` argument introduced in this metric
will be required, and the general order of arguments may change, such that this metric will
function as a single entrypoint that calls the three specialized versions.
Computes the `confusion matrix`_.
Works with binary, multiclass, and multilabel data. Accepts probabilities or logits from a model output
or integer class values in prediction. Works with multi-dimensional preds and target, but it should be noted that
@@ -412,6 +421,15 @@ def __new__(
raise ValueError(
f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
)
else:
rank_zero_warn(
"From v0.10 an `'Binary*'`, `'Multiclass*', `'Multilabel*'` version now exist of each classification"
" metric. Moving forward we recommend using these versions. This base metric will still work as it did"
" prior to v0.10 until v0.11. From v0.11 the `task` argument introduced in this metric will be required"
" and the general order of arguments may change, such that this metric will just function as an single"
" entrypoint to calling the three specialized versions.",
DeprecationWarning,
)
return super().__new__(cls)

def __init__(
39 changes: 37 additions & 2 deletions src/torchmetrics/classification/f_beta.py
@@ -32,6 +32,7 @@
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.enums import AverageMethod
from torchmetrics.utilities.prints import rank_zero_warn


class BinaryFBetaScore(BinaryStatScores):
@@ -716,7 +717,15 @@ def __init__(


class FBetaScore(StatScores):
r"""Computes `F-score`_, specifically:
r"""
.. note::
From v0.10, `'binary_*'`, `'multiclass_*'`, and `'multilabel_*'` versions of each classification
metric exist. Moving forward, we recommend using these versions. This base metric will still work
as it did prior to v0.10, until v0.11. From v0.11, the `task` argument introduced in this metric
will be required, and the general order of arguments may change, such that this metric will
function as a single entrypoint that calls the three specialized versions.
Computes `F-score`_, specifically:
.. math::
F_\beta = (1 + \beta^2) * \frac{\text{precision} * \text{recall}}
@@ -848,6 +857,15 @@ def __new__(
raise ValueError(
f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
)
else:
rank_zero_warn(
"From v0.10 an `'Binary*'`, `'Multiclass*', `'Multilabel*'` version now exist of each classification"
" metric. Moving forward we recommend using these versions. This base metric will still work as it did"
" prior to v0.10 until v0.11. From v0.11 the `task` argument introduced in this metric will be required"
" and the general order of arguments may change, such that this metric will just function as an single"
" entrypoint to calling the three specialized versions.",
DeprecationWarning,
)
return super().__new__(cls)

def __init__(
@@ -891,7 +909,15 @@ def compute(self) -> Tensor:


class F1Score(FBetaScore):
"""Computes F1 metric.
r"""
.. note::
From v0.10, `'binary_*'`, `'multiclass_*'`, and `'multilabel_*'` versions of each classification
metric exist. Moving forward, we recommend using these versions. This base metric will still work
as it did prior to v0.10, until v0.11. From v0.11, the `task` argument introduced in this metric
will be required, and the general order of arguments may change, such that this metric will
function as a single entrypoint that calls the three specialized versions.
Computes F1 metric.
F1 metrics correspond to the harmonic mean of the precision and recall scores.
Works with binary, multiclass, and multilabel data. Accepts logits or probabilities from a model
@@ -1013,6 +1039,15 @@ def __new__(
raise ValueError(
f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
)
else:
rank_zero_warn(
"From v0.10 an `'Binary*'`, `'Multiclass*', `'Multilabel*'` version now exist of each classification"
" metric. Moving forward we recommend using these versions. This base metric will still work as it did"
" prior to v0.10 until v0.11. From v0.11 the `task` argument introduced in this metric will be required"
" and the general order of arguments may change, such that this metric will just function as an single"
" entrypoint to calling the three specialized versions.",
DeprecationWarning,
)
return super().__new__(cls)

def __init__(
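For projects that cannot migrate before v0.11, the new warning can be muted during the transition; a sketch using only standard-library warning filters (the message regex below is an assumption keyed to the prefix of the warning strings above):

```python
import warnings

# Ignore only this deprecation; the regex is matched against the start
# of the warning message emitted by rank_zero_warn.
warnings.filterwarnings(
    "ignore",
    message=r"From v0\.10",
    category=DeprecationWarning,
)
```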
20 changes: 19 additions & 1 deletion src/torchmetrics/classification/hamming.py
@@ -24,6 +24,7 @@
_hamming_distance_update,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.prints import rank_zero_warn


class BinaryHammingDistance(BinaryStatScores):
@@ -318,7 +319,15 @@ def compute(self) -> Tensor:


class HammingDistance(Metric):
r"""Computes the average `Hamming distance`_ (also known as Hamming loss) between targets and predictions:
r"""
.. note::
From v0.10, `'binary_*'`, `'multiclass_*'`, and `'multilabel_*'` versions of each classification
metric exist. Moving forward, we recommend using these versions. This base metric will still work
as it did prior to v0.10, until v0.11. From v0.11, the `task` argument introduced in this metric
will be required, and the general order of arguments may change, such that this metric will
function as a single entrypoint that calls the three specialized versions.
Computes the average `Hamming distance`_ (also known as Hamming loss) between targets and predictions:
.. math::
\text{Hamming distance} = \frac{1}{N \cdot L}\sum_i^N \sum_l^L 1(y_{il} \neq \hat{y_{il}})
@@ -389,6 +398,15 @@ def __new__(
raise ValueError(
f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
)
else:
rank_zero_warn(
"From v0.10 an `'Binary*'`, `'Multiclass*', `'Multilabel*'` version now exist of each classification"
" metric. Moving forward we recommend using these versions. This base metric will still work as it did"
" prior to v0.10 until v0.11. From v0.11 the `task` argument introduced in this metric will be required"
" and the general order of arguments may change, such that this metric will just function as an single"
" entrypoint to calling the three specialized versions.",
DeprecationWarning,
)
return super().__new__(cls)

def __init__(
20 changes: 19 additions & 1 deletion src/torchmetrics/classification/hinge.py
@@ -32,6 +32,7 @@
_multiclass_hinge_loss_update,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.prints import rank_zero_warn


class BinaryHingeLoss(Metric):
@@ -205,7 +206,15 @@ def compute(self) -> Tensor:


class HingeLoss(Metric):
r"""Computes the mean `Hinge loss`_, typically used for Support Vector Machines (SVMs).
r"""
.. note::
From v0.10, `'binary_*'`, `'multiclass_*'`, and `'multilabel_*'` versions of each classification
metric exist. Moving forward, we recommend using these versions. This base metric will still work
as it did prior to v0.10, until v0.11. From v0.11, the `task` argument introduced in this metric
will be required, and the general order of arguments may change, such that this metric will
function as a single entrypoint that calls the three specialized versions.
Computes the mean `Hinge loss`_, typically used for Support Vector Machines (SVMs).
In the binary case it is defined as:
@@ -299,6 +308,15 @@ def __new__(
raise ValueError(
f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
)
else:
rank_zero_warn(
"From v0.10 an `'Binary*'`, `'Multiclass*', `'Multilabel*'` version now exist of each classification"
" metric. Moving forward we recommend using these versions. This base metric will still work as it did"
" prior to v0.10 until v0.11. From v0.11 the `task` argument introduced in this metric will be required"
" and the general order of arguments may change, such that this metric will just function as an single"
" entrypoint to calling the three specialized versions.",
DeprecationWarning,
)
return super().__new__(cls)

def __init__(
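From the caller's side, the change looks as follows; a sketch assuming the v0.10 `Accuracy` API and that `rank_zero_warn` forwards to the standard `warnings` module:

```python
import warnings

import torch
from torchmetrics import Accuracy

preds = torch.tensor([0, 2, 1, 1])
target = torch.tensor([0, 1, 1, 1])

# New style: an explicit `task` dispatches to the specialized metric
# (here MulticlassAccuracy) and emits no warning.
acc = Accuracy(task="multiclass", num_classes=3)
print(acc(preds, target))

# Legacy style: still works until v0.11, but now emits a DeprecationWarning.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    legacy = Accuracy(num_classes=3)
assert any(issubclass(w.category, DeprecationWarning) for w in caught)
```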
…
