Merge branch 'master' into cleanup/compute_on_step_detection
Borda authored May 5, 2022
2 parents eb96614 + a39f3e7 commit b8d1923
Showing 14 changed files with 26 additions and 114 deletions.
3 changes: 3 additions & 0 deletions CHANGELOG.md
@@ -58,6 +58,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Removed deprecated `compute_on_step` argument in detection ([#1005](https://github.com/PyTorchLightning/metrics/pull/1005))


- Removed deprecated `compute_on_step` argument in text ([#1004](https://github.com/PyTorchLightning/metrics/pull/1004))


- Removed deprecated `compute_on_step` argument in audio ([#1007](https://github.com/PyTorchLightning/metrics/pull/1007))


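For anyone upgrading across this release, the user-facing effect of these entries is that the affected metrics no longer accept `compute_on_step` in their constructors; the per-batch value still comes from calling the metric (its `forward`), and the accumulated value from `compute()`. A minimal migration sketch (import path and example data are illustrative, not taken from this diff):

from torchmetrics import WordErrorRate

# before (deprecated in v0.8, removed here):
#   wer = WordErrorRate(compute_on_step=True)
wer = WordErrorRate()

preds = ["hello world", "good morning"]
target = ["hello beautiful world", "good morning"]

batch_wer = wer(preds, target)  # forward(): updates state and returns the score for this batch
epoch_wer = wer.compute()       # score accumulated over all forward()/update() calls
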
9 changes: 1 addition & 8 deletions torchmetrics/text/bert.py
@@ -82,12 +82,6 @@ class BERTScore(Metric):
of the files from `BERT_score`_.
baseline_path: A path to the user's own local csv/tsv file with the baseline scale.
baseline_url: A url path to the user's own csv/tsv file with the baseline scale.
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False.
.. deprecated:: v0.8
Argument has no use anymore and will be removed v0.9.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Returns:
@@ -131,10 +125,9 @@ def __init__(
rescale_with_baseline: bool = False,
baseline_path: Optional[str] = None,
baseline_url: Optional[str] = None,
compute_on_step: Optional[bool] = None,
**kwargs: Dict[str, Any],
):
super().__init__(compute_on_step=compute_on_step, **kwargs)
super().__init__(**kwargs)
self.model_name_or_path = model_name_or_path or _DEFAULT_MODEL
self.num_layers = num_layers
self.all_layers = all_layers
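The BERTScore constructor change follows the same pattern. A usage sketch under the new signature (assumes the optional `transformers` dependency is installed and that the default pretrained model can be downloaded; example sentences are illustrative):

from torchmetrics.text.bert import BERTScore

bertscore = BERTScore()  # compute_on_step no longer accepted
preds = ["hello there", "general kenobi"]
target = ["hello there", "master kenobi"]
score = bertscore(preds, target)  # dict with precision/recall/f1 values
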
11 changes: 2 additions & 9 deletions torchmetrics/text/bleu.py
@@ -16,7 +16,7 @@
# Authors: torchtext authors and @sluks
# Date: 2020-07-18
# Link: https://pytorch.org/text/_modules/torchtext/data/metrics.html#bleu_score
from typing import Any, Dict, Optional, Sequence
from typing import Any, Dict, Sequence

import torch
from torch import Tensor, tensor
@@ -31,12 +31,6 @@ class BLEUScore(Metric):
Args:
n_gram: Gram value ranged from 1 to 4
smooth: Whether or not to apply smoothing, see [2]
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False.
.. deprecated:: v0.8
Argument has no use anymore and will be removed v0.9.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
@@ -66,10 +60,9 @@ def __init__(
self,
n_gram: int = 4,
smooth: bool = False,
compute_on_step: Optional[bool] = None,
**kwargs: Dict[str, Any],
):
super().__init__(compute_on_step=compute_on_step, **kwargs)
super().__init__(**kwargs)
self.n_gram = n_gram
self.smooth = smooth

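Only the BLEUScore constructor signature changes here; `n_gram` and `smooth` keep their defaults. A short usage sketch under the new signature (example strings are illustrative):

from torchmetrics import BLEUScore

bleu = BLEUScore(n_gram=4, smooth=False)  # no compute_on_step argument anymore
preds = ["the cat is on the mat"]
target = [["there is a cat on the mat", "a cat is on the mat"]]
score = bleu(preds, target)  # corpus BLEU for this batch
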
11 changes: 2 additions & 9 deletions torchmetrics/text/cer.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Any, Dict, List, Optional, Union
from typing import Any, Dict, List, Union

import torch
from torch import Tensor, tensor
@@ -43,12 +43,6 @@ class CharErrorRate(Metric):
Compute CharErrorRate score of transcribed segments against references.
Args:
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False.
.. deprecated:: v0.8
Argument has no use anymore and will be removed v0.9.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Returns:
@@ -68,10 +62,9 @@ class CharErrorRate(Metric):

def __init__(
self,
compute_on_step: Optional[bool] = None,
**kwargs: Dict[str, Any],
):
super().__init__(compute_on_step=compute_on_step, **kwargs)
super().__init__(**kwargs)
self.add_state("errors", tensor(0, dtype=torch.float), dist_reduce_fx="sum")
self.add_state("total", tensor(0, dtype=torch.float), dist_reduce_fx="sum")

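The removal does not touch the metric state: `errors` and `total` are still summed across processes (`dist_reduce_fx="sum"`) and the final score is their ratio. A simplified sketch of that accumulation pattern (a toy illustration, not the real edit-distance implementation):

import torch
from torchmetrics import Metric

class ToyCharErrorRate(Metric):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)  # compute_on_step is no longer forwarded
        self.add_state("errors", torch.tensor(0.0), dist_reduce_fx="sum")
        self.add_state("total", torch.tensor(0.0), dist_reduce_fx="sum")

    def update(self, preds, target):
        for p, t in zip(preds, target):
            # naive position-wise mismatch count; CharErrorRate uses true edit distance
            self.errors += sum(a != b for a, b in zip(p, t)) + abs(len(p) - len(t))
            self.total += len(t)

    def compute(self):
        return self.errors / self.total
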
9 changes: 1 addition & 8 deletions torchmetrics/text/chrf.py
@@ -58,12 +58,6 @@ class CHRFScore(Metric):
lowercase: An indication whether to enable case-insesitivity.
whitespace: An indication whether keep whitespaces during n-gram extraction.
return_sentence_level_score: An indication whether a sentence-level chrF/chrF++ score to be returned.
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False.
.. deprecated:: v0.8
Argument has no use anymore and will be removed v0.9.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
@@ -100,10 +94,9 @@ def __init__(
lowercase: bool = False,
whitespace: bool = False,
return_sentence_level_score: bool = False,
compute_on_step: Optional[bool] = None,
**kwargs: Dict[str, Any],
):
super().__init__(compute_on_step=compute_on_step, **kwargs)
super().__init__(**kwargs)

if not isinstance(n_char_order, int) or n_char_order < 1:
raise ValueError("Expected argument `n_char_order` to be an integer greater than or equal to 1.")
11 changes: 2 additions & 9 deletions torchmetrics/text/eed.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
from typing import Any, Dict, List, Sequence, Tuple, Union

from torch import Tensor, stack
from typing_extensions import Literal
@@ -33,12 +33,6 @@ class ExtendedEditDistance(Metric):
rho: coverage cost, penalty for repetition of characters
deletion: penalty for deletion of character
insertion: penalty for insertion or substitution of character
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False.
.. deprecated:: v0.8
Argument has no use anymore and will be removed v0.9.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Return:
@@ -69,10 +63,9 @@ def __init__(
rho: float = 0.3,
deletion: float = 0.2,
insertion: float = 1.0,
compute_on_step: Optional[bool] = None,
**kwargs: Dict[str, Any],
):
super().__init__(compute_on_step=compute_on_step, **kwargs)
super().__init__(**kwargs)

if language not in ("en", "ja"):
raise ValueError(f"Expected argument `language` to either be `en` or `ja` but got {language}")
11 changes: 2 additions & 9 deletions torchmetrics/text/mer.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Any, Dict, List, Optional, Union
from typing import Any, Dict, List, Union

import torch
from torch import Tensor, tensor
@@ -40,12 +40,6 @@ class MatchErrorRate(Metric):
Args:
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False.
.. deprecated:: v0.8
Argument has no use anymore and will be removed v0.9.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Returns:
@@ -65,10 +59,9 @@ class MatchErrorRate(Metric):

def __init__(
self,
compute_on_step: Optional[bool] = None,
**kwargs: Dict[str, Any],
):
super().__init__(compute_on_step=compute_on_step, **kwargs)
super().__init__(**kwargs)
self.add_state("errors", tensor(0, dtype=torch.float), dist_reduce_fx="sum")
self.add_state("total", tensor(0, dtype=torch.float), dist_reduce_fx="sum")

11 changes: 2 additions & 9 deletions torchmetrics/text/rouge.py
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
from typing import Any, Callable, Dict, List, Sequence, Tuple, Union

from torch import Tensor
from typing_extensions import Literal
@@ -49,12 +49,6 @@ class ROUGEScore(Metric):
rouge_keys: A list of rouge types to calculate.
Keys that are allowed are ``rougeL``, ``rougeLsum``, and ``rouge1`` through ``rouge9``.
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False.
.. deprecated:: v0.8
Argument has no use anymore and will be removed v0.9.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
@@ -97,10 +91,9 @@ def __init__(
tokenizer: Callable[[str], Sequence[str]] = None,
accumulate: Literal["avg", "best"] = "best",
rouge_keys: Union[str, Tuple[str, ...]] = ("rouge1", "rouge2", "rougeL", "rougeLsum"), # type: ignore
compute_on_step: Optional[bool] = None,
**kwargs: Dict[str, Any],
):
super().__init__(compute_on_step=compute_on_step, **kwargs)
super().__init__(**kwargs)
if use_stemmer or "rougeLsum" in rouge_keys:
if not _NLTK_AVAILABLE:
raise ModuleNotFoundError(
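ROUGEScore follows the same pattern, and `compute()` still returns a dictionary of per-key precision/recall/f-measure values. A usage sketch (example text is illustrative; keys that require `nltk`, such as `rougeLsum`, are avoided here):

from torchmetrics import ROUGEScore

rouge = ROUGEScore(rouge_keys=("rouge1", "rougeL"))
preds = "the quick brown fox jumped over the lazy dog"
target = "the quick brown fox jumps over the lazy dog"
scores = rouge(preds, target)
# e.g. scores["rouge1_fmeasure"], scores["rougeL_precision"]
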
11 changes: 2 additions & 9 deletions torchmetrics/text/sacre_bleu.py
@@ -17,7 +17,7 @@
# Authors: torchtext authors and @sluks
# Date: 2020-07-18
# Link: https://pytorch.org/text/_modules/torchtext/data/metrics.html#bleu_score
from typing import Any, Dict, Optional, Sequence
from typing import Any, Dict, Sequence

from typing_extensions import Literal

@@ -41,12 +41,6 @@ class SacreBLEUScore(BLEUScore):
tokenize: Tokenization technique to be used.
Supported tokenization: ``['none', '13a', 'zh', 'intl', 'char']``
lowercase: If ``True``, BLEU score over lowercased text is calculated.
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False.
.. deprecated:: v0.8
Argument has no use anymore and will be removed v0.9.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
@@ -80,10 +74,9 @@ def __init__(
smooth: bool = False,
tokenize: Literal["none", "13a", "zh", "intl", "char"] = "13a",
lowercase: bool = False,
compute_on_step: Optional[bool] = None,
**kwargs: Dict[str, Any],
):
super().__init__(n_gram=n_gram, smooth=smooth, compute_on_step=compute_on_step, **kwargs)
super().__init__(n_gram=n_gram, smooth=smooth, **kwargs)
if tokenize not in AVAILABLE_TOKENIZERS:
raise ValueError(f"Argument `tokenize` expected to be one of {AVAILABLE_TOKENIZERS} but got {tokenize}.")

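SacreBLEUScore is the one subclass in this set: it previously forwarded `compute_on_step` to `BLEUScore.__init__` and now passes only `n_gram`, `smooth`, and the remaining keyword arguments. A usage sketch (illustrative data):

from torchmetrics import SacreBLEUScore

sacre = SacreBLEUScore(tokenize="13a", lowercase=False)  # compute_on_step no longer accepted
preds = ["the cat is on the mat"]
target = [["there is a cat on the mat"]]
score = sacre(preds, target)
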
11 changes: 2 additions & 9 deletions torchmetrics/text/squad.py
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, Optional
from typing import Any, Dict

import torch
from torch import Tensor
@@ -31,12 +31,6 @@ class SQuAD(Metric):
Answering Dataset (SQuAD).
Args:
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False.
.. deprecated:: v0.8
Argument has no use anymore and will be removed v0.9.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
@@ -61,10 +55,9 @@ class SQuAD(Metric):

def __init__(
self,
compute_on_step: Optional[bool] = None,
**kwargs: Dict[str, Any],
):
super().__init__(compute_on_step=compute_on_step, **kwargs)
super().__init__(**kwargs)

self.add_state(name="f1_score", default=torch.tensor(0, dtype=torch.float), dist_reduce_fx="sum")
self.add_state(name="exact_match", default=torch.tensor(0, dtype=torch.float), dist_reduce_fx="sum")
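SQuAD keeps its summed `f1_score` and `exact_match` states; only the constructor changes. A usage sketch with SQuAD-style prediction/answer dictionaries (the exact field layout is recalled from the metric's documentation, so treat it as an assumption):

from torchmetrics import SQuAD

squad = SQuAD()
preds = [{"prediction_text": "1976", "id": "56e10a3be3433e1400422b22"}]
target = [{"answers": {"answer_start": [97], "text": ["1976"]}, "id": "56e10a3be3433e1400422b22"}]
scores = squad(preds, target)  # dict with "exact_match" and "f1" entries
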
9 changes: 1 addition & 8 deletions torchmetrics/text/ter.py
@@ -34,12 +34,6 @@ class TranslationEditRate(Metric):
lowercase: An indication whether to enable case-insesitivity.
asian_support: An indication whether asian characters to be processed.
return_sentence_level_score: An indication whether a sentence-level TER to be returned.
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False.
.. deprecated:: v0.8
Argument has no use anymore and will be removed v0.9.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
@@ -67,10 +61,9 @@ def __init__(
lowercase: bool = True,
asian_support: bool = False,
return_sentence_level_score: bool = False,
compute_on_step: Optional[bool] = None,
**kwargs: Dict[str, Any],
):
super().__init__(compute_on_step=compute_on_step, **kwargs)
super().__init__(**kwargs)
if not isinstance(normalize, bool):
raise ValueError(f"Expected argument `normalize` to be of type boolean but got {normalize}.")
if not isinstance(no_punctuation, bool):
11 changes: 2 additions & 9 deletions torchmetrics/text/wer.py
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Optional, Union
from typing import Any, Dict, List, Union

import torch
from torch import Tensor, tensor
@@ -39,12 +39,6 @@ class WordErrorRate(Metric):
Compute WER score of transcribed segments against references.
Args:
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False.
.. deprecated:: v0.8
Argument has no use anymore and will be removed v0.9.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Returns:
@@ -64,10 +58,9 @@ class WordErrorRate(Metric):

def __init__(
self,
compute_on_step: Optional[bool] = None,
**kwargs: Dict[str, Any],
):
super().__init__(compute_on_step=compute_on_step, **kwargs)
super().__init__(**kwargs)
self.add_state("errors", tensor(0, dtype=torch.float), dist_reduce_fx="sum")
self.add_state("total", tensor(0, dtype=torch.float), dist_reduce_fx="sum")

11 changes: 2 additions & 9 deletions torchmetrics/text/wil.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Any, Dict, List, Optional, Union
from typing import Any, Dict, List, Union

from torch import Tensor, tensor

@@ -39,12 +39,6 @@ class WordInfoLost(Metric):
Args:
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False.
.. deprecated:: v0.8
Argument has no use anymore and will be removed v0.9.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
@@ -64,10 +58,9 @@ class WordInfoLost(Metric):

def __init__(
self,
compute_on_step: Optional[bool] = None,
**kwargs: Dict[str, Any],
):
super().__init__(compute_on_step=compute_on_step, **kwargs)
super().__init__(**kwargs)
self.add_state("errors", tensor(0.0), dist_reduce_fx="sum")
self.add_state("target_total", tensor(0.0), dist_reduce_fx="sum")
self.add_state("preds_total", tensor(0.0), dist_reduce_fx="sum")