diff --git a/requirements/typing.txt b/requirements/typing.txt index 8160a60ae69..18b8e08be1a 100644 --- a/requirements/typing.txt +++ b/requirements/typing.txt @@ -1,4 +1,5 @@ -mypy==0.982 +mypy==1.0.0 + types-PyYAML types-emoji types-protobuf diff --git a/src/torchmetrics/functional/regression/mae.py b/src/torchmetrics/functional/regression/mae.py index c6af0769ca6..2b665eec181 100644 --- a/src/torchmetrics/functional/regression/mae.py +++ b/src/torchmetrics/functional/regression/mae.py @@ -29,8 +29,8 @@ def _mean_absolute_error_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, target: Ground truth tensor """ _check_same_shape(preds, target) - preds = preds if preds.is_floating_point else preds.float() - target = target if target.is_floating_point else target.float() + preds = preds if preds.is_floating_point() else preds.float() + target = target if target.is_floating_point() else target.float() sum_abs_error = torch.sum(torch.abs(preds - target)) n_obs = target.numel() return sum_abs_error, n_obs diff --git a/src/torchmetrics/functional/text/bert.py b/src/torchmetrics/functional/text/bert.py index 745fc6a85c4..63ce7df9a6b 100644 --- a/src/torchmetrics/functional/text/bert.py +++ b/src/torchmetrics/functional/text/bert.py @@ -60,7 +60,7 @@ def _get_embeddings_and_idf_scale( all_layers: bool = False, idf: bool = False, verbose: bool = False, - user_forward_fn: Callable[[Module, Dict[str, Tensor]], Tensor] = None, + user_forward_fn: Optional[Callable[[Module, Dict[str, Tensor]], Tensor]] = None, ) -> Tuple[Tensor, Tensor]: """Calculate sentence embeddings and the inverse-document-frequency scaling factor. 
@@ -249,7 +249,7 @@ def bert_score( all_layers: bool = False, model: Optional[Module] = None, user_tokenizer: Any = None, - user_forward_fn: Callable[[Module, Dict[str, Tensor]], Tensor] = None, + user_forward_fn: Optional[Callable[[Module, Dict[str, Tensor]], Tensor]] = None, verbose: bool = False, idf: bool = False, device: Optional[Union[str, torch.device]] = None, diff --git a/src/torchmetrics/functional/text/rouge.py b/src/torchmetrics/functional/text/rouge.py index eafc370efc0..2a97bc234c9 100644 --- a/src/torchmetrics/functional/text/rouge.py +++ b/src/torchmetrics/functional/text/rouge.py @@ -163,8 +163,8 @@ def find_union(lcs_tables: Sequence[Sequence[int]]) -> Sequence[int]: def _normalize_and_tokenize_text( text: str, stemmer: Optional[Any] = None, - normalizer: Callable[[str], str] = None, - tokenizer: Callable[[str], Sequence[str]] = None, + normalizer: Optional[Callable[[str], str]] = None, + tokenizer: Optional[Callable[[str], Sequence[str]]] = None, ) -> Sequence[str]: """Rouge score should be calculated only over lowercased words and digits. Optionally, Porter stemmer can be used to strip word suffixes to improve matching. The text normalization follows the implemantion from `Rouge @@ -282,8 +282,8 @@ def _rouge_score_update( rouge_keys_values: List[Union[int, str]], accumulate: str, stemmer: Optional[Any] = None, - normalizer: Callable[[str], str] = None, - tokenizer: Callable[[str], Sequence[str]] = None, + normalizer: Optional[Callable[[str], str]] = None, + tokenizer: Optional[Callable[[str], Sequence[str]]] = None, ) -> Dict[Union[int, str], List[Dict[str, Tensor]]]: """Update the rouge score with the current set of predicted and target sentences. 
@@ -412,8 +412,8 @@ def rouge_score( target: Union[str, Sequence[str], Sequence[Sequence[str]]], accumulate: Literal["avg", "best"] = "best", use_stemmer: bool = False, - normalizer: Callable[[str], str] = None, - tokenizer: Callable[[str], Sequence[str]] = None, + normalizer: Optional[Callable[[str], str]] = None, + tokenizer: Optional[Callable[[str], Sequence[str]]] = None, rouge_keys: Union[str, Tuple[str, ...]] = ("rouge1", "rouge2", "rougeL", "rougeLsum"), ) -> Dict[str, Tensor]: """Calculate `Calculate Rouge Score`_ , used for automatic summarization. diff --git a/src/torchmetrics/metric.py b/src/torchmetrics/metric.py index f35309c7a70..f44cba6c487 100644 --- a/src/torchmetrics/metric.py +++ b/src/torchmetrics/metric.py @@ -688,7 +688,7 @@ def persistent(self, mode: bool = False) -> None: def state_dict( # type: ignore[override] # todo self, - destination: Dict[str, Any] = None, + destination: Optional[Dict[str, Any]] = None, prefix: str = "", keep_vars: bool = False, ) -> Dict[str, Any]: diff --git a/src/torchmetrics/text/bert.py b/src/torchmetrics/text/bert.py index e877debe327..85b9be6d844 100644 --- a/src/torchmetrics/text/bert.py +++ b/src/torchmetrics/text/bert.py @@ -133,7 +133,7 @@ def __init__( all_layers: bool = False, model: Optional[Module] = None, user_tokenizer: Optional[Any] = None, - user_forward_fn: Callable[[Module, Dict[str, Tensor]], Tensor] = None, + user_forward_fn: Optional[Callable[[Module, Dict[str, Tensor]], Tensor]] = None, verbose: bool = False, idf: bool = False, device: Optional[Union[str, torch.device]] = None, diff --git a/src/torchmetrics/text/rouge.py b/src/torchmetrics/text/rouge.py index e0ce6e7ead9..509dd75af1e 100644 --- a/src/torchmetrics/text/rouge.py +++ b/src/torchmetrics/text/rouge.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Any, Callable, Dict, List, Sequence, Tuple, Union +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union from torch import Tensor from typing_extensions import Literal @@ -97,8 +97,8 @@ class ROUGEScore(Metric): def __init__( self, use_stemmer: bool = False, - normalizer: Callable[[str], str] = None, - tokenizer: Callable[[str], Sequence[str]] = None, + normalizer: Optional[Callable[[str], str]] = None, + tokenizer: Optional[Callable[[str], Sequence[str]]] = None, accumulate: Literal["avg", "best"] = "best", rouge_keys: Union[str, Tuple[str, ...]] = ("rouge1", "rouge2", "rougeL", "rougeLsum"), **kwargs: Any,