diff --git a/tests/benchmarks/test_basic_parity.py b/tests/benchmarks/test_basic_parity.py
index eddbc9a69ad40e..0d3ca1166ad10a 100644
--- a/tests/benchmarks/test_basic_parity.py
+++ b/tests/benchmarks/test_basic_parity.py
@@ -86,7 +86,7 @@ def _hook_memory():
     return used_memory


-def measure_loops(cls_model, kind, num_runs: int=10, num_epochs: int=10):
+def measure_loops(cls_model, kind, num_runs: int = 10, num_epochs: int = 10):
     """Returns an array with the last loss from each epoch for each run."""
     hist_losses = []
     hist_durations = []
@@ -116,7 +116,7 @@ def measure_loops(cls_model, kind, num_runs: int=10, num_epochs: int=10):
     return {"losses": hist_losses, "durations": hist_durations, "memory": hist_memory}


-def vanilla_loop(cls_model, idx: Optional[int], device_type: str = "cuda", num_epochs: int=10):
+def vanilla_loop(cls_model, idx: Optional[int], device_type: str = "cuda", num_epochs: int = 10):
     device = torch.device(device_type)
     # set seed
     seed_everything(idx)
@@ -148,7 +148,7 @@ def vanilla_loop(cls_model, idx: Optional[int], device_type: str = "cuda", num_e
     return epoch_losses[-1], _hook_memory()


-def lightning_loop(cls_model, idx: Optional[int], device_type: str = "cuda", num_epochs: int=10):
+def lightning_loop(cls_model, idx: Optional[int], device_type: str = "cuda", num_epochs: int = 10):
     seed_everything(idx)
     torch.backends.cudnn.deterministic = True
diff --git a/tests/callbacks/test_early_stopping.py b/tests/callbacks/test_early_stopping.py
index 35ca9338394761..07cb895fcf78fe 100644
--- a/tests/callbacks/test_early_stopping.py
+++ b/tests/callbacks/test_early_stopping.py
@@ -267,7 +267,9 @@ def validation_epoch_end(self, outputs):


 @pytest.mark.parametrize("step_freeze, min_steps, min_epochs", [(5, 1, 1), (5, 1, 3), (3, 15, 1)])
-def test_min_steps_override_early_stopping_functionality(tmpdir, step_freeze: int, min_steps: int, min_epochs: int) -> None:
+def test_min_steps_override_early_stopping_functionality(
+    tmpdir, step_freeze: int, min_steps: int, min_epochs: int
+) -> None:
     """Excepted Behaviour:
     IF `min_steps` was set to a higher value than the `trainer.global_step` when `early_stopping` is being triggered,
     THEN the trainer should continue until reaching `trainer.global_step` == `min_steps`, and stop.
diff --git a/tests/callbacks/test_pruning.py b/tests/callbacks/test_pruning.py
index b97405071d8072..7689d81d4b3805 100644
--- a/tests/callbacks/test_pruning.py
+++ b/tests/callbacks/test_pruning.py
@@ -59,13 +59,13 @@ def apply(cls, module, name, amount):

 def train_with_pruning_callback(
     tmpdir,
-    parameters_to_prune: bool=False,
-    use_global_unstructured: bool=False,
-    pruning_fn: str="l1_unstructured",
-    use_lottery_ticket_hypothesis: bool=False,
+    parameters_to_prune: bool = False,
+    use_global_unstructured: bool = False,
+    pruning_fn: str = "l1_unstructured",
+    use_lottery_ticket_hypothesis: bool = False,
     strategy=None,
-    accelerator: str="cpu",
-    devices: int=1,
+    accelerator: str = "cpu",
+    devices: int = 1,
 ) -> None:
     model = TestModel()
diff --git a/tests/callbacks/test_stochastic_weight_avg.py b/tests/callbacks/test_stochastic_weight_avg.py
index 27f9be0fcd38c7..34921ab9bbd3c2 100644
--- a/tests/callbacks/test_stochastic_weight_avg.py
+++ b/tests/callbacks/test_stochastic_weight_avg.py
@@ -112,12 +112,12 @@ def on_train_end(self, trainer: Trainer, pl_module: LightningModule) -> None:

 def train_with_swa(
     tmpdir,
-    batchnorm: bool=True,
+    batchnorm: bool = True,
     strategy=None,
-    accelerator: str="cpu",
-    devices: int=1,
-    interval: str="epoch",
-    iterable_dataset: bool=False,
+    accelerator: str = "cpu",
+    devices: int = 1,
+    interval: str = "epoch",
+    iterable_dataset: bool = False,
 ) -> None:
     model = SwaTestModel(batchnorm=batchnorm, interval=interval, iterable_dataset=iterable_dataset)
     swa_start = 2
diff --git a/tests/checkpointing/test_checkpoint_callback_frequency.py b/tests/checkpointing/test_checkpoint_callback_frequency.py
index e9f5d71cb5b9b4..345bef230283fe 100644
--- a/tests/checkpointing/test_checkpoint_callback_frequency.py
+++ b/tests/checkpointing/test_checkpoint_callback_frequency.py
@@ -56,7 +56,9 @@ def test_default_checkpoint_freq(save_mock, tmpdir, epochs: int, val_check_inter
     ["k", "epochs", "val_check_interval", "expected"], [(1, 1, 1.0, 1), (2, 2, 1.0, 2), (2, 1, 0.25, 4), (2, 2, 0.3, 6)]
 )
 @pytest.mark.parametrize("save_last", (False, True))
-def test_top_k(save_mock, tmpdir, k: int, epochs: int, val_check_interval: float, expected: int, save_last: bool) -> None:
+def test_top_k(
+    save_mock, tmpdir, k: int, epochs: int, val_check_interval: float, expected: int, save_last: bool
+) -> None:
     class TestModel(BoringModel):
         def __init__(self):
             super().__init__()
diff --git a/tests/core/test_metric_result_integration.py b/tests/core/test_metric_result_integration.py
index 19fa5860c19c39..f62287e4cb8c8c 100644
--- a/tests/core/test_metric_result_integration.py
+++ b/tests/core/test_metric_result_integration.py
@@ -371,7 +371,7 @@ def __repr__(self) -> str:
         return f"{self.__class__.__name__}(sum={self.sum}, count={self.count})"


-def result_collection_reload(accelerator: str="auto", devices: int=1, **kwargs) -> None:
+def result_collection_reload(accelerator: str = "auto", devices: int = 1, **kwargs) -> None:
     """This test is going to validate _ResultCollection is properly being reload and final accumulation with Fault
     Tolerant Training is correct."""
diff --git a/tests/helpers/advanced_models.py b/tests/helpers/advanced_models.py
index 17e40f0c9270bd..33216f8b83c053 100644
--- a/tests/helpers/advanced_models.py
+++ b/tests/helpers/advanced_models.py
@@ -81,7 +81,8 @@ class BasicGAN(LightningModule):
     """Implements a basic GAN for the purpose of illustrating multiple optimizers."""

     def __init__(
-        self, hidden_dim: int = 128, learning_rate: float = 0.001, b1: float = 0.5, b2: float = 0.999, **kwargs) -> None:
+        self, hidden_dim: int = 128, learning_rate: float = 0.001, b1: float = 0.5, b2: float = 0.999, **kwargs
+    ) -> None:
         super().__init__()
         self.hidden_dim = hidden_dim
         self.learning_rate = learning_rate
@@ -218,7 +219,9 @@ def train_dataloader(self):


 class ParityModuleCIFAR(LightningModule):
-    def __init__(self, backbone: str="resnet101", hidden_dim: int=1024, learning_rate: float=1e-3, pretrained: bool=True) -> None:
+    def __init__(
+        self, backbone: str = "resnet101", hidden_dim: int = 1024, learning_rate: float = 1e-3, pretrained: bool = True
+    ) -> None:
         super().__init__()
         self.save_hyperparameters()
diff --git a/tests/helpers/datamodules.py b/tests/helpers/datamodules.py
index c19985689b5479..32667c63d3230f 100644
--- a/tests/helpers/datamodules.py
+++ b/tests/helpers/datamodules.py
@@ -101,7 +101,7 @@ def sample(self):


 class ClassifDataModule(SklearnDataModule):
-    def __init__(self, num_features: int=32, length: int=800, num_classes: int=3, batch_size: int=10) -> None:
+    def __init__(self, num_features: int = 32, length: int = 800, num_classes: int = 3, batch_size: int = 10) -> None:
         data = make_classification(
             n_samples=length, n_features=num_features, n_classes=num_classes, n_clusters_per_class=1, random_state=42
         )
@@ -109,7 +109,7 @@ def __init__(self, num_features: int=32, length: int=800, num_classes: int=3, ba


 class RegressDataModule(SklearnDataModule):
-    def __init__(self, num_features: int=16, length: int=800, batch_size: int=10) -> None:
+    def __init__(self, num_features: int = 16, length: int = 800, batch_size: int = 10) -> None:
         x, y = make_regression(n_samples=length, n_features=num_features, random_state=42)
         y = [[v] for v in y]
         super().__init__((x, y), x_type=torch.float32, y_type=torch.float32, batch_size=batch_size)
diff --git a/tests/helpers/datasets.py b/tests/helpers/datasets.py
index 3210aa52efc85a..123da8495fc85c 100644
--- a/tests/helpers/datasets.py
+++ b/tests/helpers/datasets.py
@@ -57,7 +57,8 @@ class MNIST(Dataset):
     cache_folder_name = "complete"

     def __init__(
-        self, root: str, train: bool = True, normalize: tuple = (0.1307, 0.3081), download: bool = True, **kwargs) -> None:
+        self, root: str, train: bool = True, normalize: tuple = (0.1307, 0.3081), download: bool = True, **kwargs
+    ) -> None:
         super().__init__()
         self.root = root
         self.train = train  # training set or test set
@@ -185,7 +186,7 @@ def _download(self, data_folder: str) -> None:


 class AverageDataset(Dataset):
-    def __init__(self, dataset_len: int=300, sequence_len: int=100) -> None:
+    def __init__(self, dataset_len: int = 300, sequence_len: int = 100) -> None:
         self.dataset_len = dataset_len
         self.sequence_len = sequence_len
         self.input_seq = torch.randn(dataset_len, sequence_len, 10)
diff --git a/tests/helpers/deterministic_model.py b/tests/helpers/deterministic_model.py
index a177c06d04a991..12013875c63bd4 100644
--- a/tests/helpers/deterministic_model.py
+++ b/tests/helpers/deterministic_model.py
@@ -56,7 +56,7 @@ def step(self, batch, batch_idx):

         return out

-    def count_num_graphs(self, result, num_graphs: int=0):
+    def count_num_graphs(self, result, num_graphs: int = 0):
         for k, v in result.items():
             if isinstance(v, torch.Tensor) and v.grad_fn is not None:
                 num_graphs += 1
diff --git a/tests/helpers/pipelines.py b/tests/helpers/pipelines.py
index 39df94de75fd15..477167e5e17e62 100644
--- a/tests/helpers/pipelines.py
+++ b/tests/helpers/pipelines.py
@@ -93,7 +93,7 @@ def run_model_test(


 @torch.no_grad()
-def run_model_prediction(trained_model, dataloader, min_acc: float=0.50) -> None:
+def run_model_prediction(trained_model, dataloader, min_acc: float = 0.50) -> None:
     orig_device = trained_model.device
     # run prediction on 1 batch
     trained_model.cpu()
diff --git a/tests/helpers/simple_models.py b/tests/helpers/simple_models.py
index 9a58810e81481a..4210cde6cd29c1 100644
--- a/tests/helpers/simple_models.py
+++ b/tests/helpers/simple_models.py
@@ -22,7 +22,7 @@


 class ClassificationModel(LightningModule):
-    def __init__(self, lr: float=0.01) -> None:
+    def __init__(self, lr: float = 0.01) -> None:
         super().__init__()
         self.lr = lr
diff --git a/tests/helpers/utils.py b/tests/helpers/utils.py
index 72ab603c6cd92b..ec9639a8635929 100644
--- a/tests/helpers/utils.py
+++ b/tests/helpers/utils.py
@@ -60,13 +60,13 @@ def load_model_from_checkpoint(logger, root_weights_dir, module_class: Type[Bori
     return trained_model


-def assert_ok_model_acc(trainer, key: str="test_acc", thr: float=0.5) -> None:
+def assert_ok_model_acc(trainer, key: str = "test_acc", thr: float = 0.5) -> None:
     # this model should get 0.80+ acc
     acc = trainer.callback_metrics[key]
     assert acc > thr, f"Model failed to get expected {thr} accuracy. {key} = {acc}"


-def reset_seed(seed: Optional[int]=0) -> None:
+def reset_seed(seed: Optional[int] = 0) -> None:
     seed_everything(seed)
diff --git a/tests/loops/batch/test_truncated_bptt.py b/tests/loops/batch/test_truncated_bptt.py
index 8b99d8b67ad02f..835871a9183db8 100644
--- a/tests/loops/batch/test_truncated_bptt.py
+++ b/tests/loops/batch/test_truncated_bptt.py
@@ -25,7 +25,7 @@
 class LSTMModel(LightningModule):
     """LSTM sequence-to-sequence model for testing TBPTT with automatic optimization."""

-    def __init__(self, truncated_bptt_steps: int=2, input_size: int=1, hidden_size: int=8) -> None:
+    def __init__(self, truncated_bptt_steps: int = 2, input_size: int = 1, hidden_size: int = 8) -> None:
         super().__init__()
         self.input_size = input_size
         self.hidden_size = hidden_size
diff --git a/tests/loops/epoch/test_training_epoch_loop.py b/tests/loops/epoch/test_training_epoch_loop.py
index 735e5508200188..22d02b47c11004 100644
--- a/tests/loops/epoch/test_training_epoch_loop.py
+++ b/tests/loops/epoch/test_training_epoch_loop.py
@@ -63,7 +63,9 @@
         ),
     ],
 )
-def test_prepare_outputs_training_epoch_end_automatic(num_optimizers: int, batch_outputs: List[List[Union[Dict[int, Dict[str, Any]], Dict[str, Any]]]], expected) -> None:
+def test_prepare_outputs_training_epoch_end_automatic(
+    num_optimizers: int, batch_outputs: List[List[Union[Dict[int, Dict[str, Any]], Dict[str, Any]]]], expected
+) -> None:
     """Test that the loop converts the nested lists of outputs to the format that the `training_epoch_end` hook
     currently expects in the case of automatic optimization."""
     prepared = TrainingEpochLoop._prepare_outputs_training_epoch_end(
@@ -89,7 +91,9 @@ def test_prepare_outputs_training_epoch_end_automatic(num_optimizers: int, batch
         ([[_out00, _out01], [_out02, _out03], [], [_out10]], [[_out00, _out01], [_out02, _out03], [_out10]]),
     ],
 )
-def test_prepare_outputs_training_epoch_end_manual(batch_outputs: List[List[Union[Dict[int, Dict[str, Any]], Dict[str, Any]]]], expected) -> None:
+def test_prepare_outputs_training_epoch_end_manual(
+    batch_outputs: List[List[Union[Dict[int, Dict[str, Any]], Dict[str, Any]]]], expected
+) -> None:
     """Test that the loop converts the nested lists of outputs to the format that the `training_epoch_end` hook
     currently expects in the case of manual optimization."""
     prepared = TrainingEpochLoop._prepare_outputs_training_epoch_end(
@@ -115,7 +119,9 @@ def test_prepare_outputs_training_epoch_end_manual(batch_outputs: List[List[Unio
         (2, [{0: _out00, 1: _out01}, {0: _out10, 1: _out11}], [[_out00, _out10], [_out01, _out11]]),
     ],
 )
-def test_prepare_outputs_training_batch_end_automatic(num_optimizers: int, batch_end_outputs: List[Union[Dict[int, Dict[str, Any]], Dict[str, Any]]], expected) -> None:
+def test_prepare_outputs_training_batch_end_automatic(
+    num_optimizers: int, batch_end_outputs: List[Union[Dict[int, Dict[str, Any]], Dict[str, Any]]], expected
+) -> None:
     """Test that the loop converts the nested lists of outputs to the format that the `on_train_batch_end` hook
     currently expects in the case of automatic optimization."""
     prepared = TrainingEpochLoop._prepare_outputs_training_batch_end(
@@ -137,7 +143,9 @@ def test_prepare_outputs_training_batch_end_automatic(num_optimizers: int, batch
         ([_out00, _out01, None, _out03], [_out00, _out01, _out03]),
     ],
 )
-def test_prepare_outputs_training_batch_end_manual(batch_end_outputs: List[Union[Dict[int, Dict[str, Any]], Dict[str, Any]]], expected) -> None:
+def test_prepare_outputs_training_batch_end_manual(
+    batch_end_outputs: List[Union[Dict[int, Dict[str, Any]], Dict[str, Any]]], expected
+) -> None:
     """Test that the loop converts the nested lists of outputs to the format that the `on_train_batch_end` hook
     currently expects in the case of manual optimization."""
     prepared = TrainingEpochLoop._prepare_outputs_training_batch_end(
diff --git a/tests/loops/optimization/test_optimizer_loop.py b/tests/loops/optimization/test_optimizer_loop.py
index 41ed6d82588d07..0677ce852d0432 100644
--- a/tests/loops/optimization/test_optimizer_loop.py
+++ b/tests/loops/optimization/test_optimizer_loop.py
@@ -143,7 +143,9 @@ class CustomException(Exception):
 @pytest.mark.parametrize("stop_epoch", (0, 1))
 @pytest.mark.parametrize("stop_batch", (0, 1, 2))
 @pytest.mark.parametrize("n_optimizers,stop_optimizer", [(2, 0), (2, 1), (3, 2)])
-def test_loop_restart_progress_multiple_optimizers(tmpdir, n_optimizers, stop_optimizer, stop_epoch, stop_batch) -> None:
+def test_loop_restart_progress_multiple_optimizers(
+    tmpdir, n_optimizers, stop_optimizer, stop_epoch, stop_batch
+) -> None:
     """Test that Lightning can resume from a point where a training_step failed while in the middle of processing
     several optimizer steps for one batch.
diff --git a/tests/loops/test_loops.py b/tests/loops/test_loops.py
index 0bc8400e8b6613..1f58ef748a8ce2 100644
--- a/tests/loops/test_loops.py
+++ b/tests/loops/test_loops.py
@@ -317,7 +317,9 @@ def on_load_checkpoint(self, state_dict: Dict) -> None:
 @pytest.mark.parametrize("stop_epoch", (1, 2))
 @pytest.mark.parametrize("stop_batch", (1, 2))
 @pytest.mark.parametrize("n_dataloaders,stop_dataloader", [(2, 0), (2, 1), (3, 2)])
-def test_loop_restart_progress_multiple_dataloaders(tmpdir, n_dataloaders, stop_dataloader, stop_epoch, stop_batch: int) -> None:
+def test_loop_restart_progress_multiple_dataloaders(
+    tmpdir, n_dataloaders, stop_dataloader, stop_epoch, stop_batch: int
+) -> None:
     n_batches = 5
     n_epochs = 3

@@ -388,7 +390,9 @@ def val_dataloader(self):
 @pytest.mark.parametrize("stop_epoch", (1, 2))
 @pytest.mark.parametrize("stop_batch", (1, 2))
 @pytest.mark.parametrize("stop_optimizer", (1, 2))
-def test_loop_state_on_exception(accumulate_grad_batches, stop_epoch, stop_batch, stop_optimizer: int, n_optimizers: int, tmpdir) -> None:
+def test_loop_state_on_exception(
+    accumulate_grad_batches, stop_epoch, stop_batch, stop_optimizer: int, n_optimizers: int, tmpdir
+) -> None:
     stop_optimizer = stop_optimizer if stop_optimizer < n_optimizers else 0
     n_epochs = 3
     n_batches = 3
diff --git a/tests/models/data/horovod/train_default_model.py b/tests/models/data/horovod/train_default_model.py
index a24abdf79ea526..a8ed485ebdff06 100644
--- a/tests/models/data/horovod/train_default_model.py
+++ b/tests/models/data/horovod/train_default_model.py
@@ -44,7 +44,7 @@
 parser.add_argument("--on-gpu", action="store_true", default=False)


-def run_test_from_config(trainer_options, on_gpu, check_size: bool=True) -> None:
+def run_test_from_config(trainer_options, on_gpu, check_size: bool = True) -> None:
     """Trains the default model with the given config."""
     set_random_main_port()
     reset_seed()
diff --git a/tests/models/test_amp.py b/tests/models/test_amp.py
index 19356f5a80e178..60687fcaa9eaa7 100644
--- a/tests/models/test_amp.py
+++ b/tests/models/test_amp.py
@@ -54,7 +54,7 @@ def test_step(self, batch, batch_idx) -> Dict[str, Any]:
         output = self._step(batch)
         return {"y": output}

-    def predict_step(self, batch, batch_idx: int, dataloader_idx: int=0):
+    def predict_step(self, batch, batch_idx: int, dataloader_idx: int = 0):
         self._assert_autocast_enabled()
         output = self(batch)
         is_bfloat16 = self.trainer.precision_plugin.precision == "bf16"
diff --git a/tests/models/test_gpu.py b/tests/models/test_gpu.py
index 874d6f4e477369..b4ec703ebb4039 100644
--- a/tests/models/test_gpu.py
+++ b/tests/models/test_gpu.py
@@ -219,7 +219,9 @@ def test_parse_gpu_fail_on_non_existent_id_2(mocked_device_count) -> None:


 @pytest.mark.parametrize("gpus", [-1, "-1"])
-def test_parse_gpu_returns_none_when_no_devices_are_available(mocked_device_count_0, gpus: Union[None, List[int], int, str]) -> None:
+def test_parse_gpu_returns_none_when_no_devices_are_available(
+    mocked_device_count_0, gpus: Union[None, List[int], int, str]
+) -> None:
     with pytest.raises(MisconfigurationException):
         device_parser.parse_gpu_ids(gpus)

@@ -238,7 +240,9 @@ def test_parse_gpu_returns_none_when_no_devices_are_available(mocked_device_coun
 @mock.patch("torch.cuda.device_count", return_value=1)
 @mock.patch("torch.cuda.is_available", return_value=True)
 @pytest.mark.parametrize("gpus", [[0, 1, 2], 2, "0"])
-def test_torchelastic_gpu_parsing(mocked_device_count, mocked_is_available, gpus: Union[None, List[int], int, str]) -> None:
+def test_torchelastic_gpu_parsing(
+    mocked_device_count, mocked_is_available, gpus: Union[None, List[int], int, str]
+) -> None:
     """Ensure when using torchelastic and nproc_per_node is set to the default of 1 per GPU device
     That we omit sanitizing the gpus as only one of the GPUs is visible."""
     trainer = Trainer(gpus=gpus)
diff --git a/tests/models/test_grad_norm.py b/tests/models/test_grad_norm.py
index 5fc5f074cc4392..ba7268f7e90d7b 100644
--- a/tests/models/test_grad_norm.py
+++ b/tests/models/test_grad_norm.py
@@ -58,7 +58,7 @@ def on_after_backward(self) -> None:


 @pytest.mark.parametrize("norm_type", [1.0, 1.25, 2, 3, 5, 10, "inf"])
-def test_grad_tracking(tmpdir, norm_type, rtol: float=5e-3) -> None:
+def test_grad_tracking(tmpdir, norm_type, rtol: float = 5e-3) -> None:
     # rtol=5e-3 respects the 3 decimals rounding in `.grad_norms` and above
     reset_seed()
diff --git a/tests/models/test_hooks.py b/tests/models/test_hooks.py
index b10efaaa58ff70..645e59b4bf5eeb 100644
--- a/tests/models/test_hooks.py
+++ b/tests/models/test_hooks.py
@@ -282,7 +282,8 @@ def _train_batch(self, *args, **kwargs):

     @staticmethod
     def _auto_train_batch(
-        trainer, model, batches, device=torch.device("cpu"), current_epoch: int=0, current_batch: int=0, **kwargs):
+        trainer, model, batches, device=torch.device("cpu"), current_epoch: int = 0, current_batch: int = 0, **kwargs
+    ):
         using_native_amp = kwargs.get("amp_backend") == "native"
         using_deepspeed = kwargs.get("strategy") == "deepspeed"
         out = []
diff --git a/tests/models/test_horovod.py b/tests/models/test_horovod.py
index 91d1052dfdaa4f..5dc4ef836456e9 100644
--- a/tests/models/test_horovod.py
+++ b/tests/models/test_horovod.py
@@ -42,7 +42,7 @@
 TEST_SCRIPT = os.path.join(os.path.dirname(__file__), "data", "horovod", "train_default_model.py")


-def _run_horovod(trainer_options, on_gpu: bool=False) -> None:
+def _run_horovod(trainer_options, on_gpu: bool = False) -> None:
     """Execute the training script across multiple workers in parallel."""
     num_processes = trainer_options.get("gpus", 2)
     # for Horovod, we interpret `gpus` to be set per worker
diff --git a/tests/models/test_restore.py b/tests/models/test_restore.py
index 7972c285cf6304..9999804d89b921 100644
--- a/tests/models/test_restore.py
+++ b/tests/models/test_restore.py
@@ -58,7 +58,7 @@ def on_train_end(self, trainer: Trainer, pl_module: LightningModule) -> None:


 class ValTestLossBoringModel(BoringModel):
-    def __init__(self, batch_size: int=4) -> None:
+    def __init__(self, batch_size: int = 4) -> None:
         super().__init__()
         self.save_hyperparameters()
diff --git a/tests/models/test_sync_batchnorm.py b/tests/models/test_sync_batchnorm.py
index 925673e6d74e1e..5ca3e03658aec4 100644
--- a/tests/models/test_sync_batchnorm.py
+++ b/tests/models/test_sync_batchnorm.py
@@ -26,7 +26,7 @@


 class SyncBNModule(LightningModule):
-    def __init__(self, gpu_count: int=1, **kwargs) -> None:
+    def __init__(self, gpu_count: int = 1, **kwargs) -> None:
         super().__init__()
         self.gpu_count = gpu_count
diff --git a/tests/plugins/test_double_plugin.py b/tests/plugins/test_double_plugin.py
index e3e56ed99fde8c..bc8c385a905f80 100644
--- a/tests/plugins/test_double_plugin.py
+++ b/tests/plugins/test_double_plugin.py
@@ -68,7 +68,7 @@ def test_step(self, batch, batch_idx) -> Dict[str, Any]:
         loss = self.loss(batch, output)
         return {"y": loss}

-    def predict_step(self, batch, batch_idx: int, dataloader_idx: int=0):
+    def predict_step(self, batch, batch_idx: int, dataloader_idx: int = 0):
         assert batch.dtype == torch.float64
         assert torch.tensor([0.0]).dtype == torch.float64
         assert torch.tensor([0.0], dtype=torch.float16).dtype == torch.float16
@@ -111,7 +111,7 @@ def test_step(self, batch, batch_idx) -> Dict[str, Any]:
         loss = self.loss(batch, output)
         return {"y": loss}

-    def predict_step(self, batch, batch_idx: int, dataloader_idx: int=0):
+    def predict_step(self, batch, batch_idx: int, dataloader_idx: int = 0):
         assert batch.dtype == torch.float64
         output = self.layer(batch)
         assert output.dtype == torch.float64
diff --git a/tests/profiler/test_profiler.py b/tests/profiler/test_profiler.py
index f32559288ccc9c..9f76ace1d1bbba 100644
--- a/tests/profiler/test_profiler.py
+++ b/tests/profiler/test_profiler.py
@@ -82,7 +82,7 @@ def test_simple_profiler_iterable_durations(simple_profiler, action: str, expect
     np.testing.assert_allclose(simple_profiler.recorded_durations[action][:-1], expected, rtol=0.2)


-def test_simple_profiler_overhead(simple_profiler, n_iter: int=5) -> None:
+def test_simple_profiler_overhead(simple_profiler, n_iter: int = 5) -> None:
     """Ensure that the profiler doesn't introduce too much overhead during training."""
     for _ in range(n_iter):
         with simple_profiler.profile("no-op"):
@@ -306,7 +306,7 @@ def test_advanced_profiler_iterable_durations(advanced_profiler, action: str, ex


 @pytest.mark.flaky(reruns=3)
-def test_advanced_profiler_overhead(advanced_profiler, n_iter: int=5) -> None:
+def test_advanced_profiler_overhead(advanced_profiler, n_iter: int = 5) -> None:
     """ensure that the profiler doesn't introduce too much overhead during training."""
     for _ in range(n_iter):
         with advanced_profiler.profile("no-op"):
diff --git a/tests/strategies/test_ddp_fully_sharded_with_full_state_dict.py b/tests/strategies/test_ddp_fully_sharded_with_full_state_dict.py
index 22186b94b07ed3..4c07bbdd262888 100644
--- a/tests/strategies/test_ddp_fully_sharded_with_full_state_dict.py
+++ b/tests/strategies/test_ddp_fully_sharded_with_full_state_dict.py
@@ -109,7 +109,7 @@ def test_fully_sharded_strategy_checkpoint_multi_gpus(tmpdir) -> None:
     _run_multiple_stages(trainer, model)


-def _assert_save_equality(trainer, ckpt_path, cls: Type[TestFSDPModel]=TestFSDPModel) -> None:
+def _assert_save_equality(trainer, ckpt_path, cls: Type[TestFSDPModel] = TestFSDPModel) -> None:
     # Use FullySharded to get the state dict for the sake of comparison
     model_state_dict = trainer.strategy.lightning_module_state_dict()
diff --git a/tests/strategies/test_ddp_strategy.py b/tests/strategies/test_ddp_strategy.py
index 6663d2d920a8b5..f5f1392c09c3bb 100644
--- a/tests/strategies/test_ddp_strategy.py
+++ b/tests/strategies/test_ddp_strategy.py
@@ -54,7 +54,7 @@ def test_ddp_with_2_gpus() -> None:


 class BarrierModel(BoringModel):
-    def setup(self, stage: Optional[str]=None) -> None:
+    def setup(self, stage: Optional[str] = None) -> None:
         assert not isinstance(self.trainer.strategy.model, DistributedDataParallel)
         self.trainer.strategy.barrier("barrier before model is wrapped")
diff --git a/tests/strategies/test_deepspeed_strategy.py b/tests/strategies/test_deepspeed_strategy.py
index f14ea46aff6fcb..b7479f008b0eed 100644
--- a/tests/strategies/test_deepspeed_strategy.py
+++ b/tests/strategies/test_deepspeed_strategy.py
@@ -233,7 +233,9 @@ def backward(self, loss: Tensor, optimizer: Optimizer, optimizer_idx: int, *args
 )
 @mock.patch("deepspeed.init_distributed", autospec=True)
 @mock.patch("pytorch_lightning.Trainer.log_dir", new_callable=mock.PropertyMock, return_value="abc")
-def test_deepspeed_auto_batch_size_config_select(mock_deepspeed_distributed, mock_log_dir, tmpdir, dataset_cls, value) -> None:
+def test_deepspeed_auto_batch_size_config_select(
+    mock_deepspeed_distributed, mock_log_dir, tmpdir, dataset_cls, value
+) -> None:
     """Test to ensure that the batch size is correctly set as expected for deepspeed logging purposes."""

     class TestModel(BoringModel):
@@ -544,7 +546,7 @@ def test_step(self, batch, batch_idx) -> None:
         self.log("test_loss", F.cross_entropy(logits, y), prog_bar=False, sync_dist=True)
         self.log("test_acc", self.test_acc(logits, y), prog_bar=True, sync_dist=True)

-    def predict_step(self, batch, batch_idx: int, dataloader_idx: int=0):
+    def predict_step(self, batch, batch_idx: int, dataloader_idx: int = 0):
         x, y = batch
         logits = self.forward(x)
         self.test_acc(logits, y)
diff --git a/tests/trainer/logging_/test_eval_loop_logging.py b/tests/trainer/logging_/test_eval_loop_logging.py
index 00278d5973c027..27fb0ae19e2ee6 100644
--- a/tests/trainer/logging_/test_eval_loop_logging.py
+++ b/tests/trainer/logging_/test_eval_loop_logging.py
@@ -775,7 +775,10 @@ def test_dataloader(self):
     assert results == [{"foo/dataloader_idx_0": 1, "foobar": 3}, {"foo/dataloader_idx_1": 2, "foobar": 3}]


-inputs0: Tuple[List[Dict[str, Any]], RunningStage] = ([{"log": torch.tensor(5)}, {"no_log": torch.tensor(6)}], RunningStage.TESTING)
+inputs0: Tuple[List[Dict[str, Any]], RunningStage] = (
+    [{"log": torch.tensor(5)}, {"no_log": torch.tensor(6)}],
+    RunningStage.TESTING,
+)
 expected0 = """
 ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
        Test metric             DataLoader 0             DataLoader 1
diff --git a/tests/trainer/optimization/test_manual_optimization.py b/tests/trainer/optimization/test_manual_optimization.py
index e6ade079c7fb41..a4429fa22cfa37 100644
--- a/tests/trainer/optimization/test_manual_optimization.py
+++ b/tests/trainer/optimization/test_manual_optimization.py
@@ -755,7 +755,9 @@ def on_train_end(self) -> None:
         assert self.adam_step_mock.call_count == 2


-def train_manual_optimization(tmpdir, strategy, model_cls: Type[TesManualOptimizationDDPModel]=TesManualOptimizationDDPModel) -> None:
+def train_manual_optimization(
+    tmpdir, strategy, model_cls: Type[TesManualOptimizationDDPModel] = TesManualOptimizationDDPModel
+) -> None:

     seed_everything(42)
diff --git a/tests/trainer/test_data_loading.py b/tests/trainer/test_data_loading.py
index 6c90bb23c9a1a2..cfb2082ce1ca0b 100644
--- a/tests/trainer/test_data_loading.py
+++ b/tests/trainer/test_data_loading.py
@@ -289,7 +289,7 @@ def test_step(self, batch, batch_idx):
         assert len(self.trainer.test_dataloaders[0]) == 10
         return super().test_step(batch, batch_idx)

-    def predict_step(self, batch, batch_idx: int, dataloader_idx: int=0):
+    def predict_step(self, batch, batch_idx: int, dataloader_idx: int = 0):
         assert len(self.trainer.predict_dataloaders[0]) == 10
         return super().predict_step(batch, batch_idx, dataloader_idx=dataloader_idx)
diff --git a/tests/trainer/test_dataloaders.py b/tests/trainer/test_dataloaders.py
index 94d072e29cb8bc..69a4b71e605afc 100644
--- a/tests/trainer/test_dataloaders.py
+++ b/tests/trainer/test_dataloaders.py
@@ -224,10 +224,14 @@ def on_train_batch_start(self, trainer: Trainer, pl_module: LightningModule, bat
     def on_train_epoch_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
         self.train_epoch_count += 1

-    def on_validation_batch_start(self, trainer: Trainer, pl_module: LightningModule, batch, batch_idx: int, dataloader_idx: int) -> None:
+    def on_validation_batch_start(
+        self, trainer: Trainer, pl_module: LightningModule, batch, batch_idx: int, dataloader_idx: int
+    ) -> None:
         self.val_batches_seen += 1

-    def on_test_batch_start(self, trainer: Trainer, pl_module: LightningModule, batch, batch_idx: int, dataloader_idx: int) -> None:
+    def on_test_batch_start(
+        self, trainer: Trainer, pl_module: LightningModule, batch, batch_idx: int, dataloader_idx: int
+    ) -> None:
         self.test_batches_seen += 1

     def on_validation_epoch_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
@@ -240,7 +244,9 @@ def on_test_epoch_start(self, trainer: Trainer, pl_module: LightningModule) -> N
 @pytest.mark.parametrize(
     ["limit_train_batches", "limit_val_batches", "limit_test_batches"], [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0)]
 )
-def test_inf_dataloaders_with_limit_percent_batches(tmpdir, limit_train_batches, limit_val_batches, limit_test_batches) -> None:
+def test_inf_dataloaders_with_limit_percent_batches(
+    tmpdir, limit_train_batches, limit_val_batches, limit_test_batches
+) -> None:
     """Verify inf train, val & test dataloaders (e.g. IterableDataset) passed with batch limit in percent."""
     ckpt_callback = ModelCheckpoint(monitor="val_log", save_top_k=1, mode="max", verbose=False)
@@ -418,7 +424,9 @@ def test_datasets_dataloaders_with_limit_num_batches(
     ["limit_train_batches", "limit_val_batches", "limit_test_batches"],
     [(0.0, 0.0, 0.0), (0, 0, 0.5), (1.0, 1.0, 1.0), (0.2, 0.4, 0.4)],
 )
-def test_dataloaders_with_limit_percent_batches(tmpdir, limit_train_batches: int, limit_val_batches, limit_test_batches) -> None:
+def test_dataloaders_with_limit_percent_batches(
+    tmpdir, limit_train_batches: int, limit_val_batches, limit_test_batches
+) -> None:
     """Verify num_batches for train, val & test dataloaders passed with batch limit in percent."""
     model = MultiEvalDataLoaderModel()
     # train, multiple val and multiple test passed with percent_check
@@ -818,7 +826,7 @@ def gen(self):


 class DistribSamplerCallback(Callback):
-    def __init__(self, expected_seeds: Tuple[int, ...]=(0, 0, 0)) -> None:
+    def __init__(self, expected_seeds: Tuple[int, ...] = (0, 0, 0)) -> None:
         self.expected_seed = expected_seeds

     def on_train_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py
index c4e4f09e89e6eb..98d3045d537e37 100644
--- a/tests/trainer/test_trainer.py
+++ b/tests/trainer/test_trainer.py
@@ -1397,7 +1397,15 @@ def __init__(self, output_dir: str, *args, **kwargs) -> None:
         super().__init__(*args, **kwargs)
         self.output_dir = output_dir

-    def write_on_batch_end(self, trainer: Trainer, pl_module: LightningModule, prediction, batch_indices: Optional[Sequence[int]], *args, **kwargs) -> None:
+    def write_on_batch_end(
+        self,
+        trainer: Trainer,
+        pl_module: LightningModule,
+        prediction,
+        batch_indices: Optional[Sequence[int]],
+        *args,
+        **kwargs,
+    ) -> None:
         assert prediction.shape == torch.Size([1, 2])
         assert len(batch_indices) == 1
         self.write_on_batch_end_called = True
@@ -1423,11 +1431,11 @@ def predict(
     strategy=None,
     accelerator=None,
     devices=None,
-    model: Optional[LightningModule]=None,
+    model: Optional[LightningModule] = None,
     plugins=None,
-    datamodule: bool=True,
-    enable_progress_bar: bool=True,
-    use_callbacks: bool=True,
+    datamodule: bool = True,
+    enable_progress_bar: bool = True,
+    use_callbacks: bool = True,
 ) -> None:
     dataloaders = [torch.utils.data.DataLoader(RandomDataset(32, 2)), torch.utils.data.DataLoader(RandomDataset(32, 2))]
diff --git a/tests/utilities/distributed.py b/tests/utilities/distributed.py
index 0d286f8b864487..27bb7d24bbbf93 100644
--- a/tests/utilities/distributed.py
+++ b/tests/utilities/distributed.py
@@ -21,7 +21,7 @@
 import pytorch_lightning


-def call_training_script(module_file, cli_args: List[str], method, tmpdir, timeout: int=60, as_module: bool=False):
+def call_training_script(module_file, cli_args: List[str], method, tmpdir, timeout: int = 60, as_module: bool = False):
     file = Path(module_file.__file__).absolute()
     cli_args = cli_args.split(" ") if cli_args else []
     cli_args += ["--tmpdir", str(tmpdir)]
diff --git a/tests/utilities/test_auto_restart.py b/tests/utilities/test_auto_restart.py
index 80a1f5f8041dd4..2024db976421bf 100644
--- a/tests/utilities/test_auto_restart.py
+++ b/tests/utilities/test_auto_restart.py
@@ -224,7 +224,9 @@ def test_fast_forward_on_random_sampler() -> None:


 class RangeIterableDataset(IterableDataset):
-    def __init__(self, data, num_workers: int, batch_size: int, state_dict=None, attr_name: str = "iter_sampler") -> None:
+    def __init__(
+        self, data, num_workers: int, batch_size: int, state_dict=None, attr_name: str = "iter_sampler"
+    ) -> None:
         self.data = list(data)
         self.batch_size = batch_size
         self.num_workers = num_workers
@@ -657,7 +659,9 @@ def test_fault_tolerant_not_supported() -> None:
     assert not _fault_tolerant_training()


-def create_iterable_dataset(batch_size, num_workers, attr_name: str="iter_sampler", wrap: bool = True) -> Union[CaptureIterableDataset, RangeIterableDataset]:
+def create_iterable_dataset(
+    batch_size, num_workers, attr_name: str = "iter_sampler", wrap: bool = True
+) -> Union[CaptureIterableDataset, RangeIterableDataset]:
     dataset = RangeIterableDataset(range(50), num_workers=num_workers, batch_size=batch_size, attr_name=attr_name)
     if wrap:
         dataset = CaptureIterableDataset(dataset)
@@ -1035,7 +1039,9 @@ def run(should_fail, resume):


 class TestAutoRestartModelUnderSignal(BoringModel):
-    def __init__(self, should_signal: bool, failure_on_step: bool, failure_on_training: bool, on_last_batch: bool) -> None:
+    def __init__(
+        self, should_signal: bool, failure_on_step: bool, failure_on_training: bool, on_last_batch: bool
+    ) -> None:
         super().__init__()
         self.should_signal = should_signal
         self.failure_on_step = failure_on_step
@@ -1123,7 +1129,9 @@ def _exit_gracefully_on_signal(self) -> None:
 @pytest.mark.parametrize("failure_on_step", [False, True])
 @mock.patch.dict(os.environ, {"PL_FAULT_TOLERANT_TRAINING": "1"})
 @RunIf(skip_windows=True)
-def test_auto_restart_under_signal(on_last_batch, val_check_interval, failure_on_training, failure_on_step, tmpdir) -> None:
+def test_auto_restart_under_signal(
+    on_last_batch, val_check_interval, failure_on_training, failure_on_step, tmpdir
+) -> None:
     """This test asserts that if a signal is being sent during the training / validation phase, the model should
     restart in a reproducible way."""
diff --git a/tests/utilities/test_cli.py b/tests/utilities/test_cli.py
index f4f53380c176cf..1c614cf4aa5011 100644
--- a/tests/utilities/test_cli.py
+++ b/tests/utilities/test_cli.py
@@ -926,7 +926,7 @@ def test_registries() -> None:

 @MODEL_REGISTRY
 class TestModel(BoringModel):
-    def __init__(self, foo, bar: int=5) -> None:
+    def __init__(self, foo, bar: int = 5) -> None:
         super().__init__()
         self.foo = foo
         self.bar = bar
@@ -952,7 +952,7 @@ def test_lightning_cli_model_choices() -> None:

 @DATAMODULE_REGISTRY
 class MyDataModule(BoringDataModule):
-    def __init__(self, foo, bar: int=5) -> None:
+    def __init__(self, foo, bar: int = 5) -> None:
         super().__init__()
         self.foo = foo
         self.bar = bar
@@ -1150,7 +1150,9 @@ def test_argv_transformation_multiple_callbacks_with_config() -> None:
         ),
     ],
 )
-def test_argv_transformations_with_optimizers_and_lr_schedulers(args: List[str], expected, nested_key: str, registry) -> None:
+def test_argv_transformations_with_optimizers_and_lr_schedulers(
+    args: List[str], expected, nested_key: str, registry
+) -> None:
     base = ["any.py", "--trainer.max_epochs=1"]
     argv = base + args
     new_argv = LightningArgumentParser._convert_argv_issue_84(registry.classes, nested_key, argv)
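The whole patch applies one convention: PEP 8 asks for spaces around "=" when a parameter default is combined with a type annotation (unannotated defaults stay tight), and signatures that then exceed the line-length limit are wrapped with the closing parenthesis on its own line. Below is a minimal sketch with hypothetical names; the 120-character limit and the E252 pycodestyle code are assumptions about the project's tooling, not taken from the patch itself.

# Hypothetical illustration only; not part of the patch above.
def build_loader(batch_size=32, shuffle: bool=True):  # old style; the annotated default is what linters flag (typically E252)
    ...


def build_loader_formatted(
    batch_size=32, shuffle: bool = True, num_workers: int = 0, pin_memory: bool = False
) -> None:  # new style: annotated defaults get spaces, long signature wrapped
    ...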