[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
pre-commit-ci[bot] committed Feb 17, 2022
1 parent 9b8df69 commit e490ee7
Showing 38 changed files with 134 additions and 74 deletions.
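The changes shown below are mechanical formatting fixes: pre-commit.ci runs the repository's configured pre-commit hooks and commits whatever they reformat. The edits follow two patterns, consistent with an autoformatter such as black (the exact hook is an assumption here, not stated on this page): PEP 8 spacing is added around "=" when a keyword default also carries a type annotation, and overlong signatures are wrapped across multiple lines. A minimal before/after sketch using an illustrative function rather than one taken from the diff:

# Before: annotated defaults without spaces around "=", and a signature kept on one long line.
def train(model, lr: float=1e-3, epochs: int=10, accelerator: str="cpu", verbose: bool=False) -> None:
    ...


# After: spaces around "=" for annotated defaults, and the signature wrapped
# once it exceeds the configured line-length limit.
def train(
    model, lr: float = 1e-3, epochs: int = 10, accelerator: str = "cpu", verbose: bool = False
) -> None:
    ...
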
6 changes: 3 additions & 3 deletions tests/benchmarks/test_basic_parity.py
@@ -86,7 +86,7 @@ def _hook_memory():
return used_memory


def measure_loops(cls_model, kind, num_runs: int=10, num_epochs: int=10):
def measure_loops(cls_model, kind, num_runs: int = 10, num_epochs: int = 10):
"""Returns an array with the last loss from each epoch for each run."""
hist_losses = []
hist_durations = []
@@ -116,7 +116,7 @@ def measure_loops(cls_model, kind, num_runs: int=10, num_epochs: int=10):
return {"losses": hist_losses, "durations": hist_durations, "memory": hist_memory}


def vanilla_loop(cls_model, idx: Optional[int], device_type: str = "cuda", num_epochs: int=10):
def vanilla_loop(cls_model, idx: Optional[int], device_type: str = "cuda", num_epochs: int = 10):
device = torch.device(device_type)
# set seed
seed_everything(idx)
@@ -148,7 +148,7 @@ def vanilla_loop(cls_model, idx: Optional[int], device_type: str = "cuda", num_e
return epoch_losses[-1], _hook_memory()


def lightning_loop(cls_model, idx: Optional[int], device_type: str = "cuda", num_epochs: int=10):
def lightning_loop(cls_model, idx: Optional[int], device_type: str = "cuda", num_epochs: int = 10):
seed_everything(idx)
torch.backends.cudnn.deterministic = True

4 changes: 3 additions & 1 deletion tests/callbacks/test_early_stopping.py
@@ -267,7 +267,9 @@ def validation_epoch_end(self, outputs):


@pytest.mark.parametrize("step_freeze, min_steps, min_epochs", [(5, 1, 1), (5, 1, 3), (3, 15, 1)])
def test_min_steps_override_early_stopping_functionality(tmpdir, step_freeze: int, min_steps: int, min_epochs: int) -> None:
def test_min_steps_override_early_stopping_functionality(
tmpdir, step_freeze: int, min_steps: int, min_epochs: int
) -> None:
"""Excepted Behaviour: IF `min_steps` was set to a higher value than the `trainer.global_step` when
`early_stopping` is being triggered, THEN the trainer should continue until reaching `trainer.global_step` ==
`min_steps`, and stop.
12 changes: 6 additions & 6 deletions tests/callbacks/test_pruning.py
@@ -59,13 +59,13 @@ def apply(cls, module, name, amount):

def train_with_pruning_callback(
tmpdir,
parameters_to_prune: bool=False,
use_global_unstructured: bool=False,
pruning_fn: str="l1_unstructured",
use_lottery_ticket_hypothesis: bool=False,
parameters_to_prune: bool = False,
use_global_unstructured: bool = False,
pruning_fn: str = "l1_unstructured",
use_lottery_ticket_hypothesis: bool = False,
strategy=None,
accelerator: str="cpu",
devices: int=1,
accelerator: str = "cpu",
devices: int = 1,
) -> None:
model = TestModel()

10 changes: 5 additions & 5 deletions tests/callbacks/test_stochastic_weight_avg.py
@@ -112,12 +112,12 @@ def on_train_end(self, trainer: Trainer, pl_module: LightningModule) -> None:

def train_with_swa(
tmpdir,
batchnorm: bool=True,
batchnorm: bool = True,
strategy=None,
accelerator: str="cpu",
devices: int=1,
interval: str="epoch",
iterable_dataset: bool=False,
accelerator: str = "cpu",
devices: int = 1,
interval: str = "epoch",
iterable_dataset: bool = False,
) -> None:
model = SwaTestModel(batchnorm=batchnorm, interval=interval, iterable_dataset=iterable_dataset)
swa_start = 2
4 changes: 3 additions & 1 deletion tests/checkpointing/test_checkpoint_callback_frequency.py
@@ -56,7 +56,9 @@ def test_default_checkpoint_freq(save_mock, tmpdir, epochs: int, val_check_inter
["k", "epochs", "val_check_interval", "expected"], [(1, 1, 1.0, 1), (2, 2, 1.0, 2), (2, 1, 0.25, 4), (2, 2, 0.3, 6)]
)
@pytest.mark.parametrize("save_last", (False, True))
def test_top_k(save_mock, tmpdir, k: int, epochs: int, val_check_interval: float, expected: int, save_last: bool) -> None:
def test_top_k(
save_mock, tmpdir, k: int, epochs: int, val_check_interval: float, expected: int, save_last: bool
) -> None:
class TestModel(BoringModel):
def __init__(self):
super().__init__()
2 changes: 1 addition & 1 deletion tests/core/test_metric_result_integration.py
@@ -371,7 +371,7 @@ def __repr__(self) -> str:
return f"{self.__class__.__name__}(sum={self.sum}, count={self.count})"


def result_collection_reload(accelerator: str="auto", devices: int=1, **kwargs) -> None:
def result_collection_reload(accelerator: str = "auto", devices: int = 1, **kwargs) -> None:
"""This test is going to validate _ResultCollection is properly being reload and final accumulation with Fault
Tolerant Training is correct."""

7 changes: 5 additions & 2 deletions tests/helpers/advanced_models.py
@@ -81,7 +81,8 @@ class BasicGAN(LightningModule):
"""Implements a basic GAN for the purpose of illustrating multiple optimizers."""

def __init__(
self, hidden_dim: int = 128, learning_rate: float = 0.001, b1: float = 0.5, b2: float = 0.999, **kwargs) -> None:
self, hidden_dim: int = 128, learning_rate: float = 0.001, b1: float = 0.5, b2: float = 0.999, **kwargs
) -> None:
super().__init__()
self.hidden_dim = hidden_dim
self.learning_rate = learning_rate
@@ -218,7 +219,9 @@ def train_dataloader(self):


class ParityModuleCIFAR(LightningModule):
def __init__(self, backbone: str="resnet101", hidden_dim: int=1024, learning_rate: float=1e-3, pretrained: bool=True) -> None:
def __init__(
self, backbone: str = "resnet101", hidden_dim: int = 1024, learning_rate: float = 1e-3, pretrained: bool = True
) -> None:
super().__init__()
self.save_hyperparameters()

4 changes: 2 additions & 2 deletions tests/helpers/datamodules.py
@@ -101,15 +101,15 @@ def sample(self):


class ClassifDataModule(SklearnDataModule):
def __init__(self, num_features: int=32, length: int=800, num_classes: int=3, batch_size: int=10) -> None:
def __init__(self, num_features: int = 32, length: int = 800, num_classes: int = 3, batch_size: int = 10) -> None:
data = make_classification(
n_samples=length, n_features=num_features, n_classes=num_classes, n_clusters_per_class=1, random_state=42
)
super().__init__(data, x_type=torch.float32, y_type=torch.long, batch_size=batch_size)


class RegressDataModule(SklearnDataModule):
def __init__(self, num_features: int=16, length: int=800, batch_size: int=10) -> None:
def __init__(self, num_features: int = 16, length: int = 800, batch_size: int = 10) -> None:
x, y = make_regression(n_samples=length, n_features=num_features, random_state=42)
y = [[v] for v in y]
super().__init__((x, y), x_type=torch.float32, y_type=torch.float32, batch_size=batch_size)
5 changes: 3 additions & 2 deletions tests/helpers/datasets.py
@@ -57,7 +57,8 @@ class MNIST(Dataset):
cache_folder_name = "complete"

def __init__(
self, root: str, train: bool = True, normalize: tuple = (0.1307, 0.3081), download: bool = True, **kwargs) -> None:
self, root: str, train: bool = True, normalize: tuple = (0.1307, 0.3081), download: bool = True, **kwargs
) -> None:
super().__init__()
self.root = root
self.train = train # training set or test set
@@ -185,7 +186,7 @@ def _download(self, data_folder: str) -> None:


class AverageDataset(Dataset):
def __init__(self, dataset_len: int=300, sequence_len: int=100) -> None:
def __init__(self, dataset_len: int = 300, sequence_len: int = 100) -> None:
self.dataset_len = dataset_len
self.sequence_len = sequence_len
self.input_seq = torch.randn(dataset_len, sequence_len, 10)
2 changes: 1 addition & 1 deletion tests/helpers/deterministic_model.py
@@ -56,7 +56,7 @@ def step(self, batch, batch_idx):

return out

def count_num_graphs(self, result, num_graphs: int=0):
def count_num_graphs(self, result, num_graphs: int = 0):
for k, v in result.items():
if isinstance(v, torch.Tensor) and v.grad_fn is not None:
num_graphs += 1
2 changes: 1 addition & 1 deletion tests/helpers/pipelines.py
@@ -93,7 +93,7 @@ def run_model_test(


@torch.no_grad()
def run_model_prediction(trained_model, dataloader, min_acc: float=0.50) -> None:
def run_model_prediction(trained_model, dataloader, min_acc: float = 0.50) -> None:
orig_device = trained_model.device
# run prediction on 1 batch
trained_model.cpu()
2 changes: 1 addition & 1 deletion tests/helpers/simple_models.py
@@ -22,7 +22,7 @@


class ClassificationModel(LightningModule):
def __init__(self, lr: float=0.01) -> None:
def __init__(self, lr: float = 0.01) -> None:
super().__init__()

self.lr = lr
4 changes: 2 additions & 2 deletions tests/helpers/utils.py
@@ -60,13 +60,13 @@ def load_model_from_checkpoint(logger, root_weights_dir, module_class: Type[Bori
return trained_model


def assert_ok_model_acc(trainer, key: str="test_acc", thr: float=0.5) -> None:
def assert_ok_model_acc(trainer, key: str = "test_acc", thr: float = 0.5) -> None:
# this model should get 0.80+ acc
acc = trainer.callback_metrics[key]
assert acc > thr, f"Model failed to get expected {thr} accuracy. {key} = {acc}"


def reset_seed(seed: Optional[int]=0) -> None:
def reset_seed(seed: Optional[int] = 0) -> None:
seed_everything(seed)


2 changes: 1 addition & 1 deletion tests/loops/batch/test_truncated_bptt.py
@@ -25,7 +25,7 @@
class LSTMModel(LightningModule):
"""LSTM sequence-to-sequence model for testing TBPTT with automatic optimization."""

def __init__(self, truncated_bptt_steps: int=2, input_size: int=1, hidden_size: int=8) -> None:
def __init__(self, truncated_bptt_steps: int = 2, input_size: int = 1, hidden_size: int = 8) -> None:
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
16 changes: 12 additions & 4 deletions tests/loops/epoch/test_training_epoch_loop.py
@@ -63,7 +63,9 @@
),
],
)
def test_prepare_outputs_training_epoch_end_automatic(num_optimizers: int, batch_outputs: List[List[Union[Dict[int, Dict[str, Any]], Dict[str, Any]]]], expected) -> None:
def test_prepare_outputs_training_epoch_end_automatic(
num_optimizers: int, batch_outputs: List[List[Union[Dict[int, Dict[str, Any]], Dict[str, Any]]]], expected
) -> None:
"""Test that the loop converts the nested lists of outputs to the format that the `training_epoch_end` hook
currently expects in the case of automatic optimization."""
prepared = TrainingEpochLoop._prepare_outputs_training_epoch_end(
@@ -89,7 +91,9 @@ def test_prepare_outputs_training_epoch_end_automatic(num_optimizers: int, batch
([[_out00, _out01], [_out02, _out03], [], [_out10]], [[_out00, _out01], [_out02, _out03], [_out10]]),
],
)
def test_prepare_outputs_training_epoch_end_manual(batch_outputs: List[List[Union[Dict[int, Dict[str, Any]], Dict[str, Any]]]], expected) -> None:
def test_prepare_outputs_training_epoch_end_manual(
batch_outputs: List[List[Union[Dict[int, Dict[str, Any]], Dict[str, Any]]]], expected
) -> None:
"""Test that the loop converts the nested lists of outputs to the format that the `training_epoch_end` hook
currently expects in the case of manual optimization."""
prepared = TrainingEpochLoop._prepare_outputs_training_epoch_end(
@@ -115,7 +119,9 @@ def test_prepare_outputs_training_epoch_end_manual(batch_outputs: List[List[Unio
(2, [{0: _out00, 1: _out01}, {0: _out10, 1: _out11}], [[_out00, _out10], [_out01, _out11]]),
],
)
def test_prepare_outputs_training_batch_end_automatic(num_optimizers: int, batch_end_outputs: List[Union[Dict[int, Dict[str, Any]], Dict[str, Any]]], expected) -> None:
def test_prepare_outputs_training_batch_end_automatic(
num_optimizers: int, batch_end_outputs: List[Union[Dict[int, Dict[str, Any]], Dict[str, Any]]], expected
) -> None:
"""Test that the loop converts the nested lists of outputs to the format that the `on_train_batch_end` hook
currently expects in the case of automatic optimization."""
prepared = TrainingEpochLoop._prepare_outputs_training_batch_end(
@@ -137,7 +143,9 @@ def test_prepare_outputs_training_batch_end_automatic(num_optimizers: int, batch
([_out00, _out01, None, _out03], [_out00, _out01, _out03]),
],
)
def test_prepare_outputs_training_batch_end_manual(batch_end_outputs: List[Union[Dict[int, Dict[str, Any]], Dict[str, Any]]], expected) -> None:
def test_prepare_outputs_training_batch_end_manual(
batch_end_outputs: List[Union[Dict[int, Dict[str, Any]], Dict[str, Any]]], expected
) -> None:
"""Test that the loop converts the nested lists of outputs to the format that the `on_train_batch_end` hook
currently expects in the case of manual optimization."""
prepared = TrainingEpochLoop._prepare_outputs_training_batch_end(
4 changes: 3 additions & 1 deletion tests/loops/optimization/test_optimizer_loop.py
@@ -143,7 +143,9 @@ class CustomException(Exception):
@pytest.mark.parametrize("stop_epoch", (0, 1))
@pytest.mark.parametrize("stop_batch", (0, 1, 2))
@pytest.mark.parametrize("n_optimizers,stop_optimizer", [(2, 0), (2, 1), (3, 2)])
def test_loop_restart_progress_multiple_optimizers(tmpdir, n_optimizers, stop_optimizer, stop_epoch, stop_batch) -> None:
def test_loop_restart_progress_multiple_optimizers(
tmpdir, n_optimizers, stop_optimizer, stop_epoch, stop_batch
) -> None:
"""Test that Lightning can resume from a point where a training_step failed while in the middle of processing
several optimizer steps for one batch.
8 changes: 6 additions & 2 deletions tests/loops/test_loops.py
@@ -317,7 +317,9 @@ def on_load_checkpoint(self, state_dict: Dict) -> None:
@pytest.mark.parametrize("stop_epoch", (1, 2))
@pytest.mark.parametrize("stop_batch", (1, 2))
@pytest.mark.parametrize("n_dataloaders,stop_dataloader", [(2, 0), (2, 1), (3, 2)])
def test_loop_restart_progress_multiple_dataloaders(tmpdir, n_dataloaders, stop_dataloader, stop_epoch, stop_batch: int) -> None:
def test_loop_restart_progress_multiple_dataloaders(
tmpdir, n_dataloaders, stop_dataloader, stop_epoch, stop_batch: int
) -> None:
n_batches = 5
n_epochs = 3

@@ -388,7 +390,9 @@ def val_dataloader(self):
@pytest.mark.parametrize("stop_epoch", (1, 2))
@pytest.mark.parametrize("stop_batch", (1, 2))
@pytest.mark.parametrize("stop_optimizer", (1, 2))
def test_loop_state_on_exception(accumulate_grad_batches, stop_epoch, stop_batch, stop_optimizer: int, n_optimizers: int, tmpdir) -> None:
def test_loop_state_on_exception(
accumulate_grad_batches, stop_epoch, stop_batch, stop_optimizer: int, n_optimizers: int, tmpdir
) -> None:
stop_optimizer = stop_optimizer if stop_optimizer < n_optimizers else 0
n_epochs = 3
n_batches = 3
2 changes: 1 addition & 1 deletion tests/models/data/horovod/train_default_model.py
@@ -44,7 +44,7 @@
parser.add_argument("--on-gpu", action="store_true", default=False)


def run_test_from_config(trainer_options, on_gpu, check_size: bool=True) -> None:
def run_test_from_config(trainer_options, on_gpu, check_size: bool = True) -> None:
"""Trains the default model with the given config."""
set_random_main_port()
reset_seed()
2 changes: 1 addition & 1 deletion tests/models/test_amp.py
@@ -54,7 +54,7 @@ def test_step(self, batch, batch_idx) -> Dict[str, Any]:
output = self._step(batch)
return {"y": output}

def predict_step(self, batch, batch_idx: int, dataloader_idx: int=0):
def predict_step(self, batch, batch_idx: int, dataloader_idx: int = 0):
self._assert_autocast_enabled()
output = self(batch)
is_bfloat16 = self.trainer.precision_plugin.precision == "bf16"
8 changes: 6 additions & 2 deletions tests/models/test_gpu.py
@@ -219,7 +219,9 @@ def test_parse_gpu_fail_on_non_existent_id_2(mocked_device_count) -> None:


@pytest.mark.parametrize("gpus", [-1, "-1"])
def test_parse_gpu_returns_none_when_no_devices_are_available(mocked_device_count_0, gpus: Union[None, List[int], int, str]) -> None:
def test_parse_gpu_returns_none_when_no_devices_are_available(
mocked_device_count_0, gpus: Union[None, List[int], int, str]
) -> None:
with pytest.raises(MisconfigurationException):
device_parser.parse_gpu_ids(gpus)

@@ -238,7 +240,9 @@ def test_parse_gpu_returns_none_when_no_devices_are_available(mocked_device_coun
@mock.patch("torch.cuda.device_count", return_value=1)
@mock.patch("torch.cuda.is_available", return_value=True)
@pytest.mark.parametrize("gpus", [[0, 1, 2], 2, "0"])
def test_torchelastic_gpu_parsing(mocked_device_count, mocked_is_available, gpus: Union[None, List[int], int, str]) -> None:
def test_torchelastic_gpu_parsing(
mocked_device_count, mocked_is_available, gpus: Union[None, List[int], int, str]
) -> None:
"""Ensure when using torchelastic and nproc_per_node is set to the default of 1 per GPU device That we omit
sanitizing the gpus as only one of the GPUs is visible."""
trainer = Trainer(gpus=gpus)
2 changes: 1 addition & 1 deletion tests/models/test_grad_norm.py
@@ -58,7 +58,7 @@ def on_after_backward(self) -> None:


@pytest.mark.parametrize("norm_type", [1.0, 1.25, 2, 3, 5, 10, "inf"])
def test_grad_tracking(tmpdir, norm_type, rtol: float=5e-3) -> None:
def test_grad_tracking(tmpdir, norm_type, rtol: float = 5e-3) -> None:
# rtol=5e-3 respects the 3 decimals rounding in `.grad_norms` and above
reset_seed()

3 changes: 2 additions & 1 deletion tests/models/test_hooks.py
@@ -282,7 +282,8 @@ def _train_batch(self, *args, **kwargs):

@staticmethod
def _auto_train_batch(
trainer, model, batches, device=torch.device("cpu"), current_epoch: int=0, current_batch: int=0, **kwargs):
trainer, model, batches, device=torch.device("cpu"), current_epoch: int = 0, current_batch: int = 0, **kwargs
):
using_native_amp = kwargs.get("amp_backend") == "native"
using_deepspeed = kwargs.get("strategy") == "deepspeed"
out = []
2 changes: 1 addition & 1 deletion tests/models/test_horovod.py
@@ -42,7 +42,7 @@
TEST_SCRIPT = os.path.join(os.path.dirname(__file__), "data", "horovod", "train_default_model.py")


def _run_horovod(trainer_options, on_gpu: bool=False) -> None:
def _run_horovod(trainer_options, on_gpu: bool = False) -> None:
"""Execute the training script across multiple workers in parallel."""
num_processes = trainer_options.get("gpus", 2)
# for Horovod, we interpret `gpus` to be set per worker
2 changes: 1 addition & 1 deletion tests/models/test_restore.py
@@ -58,7 +58,7 @@ def on_train_end(self, trainer: Trainer, pl_module: LightningModule) -> None:


class ValTestLossBoringModel(BoringModel):
def __init__(self, batch_size: int=4) -> None:
def __init__(self, batch_size: int = 4) -> None:
super().__init__()
self.save_hyperparameters()

2 changes: 1 addition & 1 deletion tests/models/test_sync_batchnorm.py
@@ -26,7 +26,7 @@


class SyncBNModule(LightningModule):
def __init__(self, gpu_count: int=1, **kwargs) -> None:
def __init__(self, gpu_count: int = 1, **kwargs) -> None:
super().__init__()

self.gpu_count = gpu_count
4 changes: 2 additions & 2 deletions tests/plugins/test_double_plugin.py
@@ -68,7 +68,7 @@ def test_step(self, batch, batch_idx) -> Dict[str, Any]:
loss = self.loss(batch, output)
return {"y": loss}

def predict_step(self, batch, batch_idx: int, dataloader_idx: int=0):
def predict_step(self, batch, batch_idx: int, dataloader_idx: int = 0):
assert batch.dtype == torch.float64
assert torch.tensor([0.0]).dtype == torch.float64
assert torch.tensor([0.0], dtype=torch.float16).dtype == torch.float16
@@ -111,7 +111,7 @@ def test_step(self, batch, batch_idx) -> Dict[str, Any]:
loss = self.loss(batch, output)
return {"y": loss}

def predict_step(self, batch, batch_idx: int, dataloader_idx: int=0):
def predict_step(self, batch, batch_idx: int, dataloader_idx: int = 0):
assert batch.dtype == torch.float64
output = self.layer(batch)
assert output.dtype == torch.float64
(Diff listing truncated: the remaining changed files did not load on this page.)
