From aa77cc7b68fb6a476f2f7703543903a770c918f9 Mon Sep 17 00:00:00 2001
From: justusschock
Date: Tue, 10 Nov 2020 10:28:41 +0100
Subject: [PATCH 1/6] Makes automatic optimization a model attribute

---
 pytorch_lightning/core/lightning.py                     | 8 ++++++++
 pytorch_lightning/trainer/connectors/model_connector.py | 4 ++++
 pytorch_lightning/trainer/trainer.py                    | 9 ++++++++-
 3 files changed, 20 insertions(+), 1 deletion(-)

diff --git a/pytorch_lightning/core/lightning.py b/pytorch_lightning/core/lightning.py
index 3d38f65892983..479c542fb97d7 100644
--- a/pytorch_lightning/core/lightning.py
+++ b/pytorch_lightning/core/lightning.py
@@ -159,6 +159,14 @@ def on_gpu(self):
         """
         return self.device.type == "cuda"

+    @property
+    def automatic_optimization(self) -> bool:
+        """
+        If False you are responsible for calling .backward, .step, zero_grad.
+        Meant to be used with multiple optimizers by advanced users.
+        """
+        return True
+
     def print(self, *args, **kwargs) -> None:
         r"""
         Prints only from process 0. Use this in any distributed mode to log only once.
diff --git a/pytorch_lightning/trainer/connectors/model_connector.py b/pytorch_lightning/trainer/connectors/model_connector.py
index ca7fce6e2d77d..57b06700b9614 100644
--- a/pytorch_lightning/trainer/connectors/model_connector.py
+++ b/pytorch_lightning/trainer/connectors/model_connector.py
@@ -35,6 +35,10 @@ def copy_trainer_model_properties(self, model):
         else:
             ref_model = model

+        automatic_optimization = ref_model.automatic_optimization and self.trainer.train_loop.automatic_optimization
+        self.trainer.train_loop.automatic_optimization = automatic_optimization
+        ref_model.automatic_optimization = automatic_optimization
+
         for m in [model, ref_model]:
             m.trainer = self.trainer
             m.logger = self.trainer.logger
diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py
index 2d4e2c0d9e4bd..3ad82502aada2 100644
--- a/pytorch_lightning/trainer/trainer.py
+++ b/pytorch_lightning/trainer/trainer.py
@@ -208,7 +208,8 @@ def __init__(
             log_every_n_steps: How often to log within steps (defaults to every 50 steps).

             automatic_optimization: If False you are responsible for calling .backward, .step, zero_grad.
-                Meant to be used with multiple optimizers by advanced users.
+                Meant to be used with multiple optimizers by advanced users. Passing bool
+                value is deprecated in v1.1 and will be removed in v1.3.

             prepare_data_per_node: If True, each LOCAL_RANK=0 will call prepare data.
                 Otherwise only NODE_RANK=0, LOCAL_RANK=0 will prepare data
@@ -346,6 +347,12 @@ def __init__(
         )

         # init train loop related flags
+        # TODO: deprecate in 1.2.0
+        if automatic_optimization is None:
+            automatic_optimization = True
+        else:
+            rank_zero_warn("Disable automatic optimization with the trainer flag is deprecated and will be removed in v1.3.0!"
+ "Please use the property on the LightningModule for disabling automatic optimization") self.train_loop.on_trainer_init( max_epochs, min_epochs, From 5aea8ee2344f3142c7d59f2911ec9ac8032ccfc6 Mon Sep 17 00:00:00 2001 From: Justus Schock <12886177+justusschock@users.noreply.github.com> Date: Tue, 10 Nov 2020 10:31:21 +0100 Subject: [PATCH 2/6] Update trainer.py --- pytorch_lightning/trainer/trainer.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 3ad82502aada2..1fa78f72f6eba 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -351,8 +351,10 @@ def __init__( if automatic_optimization is None: automatic_optimization = True else: - rank_zero_warn("Disable automatic optimization with the trainer flag is deprecated and will be removed in v1.3.0!" - "Please use the property on the LightningModule for disabling automatic optimization") + rank_zero_warn( + "Disable automatic optimization with the trainer flag is deprecated and will be removed in v1.3.0!" + "Please use the property on the LightningModule for disabling automatic optimization" + ) self.train_loop.on_trainer_init( max_epochs, min_epochs, From 6aacdf7f1d85fdb73745e2d7e625f11f817088bd Mon Sep 17 00:00:00 2001 From: justusschock Date: Tue, 10 Nov 2020 10:37:57 +0100 Subject: [PATCH 3/6] remove setting property in model --- pytorch_lightning/trainer/connectors/model_connector.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pytorch_lightning/trainer/connectors/model_connector.py b/pytorch_lightning/trainer/connectors/model_connector.py index 57b06700b9614..dbdceb1532288 100644 --- a/pytorch_lightning/trainer/connectors/model_connector.py +++ b/pytorch_lightning/trainer/connectors/model_connector.py @@ -37,7 +37,6 @@ def copy_trainer_model_properties(self, model): automatic_optimization = ref_model.automatic_optimization and self.trainer.train_loop.automatic_optimization self.trainer.train_loop.automatic_optimization = automatic_optimization - ref_model.automatic_optimization = automatic_optimization for m in [model, ref_model]: m.trainer = self.trainer From 7db6c1ba994e31b580d1e6b262fb229a264d9dee Mon Sep 17 00:00:00 2001 From: Justus Schock <12886177+justusschock@users.noreply.github.com> Date: Wed, 11 Nov 2020 08:23:59 +0100 Subject: [PATCH 4/6] Update pytorch_lightning/core/lightning.py Co-authored-by: Rohit Gupta --- pytorch_lightning/core/lightning.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pytorch_lightning/core/lightning.py b/pytorch_lightning/core/lightning.py index 4288c43abe1d1..c2110863efd52 100644 --- a/pytorch_lightning/core/lightning.py +++ b/pytorch_lightning/core/lightning.py @@ -164,7 +164,6 @@ def on_gpu(self): def automatic_optimization(self) -> bool: """ If False you are responsible for calling .backward, .step, zero_grad. - Meant to be used with multiple optimizers by advanced users. 
""" return True From 66cd7e06b964e951a83ebd16d048ac47c9e363ac Mon Sep 17 00:00:00 2001 From: Justus Schock <12886177+justusschock@users.noreply.github.com> Date: Wed, 11 Nov 2020 08:24:09 +0100 Subject: [PATCH 5/6] Update pytorch_lightning/trainer/trainer.py Co-authored-by: Rohit Gupta --- pytorch_lightning/trainer/trainer.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index dbdbe3ff99560..4cc6283625680 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -210,8 +210,9 @@ def __init__( log_every_n_steps: How often to log within steps (defaults to every 50 steps). automatic_optimization: If False you are responsible for calling .backward, .step, zero_grad. - Meant to be used with multiple optimizers by advanced users. Passing bool - value is deprecated in v1.1 and will be removed in v1.3. + If False you are responsible for calling .backward, .step, zero_grad in LightningModule. + This argument has been moved to LightningModule. It is deprecated here in v1.1 and + will be removed in v1.3. prepare_data_per_node: If True, each LOCAL_RANK=0 will call prepare data. Otherwise only NODE_RANK=0, LOCAL_RANK=0 will prepare data From 68393859aa1229646778ae4ff188bc5a0545a0e6 Mon Sep 17 00:00:00 2001 From: Justus Schock <12886177+justusschock@users.noreply.github.com> Date: Wed, 11 Nov 2020 08:25:09 +0100 Subject: [PATCH 6/6] Update trainer.py --- pytorch_lightning/trainer/trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 4cc6283625680..10c73ebeebc5b 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -135,7 +135,7 @@ def __init__( amp_backend: str = 'native', amp_level: str = 'O2', distributed_backend: Optional[str] = None, - automatic_optimization: bool = True, + automatic_optimization: Optional[bool] = None, move_metrics_to_cpu: bool = False, ): r"""