From c4d9fb80f4562d93e52269cb69988ddf832c3ac4 Mon Sep 17 00:00:00 2001
From: Siyu Wang
Date: Tue, 16 Nov 2021 16:25:31 -0800
Subject: [PATCH] fix make doc and update precision reference

---
 pytorch_lightning/accelerators/accelerator.py      | 7 +++----
 pytorch_lightning/plugins/training_type/ipu.py     | 2 +-
 pytorch_lightning/plugins/training_type/sharded.py | 2 +-
 3 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/pytorch_lightning/accelerators/accelerator.py b/pytorch_lightning/accelerators/accelerator.py
index 676985e541e3b5..eb69180d464e57 100644
--- a/pytorch_lightning/accelerators/accelerator.py
+++ b/pytorch_lightning/accelerators/accelerator.py
@@ -280,10 +280,9 @@ def amp_backend(self) -> Optional[LightningEnum]:
 
     @property
     def precision(self) -> Union[str, int]:
-        """
-        .. deprecated
-            This method is deprecated will be removed soon.
-            Use :`training_type_plugin.precision_plugin.precision` instead.
+        """This method is deprecated and will be removed soon.
+
+        Use `training_type_plugin.precision_plugin.precision` instead.
         """
         rank_zero_deprecation(
             f"`{self.__class__.__name__}.precision` was deprecated and will be removed soon"
diff --git a/pytorch_lightning/plugins/training_type/ipu.py b/pytorch_lightning/plugins/training_type/ipu.py
index 26e2f381e63002..78a21980624bb4 100644
--- a/pytorch_lightning/plugins/training_type/ipu.py
+++ b/pytorch_lightning/plugins/training_type/ipu.py
@@ -118,7 +118,7 @@ def setup(self) -> None:
         self.lightning_module.trainer._update_dataloader = self._convert_to_poptorch_loader
 
     def pre_dispatch(self) -> None:
-        model = LightningIPUModule(self.lightning_module, self.precision)
+        model = LightningIPUModule(self.lightning_module, self.precision_plugin.precision)
         self.model = model
 
         # reset the backup
diff --git a/pytorch_lightning/plugins/training_type/sharded.py b/pytorch_lightning/plugins/training_type/sharded.py
index 475d1e44095d88..eb4cb485347086 100644
--- a/pytorch_lightning/plugins/training_type/sharded.py
+++ b/pytorch_lightning/plugins/training_type/sharded.py
@@ -75,7 +75,7 @@ def _reinit_optimizers_with_oss(self, optimizers: List[Union[Optimizer, Lightnin
             optim_class = type(optimizer)
             zero_optimizer = OSS(params=optimizer.param_groups, optim=optim_class, **optimizer.defaults)
             if _FAIRSCALE_OSS_FP16_BROADCAST_AVAILABLE:
-                precision = self._precision or self.precision
+                precision = self._precision or self.precision_plugin.precision
                 is_fp16 = precision in ("mixed", 16)
                 # For multi-node training, compressing the model shards in fp16 before broadcasting
                 # improves performance. When using PyTorch AMP, it will not degrade
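
Taken together, the three hunks route every remaining precision lookup through
the training type plugin's precision plugin. A minimal migration sketch for
callers of the deprecated property, assuming a PyTorch Lightning build from
around this commit (the `Trainer` construction here is illustrative, not part
of the patch):

```python
# Sketch of the migration the new docstring asks for. Assumes a v1.5-era
# pytorch-lightning, where the precision plugin hangs off the training
# type plugin rather than the accelerator.
from pytorch_lightning import Trainer

trainer = Trainer()  # default 32-bit precision

# Deprecated: goes through `Accelerator.precision` and emits the
# rank_zero_deprecation warning shown in the first hunk.
old_style = trainer.accelerator.precision

# Replacement path named in the updated docstring:
new_style = trainer.accelerator.training_type_plugin.precision_plugin.precision
assert old_style == new_style  # both report 32 here
```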
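The sharded.py hunk sits inside the fairscale `OSS` re-wrapping logic. A
self-contained sketch of that pattern follows; the free function and its
explicit `precision`/`num_nodes` arguments are hypothetical stand-ins for
state the plugin reads off itself:

```python
# Illustrative sketch of the logic around the changed sharded.py line; the
# function name and arguments are stand-ins, not the plugin's real signature.
from typing import Union

import torch
from fairscale.optim import OSS


def reinit_optimizer_with_oss(
    optimizer: torch.optim.Optimizer, precision: Union[str, int], num_nodes: int
) -> OSS:
    # Re-wrap the optimizer in fairscale's zero-redundancy OSS, preserving
    # the original optimizer class, param groups, and defaults.
    optim_class = type(optimizer)
    zero_optimizer = OSS(params=optimizer.param_groups, optim=optim_class, **optimizer.defaults)
    is_fp16 = precision in ("mixed", 16)
    # Compressing shards to fp16 before the broadcast mainly pays off across
    # nodes; under AMP the fp32 master weights are untouched, so model
    # quality is unaffected.
    zero_optimizer.broadcast_fp16 = is_fp16 and num_nodes > 1
    return zero_optimizer
```

The `_FAIRSCALE_OSS_FP16_BROADCAST_AVAILABLE` guard in the hunk presumably
exists because older fairscale releases lack the `broadcast_fp16` flag, so
the attribute is only set when the installed version supports it.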