From 31c68d107efb43bf87dc26179a8d5f26fc1bc4a0 Mon Sep 17 00:00:00 2001
From: DuYicong515
Date: Mon, 21 Mar 2022 10:06:39 -0700
Subject: [PATCH] Remove `AcceleratorConnector.num_gpus` and deprecate `Trainer.num_gpus` (#12384)

---
 CHANGELOG.md                            |  8 +++-
 .../connectors/accelerator_connector.py |  6 ---
 pytorch_lightning/trainer/trainer.py    |  6 ++-
 tests/deprecated_api/test_remove_1-8.py | 37 +++++++++++++++++++
 tests/models/test_gpu.py                | 26 -------------
 tests/trainer/test_trainer.py           |  6 ++-
 6 files changed, 53 insertions(+), 36 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0c1ed46c2f351..604a3396dee2b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -527,7 +527,10 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 - Deprecated `Trainer.devices` in favor of `Trainer.num_devices` and `Trainer.device_ids` ([#12151](https://github.com/PyTorchLightning/pytorch-lightning/pull/12151))


-- Deprecated `Trainer.root_gpu` in favor of `Trainer.strategy.root_device.index` when GPU is used. ([#12262](https://github.com/PyTorchLightning/pytorch-lightning/pull/12262))
+- Deprecated `Trainer.root_gpu` in favor of `Trainer.strategy.root_device.index` when GPU is used ([#12262](https://github.com/PyTorchLightning/pytorch-lightning/pull/12262))
+
+
+- Deprecated `Trainer.num_gpus` in favor of `Trainer.num_devices` when GPU is used ([#12384](https://github.com/PyTorchLightning/pytorch-lightning/pull/12384))


 ### Removed
@@ -720,6 +723,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 - Removed `AcceleratorConnector.root_gpu` property ([#12262](https://github.com/PyTorchLightning/pytorch-lightning/pull/12262))


+- Removed `AcceleratorConnector.num_gpus` property ([#12384](https://github.com/PyTorchLightning/pytorch-lightning/pull/12384))
+
+
 ### Fixed

 - Fixed an issue where `ModelCheckpoint` could delete older checkpoints when `dirpath` has changed during resumed training ([#12045](https://github.com/PyTorchLightning/pytorch-lightning/pull/12045))
diff --git a/pytorch_lightning/trainer/connectors/accelerator_connector.py b/pytorch_lightning/trainer/connectors/accelerator_connector.py
index 566bf68d784b1..aa9b25c64e28c 100644
--- a/pytorch_lightning/trainer/connectors/accelerator_connector.py
+++ b/pytorch_lightning/trainer/connectors/accelerator_connector.py
@@ -815,12 +815,6 @@ def num_ipus(self) -> int:
             return self.devices
         return 0

-    @property
-    def num_gpus(self) -> int:
-        if isinstance(self.accelerator, GPUAccelerator):
-            return self.devices
-        return 0
-
     @property
     def gpus(self) -> Optional[Union[List[int], str, int]]:
         return self._gpus
diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py
index 0972d3c7db6a8..f718e0f64ee43 100644
--- a/pytorch_lightning/trainer/trainer.py
+++ b/pytorch_lightning/trainer/trainer.py
@@ -2071,7 +2071,11 @@ def ipus(self) -> int:

     @property
     def num_gpus(self) -> int:
-        return self._accelerator_connector.num_gpus
+        rank_zero_deprecation(
+            "`Trainer.num_gpus` was deprecated in v1.6 and will be removed in v1.8."
+            " Please use `Trainer.num_devices` instead."
+        )
+        return self.num_devices if isinstance(self.accelerator, GPUAccelerator) else 0

     @property
     def devices(self) -> int:
diff --git a/tests/deprecated_api/test_remove_1-8.py b/tests/deprecated_api/test_remove_1-8.py
index 1b1fd6f01576a..035f76a748ee1 100644
--- a/tests/deprecated_api/test_remove_1-8.py
+++ b/tests/deprecated_api/test_remove_1-8.py
@@ -925,3 +925,40 @@ def test_root_gpu_property_0_passing(monkeypatch, gpus, expected_root_gpu, strat
         "Please use `Trainer.strategy.root_device.index` instead."
     ):
         assert Trainer(gpus=gpus, strategy=strategy).root_gpu == expected_root_gpu
+
+
+@pytest.mark.parametrize(
+    ["gpus", "expected_num_gpus", "strategy"],
+    [
+        pytest.param(None, 0, None, id="None - expect 0 gpu to use."),
+        pytest.param(0, 0, None, id="Oth gpu, expect 1 gpu to use."),
+        pytest.param(1, 1, None, id="1st gpu, expect 1 gpu to use."),
+        pytest.param(-1, 16, "ddp", id="-1 - use all gpus"),
+        pytest.param("-1", 16, "ddp", id="'-1' - use all gpus"),
+        pytest.param(3, 3, "ddp", id="3rd gpu - 1 gpu to use (backend:ddp)"),
+    ],
+)
+def test_trainer_gpu_parse(monkeypatch, gpus, expected_num_gpus, strategy):
+    monkeypatch.setattr(torch.cuda, "is_available", lambda: True)
+    monkeypatch.setattr(torch.cuda, "device_count", lambda: 16)
+    with pytest.deprecated_call(
+        match="`Trainer.num_gpus` was deprecated in v1.6 and will be removed in v1.8."
+        " Please use `Trainer.num_devices` instead."
+    ):
+        assert Trainer(gpus=gpus, strategy=strategy).num_gpus == expected_num_gpus
+
+
+@pytest.mark.parametrize(
+    ["gpus", "expected_num_gpus", "strategy"],
+    [
+        pytest.param(None, 0, None, id="None - expect 0 gpu to use."),
+        pytest.param(None, 0, "ddp", id="None - expect 0 gpu to use."),
+    ],
+)
+def test_trainer_num_gpu_0(monkeypatch, gpus, expected_num_gpus, strategy):
+    monkeypatch.setattr(torch.cuda, "device_count", lambda: 0)
+    with pytest.deprecated_call(
+        match="`Trainer.num_gpus` was deprecated in v1.6 and will be removed in v1.8."
+        " Please use `Trainer.num_devices` instead."
+    ):
+        assert Trainer(gpus=gpus, strategy=strategy).num_gpus == expected_num_gpus
diff --git a/tests/models/test_gpu.py b/tests/models/test_gpu.py
index f5acffc4c71da..719cfb43024b1 100644
--- a/tests/models/test_gpu.py
+++ b/tests/models/test_gpu.py
@@ -92,32 +92,6 @@ def device_count():
     monkeypatch.setattr(torch.cuda, "device_count", device_count)


-@pytest.mark.parametrize(
-    ["gpus", "expected_num_gpus", "strategy"],
-    [
-        pytest.param(None, 0, None, id="None - expect 0 gpu to use."),
-        pytest.param(0, 0, None, id="Oth gpu, expect 1 gpu to use."),
-        pytest.param(1, 1, None, id="1st gpu, expect 1 gpu to use."),
-        pytest.param(-1, PRETEND_N_OF_GPUS, "ddp", id="-1 - use all gpus"),
-        pytest.param("-1", PRETEND_N_OF_GPUS, "ddp", id="'-1' - use all gpus"),
-        pytest.param(3, 3, "ddp", id="3rd gpu - 1 gpu to use (backend:ddp)"),
-    ],
-)
-def test_trainer_gpu_parse(mocked_device_count, gpus, expected_num_gpus, strategy):
-    assert Trainer(gpus=gpus, strategy=strategy).num_gpus == expected_num_gpus
-
-
-@pytest.mark.parametrize(
-    ["gpus", "expected_num_gpus", "strategy"],
-    [
-        pytest.param(None, 0, None, id="None - expect 0 gpu to use."),
-        pytest.param(None, 0, "ddp", id="None - expect 0 gpu to use."),
-    ],
-)
-def test_trainer_num_gpu_0(mocked_device_count_0, gpus, expected_num_gpus, strategy):
-    assert Trainer(gpus=gpus, strategy=strategy).num_gpus == expected_num_gpus
-
-
 # Asking for a gpu when non are available will result in a MisconfigurationException
 @pytest.mark.parametrize(
     ["gpus", "expected_root_gpu", "strategy"],
diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py
index 7e44d85ae7ea0..b6980fc860a20 100644
--- a/tests/trainer/test_trainer.py
+++ b/tests/trainer/test_trainer.py
@@ -1222,7 +1222,8 @@ def test_trainer_config_accelerator(
     assert isinstance(trainer.strategy, strategy_cls)
     assert strategy_cls.strategy_name == strategy_name
     assert isinstance(trainer.accelerator, accelerator_cls)
-    assert trainer.num_gpus == num_gpus
+    trainer_num_gpus = trainer.num_devices if isinstance(trainer.accelerator, GPUAccelerator) else 0
+    assert trainer_num_gpus == num_gpus


 def test_trainer_subclassing():
@@ -2097,7 +2098,8 @@ def test_trainer_config_strategy(monkeypatch, trainer_kwargs, strategy_cls, stra
     assert isinstance(trainer.strategy, strategy_cls)
     assert strategy_cls.strategy_name == strategy_name
     assert isinstance(trainer.accelerator, accelerator_cls)
-    assert trainer.num_gpus == num_gpus
+    trainer_num_gpus = trainer.num_devices if isinstance(trainer.accelerator, GPUAccelerator) else 0
+    assert trainer_num_gpus == num_gpus
     assert trainer.num_nodes == trainer_kwargs.get("num_nodes", 1)