Commit c48500e
use trainer.data_parallel_device_ids in logger_connector
DuYicong515 committed Feb 23, 2022
1 parent d1081af commit c48500e
Showing 1 changed file with 1 addition and 8 deletions.
@@ -19,7 +19,6 @@
 from pytorch_lightning.accelerators import GPUAccelerator
 from pytorch_lightning.loggers import LightningLoggerBase, LoggerCollection, TensorBoardLogger
 from pytorch_lightning.plugins.environments.slurm_environment import SLURMEnvironment
-from pytorch_lightning.strategies import ParallelStrategy, SingleDeviceStrategy
 from pytorch_lightning.trainer.connectors.logger_connector.result import _METRICS, _OUT_DICT, _PBAR_DICT
 from pytorch_lightning.trainer.states import RunningStage
 from pytorch_lightning.utilities import memory
@@ -224,13 +223,7 @@ def _log_gpus_metrics(self) -> None:
                 self.trainer.lightning_module.log(key, mem, prog_bar=False, logger=True)
             else:
                 gpu_id = int(key.split("/")[0].split(":")[1])
-                parallel_device_ids = []
-                if isinstance(self.trainer.accelerator, GPUAccelerator):
-                    if isinstance(self.trainer.strategy, ParallelStrategy):
-                        parallel_device_ids = [i for i in range(len(self.trainer.strategy.parallel_devices))]
-                    elif isinstance(self.strategy, SingleDeviceStrategy):
-                        parallel_device_ids = [0]
-                if gpu_id in parallel_device_ids:
+                if gpu_id in self.trainer.data_parallel_device_ids:
                     self.trainer.lightning_module.log(
                         key, mem, prog_bar=False, logger=True, on_step=True, on_epoch=False
                     )
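For context, the hunk replaces a hand-rolled, per-strategy computation of the active device ids with a single read of trainer.data_parallel_device_ids. Below is a minimal standalone sketch of the filtering logic the changed branch performs. The helper name should_log_gpu_metric is hypothetical, the metric key format ("gpu_id: <N>/<metric name>") is inferred from the parsing in the diff, and device_ids is assumed to be a list of ints (or None when no GPUs are configured), standing in for the trainer property this commit switches to.

from typing import List, Optional


def should_log_gpu_metric(key: str, device_ids: Optional[List[int]]) -> bool:
    """Return True if the metric's GPU id belongs to a device in use."""
    # Same parsing as in the diff: keys look like "gpu_id: <N>/<metric name>".
    gpu_id = int(key.split("/")[0].split(":")[1])
    return device_ids is not None and gpu_id in device_ids


# Example: with devices [0, 1], metrics reported for GPU 2 are skipped.
assert should_log_gpu_metric("gpu_id: 0/memory.used (MB)", [0, 1])
assert not should_log_gpu_metric("gpu_id: 2/memory.used (MB)", [0, 1])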

0 comments on commit c48500e
