diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py
index 5c72b155b03431..678f065007f81e 100755
--- a/src/transformers/trainer.py
+++ b/src/transformers/trainer.py
@@ -2320,7 +2320,10 @@ def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch, ignore_keys_for_eval):
 
             # Run delayed LR scheduler now that metrics are populated
             if isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
-                self.lr_scheduler.step(metrics[self.args.metric_for_best_model])
+                metric_to_check = self.args.metric_for_best_model
+                if not metric_to_check.startswith("eval_"):
+                    metric_to_check = f"eval_{metric_to_check}"
+                self.lr_scheduler.step(metrics[metric_to_check])
 
         if self.control.should_save:
             self._save_checkpoint(model, trial, metrics=metrics)
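
For context: `Trainer.evaluate()` returns its metrics dict with every key prefixed by `eval_` (e.g. `eval_loss`), while `metric_for_best_model` is typically configured with the bare metric name, so the old lookup could raise a `KeyError` when the delayed `ReduceLROnPlateau` step ran. Below is a minimal standalone sketch of the same key normalization outside of `Trainer`; the toy model, optimizer, and metrics dict are assumptions for illustration only.

```python
import torch

# Toy setup standing in for the Trainer's model/optimizer/scheduler (assumed).
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode="min")

# Trainer.evaluate() prefixes metric keys with "eval_", but users usually set
# metric_for_best_model to the bare name; a direct lookup would then KeyError.
metrics = {"eval_loss": 0.42}
metric_for_best_model = "loss"  # stand-in for self.args.metric_for_best_model

# The normalization introduced by the patch: prepend "eval_" when missing.
metric_to_check = metric_for_best_model
if not metric_to_check.startswith("eval_"):
    metric_to_check = f"eval_{metric_to_check}"

scheduler.step(metrics[metric_to_check])  # steps on metrics["eval_loss"]
```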