diff --git a/src/transformers/generation/utils.py b/src/transformers/generation/utils.py
index 1982841df66733..5ef0c0eb81c87a 100644
--- a/src/transformers/generation/utils.py
+++ b/src/transformers/generation/utils.py
@@ -1866,8 +1866,8 @@ def _tensor_or_none(token, device=None):
                     "The attention mask and the pad token id were not set. As a consequence, you may observe "
                     "unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results."
                 )
-            logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{pad_token_tensor} for open-end generation.")
             pad_token_tensor = eos_token_tensor[0]
+            logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{pad_token_tensor} for open-end generation.")
 
         # Sanity checks/warnings
         if self.config.is_encoder_decoder and decoder_start_token_tensor is None:
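For context, a minimal standalone sketch of why the reordering matters (plain Python `logging`, with a toy list standing in for the real token tensors): the f-string interpolates whatever `pad_token_tensor` holds at the moment the warning is emitted, so logging before the assignment reports the stale value (`None`) instead of the id that generation will actually use.

```python
import logging

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)

pad_token_tensor = None
eos_token_tensor = [2]  # stand-in for the model's EOS token id(s)

# Before the patch: the warning interpolates the *old* value and prints "None".
logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{pad_token_tensor} for open-end generation.")

# After the patch: assign first, then log, so the message shows the id actually used.
pad_token_tensor = eos_token_tensor[0]
logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{pad_token_tensor} for open-end generation.")
```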