diff --git a/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py b/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py index 3239b507a8172f..2c23409ea9eeed 100644 --- a/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py +++ b/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py @@ -23,6 +23,7 @@ from transformers import DonutProcessor, NougatProcessor, TrOCRProcessor from transformers.testing_utils import ( + _run_slow_tests, require_levenshtein, require_nltk, require_sentencepiece, @@ -561,6 +562,29 @@ def prepare_config_and_inputs(self): "labels": decoder_token_labels, } + def check_encoder_decoder_model_output_attentions( + self, + config, + decoder_config, + decoder_input_ids, + decoder_attention_mask, + labels=None, + pixel_values=None, + **kwargs, + ): + if not _run_slow_tests: + return + + super().check_encoder_decoder_model_output_attentions( + config, + decoder_config, + decoder_input_ids, + decoder_attention_mask, + labels=labels, + pixel_values=pixel_values, + **kwargs, + ) + @require_torch class Swin2BartModelTest(EncoderDecoderMixin, unittest.TestCase): @@ -677,6 +701,29 @@ def prepare_config_and_inputs(self): "labels": decoder_input_ids, } + def check_encoder_decoder_model_output_attentions( + self, + config, + decoder_config, + decoder_input_ids, + decoder_attention_mask, + labels=None, + pixel_values=None, + **kwargs, + ): + if not _run_slow_tests: + return + + super().check_encoder_decoder_model_output_attentions( + config, + decoder_config, + decoder_input_ids, + decoder_attention_mask, + labels=labels, + pixel_values=pixel_values, + **kwargs, + ) + # there are no published pretrained TrOCR checkpoints for now def test_real_model_save_load_from_pretrained(self): pass