diff --git a/tests/models/videomae/test_modeling_videomae.py b/tests/models/videomae/test_modeling_videomae.py
index 1f1cabe55ffbfd..22e0e734ebd988 100644
--- a/tests/models/videomae/test_modeling_videomae.py
+++ b/tests/models/videomae/test_modeling_videomae.py
@@ -197,9 +197,7 @@ def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
             # important: each video needs to have the same number of masked patches
             # hence we define a single mask, which we then repeat for each example in the batch
             mask = torch.ones((self.model_tester.num_masks,))
-            mask = torch.cat(
-                [mask, torch.zeros(self.model_tester.seq_length - mask.size(0))]
-            )
+            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
             batch_size = inputs_dict["pixel_values"].shape[0]
             bool_masked_pos = mask.expand(batch_size, -1).bool()
             inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)
diff --git a/tests/models/vit_mae/test_modeling_tf_vit_mae.py b/tests/models/vit_mae/test_modeling_tf_vit_mae.py
index 6a77e95102c969..c7a7bbf4702b6c 100644
--- a/tests/models/vit_mae/test_modeling_tf_vit_mae.py
+++ b/tests/models/vit_mae/test_modeling_tf_vit_mae.py
@@ -172,6 +172,7 @@ class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCa
     test_onnx = False
     test_resize_embeddings = False
     test_head_masking = False
+    has_attentions = False

     def setUp(self):
         self.model_tester = TFViTMAEModelTester(self)
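
For context, here is a minimal standalone sketch of the bool_masked_pos construction touched by the first hunk, with toy sizes assumed in place of the model tester's num_masks, seq_length, and the batch size. Every row of the resulting tensor is the same mask, so each video in the batch has an identical number of masked patches:

import torch

# Assumed toy sizes; the real test reads these from self.model_tester.
num_masks, seq_length, batch_size = 2, 5, 3

# One shared mask: num_masks ones, padded with zeros up to seq_length.
mask = torch.ones((num_masks,))
mask = torch.cat([mask, torch.zeros(seq_length - mask.size(0))])

# Repeat the same mask for every example in the batch.
bool_masked_pos = mask.expand(batch_size, -1).bool()
print(bool_masked_pos)
# tensor([[ True,  True, False, False, False],
#         [ True,  True, False, False, False],
#         [ True,  True, False, False, False]])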