diff --git a/tests/models/musicgen/test_modeling_musicgen.py b/tests/models/musicgen/test_modeling_musicgen.py
index 772285316140..02ab3b538c26 100644
--- a/tests/models/musicgen/test_modeling_musicgen.py
+++ b/tests/models/musicgen/test_modeling_musicgen.py
@@ -502,7 +502,7 @@ def prepare_config_and_inputs_for_common(self):
 class MusicgenTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (MusicgenForConditionalGeneration,) if is_torch_available() else ()
     greedy_sample_model_classes = (MusicgenForConditionalGeneration,) if is_torch_available() else ()
-    pipeline_model_mapping = {}
+    pipeline_model_mapping = {"text-to-audio": MusicgenForConditionalGeneration} if is_torch_available() else {}
     test_pruning = False  # training is not supported yet for MusicGen
     test_headmasking = False
     test_resize_embeddings = False
diff --git a/tests/models/vits/test_modeling_vits.py b/tests/models/vits/test_modeling_vits.py
index e767b036bea7..459a5587cfe8 100644
--- a/tests/models/vits/test_modeling_vits.py
+++ b/tests/models/vits/test_modeling_vits.py
@@ -40,6 +40,7 @@
     ids_tensor,
     random_attention_mask,
 )
+from ...test_pipeline_mixin import PipelineTesterMixin
 
 
 if is_torch_available():
@@ -153,8 +154,9 @@ def create_and_check_model_forward(self, config, inputs_dict):
 
 
 @require_torch
-class VitsModelTest(ModelTesterMixin, unittest.TestCase):
+class VitsModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (VitsModel,) if is_torch_available() else ()
+    pipeline_model_mapping = {"text-to-audio": VitsModel} if is_torch_available() else {}
     is_encoder_decoder = False
     test_pruning = False
     test_headmasking = False
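
The diff above registers `MusicgenForConditionalGeneration` and `VitsModel` with the `text-to-audio` task in the pipeline test mixin, so both models are exercised by the shared pipeline test suite. Below is a minimal sketch of how that pipeline task is typically invoked outside the tests; the checkpoint names (`facebook/musicgen-small`, `facebook/mms-tts-eng`) and the shape of the returned dictionary are assumptions based on the public Hub checkpoints, not something prescribed by this diff.

```python
# Sketch: using the "text-to-audio" pipeline with the two model classes
# touched by this diff. Checkpoint names are assumed public Hub checkpoints.
from transformers import pipeline

# MusicGen: conditional music generation from a text prompt.
music_pipe = pipeline("text-to-audio", model="facebook/musicgen-small")
music = music_pipe("lo-fi beat with a mellow piano melody")
# The pipeline is expected to return a dict with the waveform and its sampling rate.
print(music["sampling_rate"], music["audio"].shape)

# VITS: text-to-speech synthesis.
tts_pipe = pipeline("text-to-audio", model="facebook/mms-tts-eng")
speech = tts_pipe("Hello from the text-to-audio pipeline.")
print(speech["sampling_rate"], speech["audio"].shape)
```

The `pipeline_model_mapping` entries are what let `PipelineTesterMixin` pick up these classes automatically when it builds tiny models for the `text-to-audio` task, which is why the VITS test class also gains the mixin and its import.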