From 493b24ba109ed680f40ec81ef73e4cc303e810ee Mon Sep 17 00:00:00 2001
From: swu671
Date: Mon, 18 Sep 2023 14:52:34 -0400
Subject: [PATCH] add IdeficsRMSNorm to ALL_LAYERNORM_LAYERS and fixup

---
 src/transformers/models/idefics/modeling_idefics.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/src/transformers/models/idefics/modeling_idefics.py b/src/transformers/models/idefics/modeling_idefics.py
index b52b7d5f93ba55..847170143c82af 100644
--- a/src/transformers/models/idefics/modeling_idefics.py
+++ b/src/transformers/models/idefics/modeling_idefics.py
@@ -31,6 +31,7 @@
 from ...activations import ACT2FN
 from ...modeling_outputs import ModelOutput
 from ...modeling_utils import PretrainedConfig
+from ...pytorch_utils import ALL_LAYERNORM_LAYERS
 from ...utils import (
     add_start_docstrings,
     add_start_docstrings_to_model_forward,
@@ -261,7 +262,7 @@ def freeze_model(model, module_exceptions=[]):
     }
     module_exceptions_mapped = [mapping[m] for m in module_exceptions]
     for module in model.modules():
-        if module_exceptions and any([isinstance(module, t) for t in module_exceptions_mapped]):
+        if module_exceptions and any(isinstance(module, t) for t in module_exceptions_mapped):
             module.requires_grad_(True)  # Explicitely setting it to true to avoid any mistakes
         else:
             module.requires_grad_(False)
@@ -496,6 +497,9 @@ def forward(self, hidden_states):
         return self.weight * hidden_states
 
 
+ALL_LAYERNORM_LAYERS.append(IdeficsRMSNorm)
+
+
 # this was adapted from LlamaRotaryEmbedding
 class IdeficsEmbedding(torch.nn.Module):
     def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
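
For context (not part of the patch above): ALL_LAYERNORM_LAYERS is the registry that the Trainer's optimizer setup consults, via get_parameter_names, to keep normalization-layer weights out of the weight-decay group. A minimal sketch of that effect is below; TinyBlock is a made-up stand-in module for illustration only, not anything from the Idefics code.

import torch

from transformers.models.idefics.modeling_idefics import IdeficsRMSNorm
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
from transformers.trainer_pt_utils import get_parameter_names


class TinyBlock(torch.nn.Module):
    # Hypothetical toy module: one decayed projection plus one RMSNorm.
    def __init__(self, hidden_size=16):
        super().__init__()
        self.proj = torch.nn.Linear(hidden_size, hidden_size)
        self.norm = IdeficsRMSNorm(hidden_size)


model = TinyBlock()

# Parameters inside modules listed in ALL_LAYERNORM_LAYERS are skipped, so with
# IdeficsRMSNorm registered, norm.weight stays out of the decay group.
decay_parameters = get_parameter_names(model, ALL_LAYERNORM_LAYERS)
decay_parameters = [name for name in decay_parameters if "bias" not in name]
print(decay_parameters)  # expected: ['proj.weight'], with 'norm.weight' excluded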