diff --git a/intel_extension_for_transformers/transformers/modeling/modeling_auto.py b/intel_extension_for_transformers/transformers/modeling/modeling_auto.py
index fd4b1a81d5a..a5be8cdc519 100644
--- a/intel_extension_for_transformers/transformers/modeling/modeling_auto.py
+++ b/intel_extension_for_transformers/transformers/modeling/modeling_auto.py
@@ -845,11 +845,7 @@ def forward(self, input: torch.Tensor) -> tuple[torch.Tensor, None]:
                 or device_map == "cpu"
                 or device_map == torch.device("cpu")
             ) and model.config.model_type == "mpt":
-                config = AutoConfig.from_pretrained(
-                    os.path.join(os.path.dirname(__file__), "mosaicml_mpt-7b_config.json"),
-                    torchscript=True
-                )
-                model.config = config
+                model.config.architectures = ["MptForCausalLM"]
             model.eval()
             model_type = model.config.model_type.replace("_", "-")
diff --git a/intel_extension_for_transformers/transformers/modeling/mosaicml_mpt-7b_config.json b/intel_extension_for_transformers/transformers/modeling/mosaicml_mpt-7b_config.json
deleted file mode 100644
index 9a9cc31be91..00000000000
--- a/intel_extension_for_transformers/transformers/modeling/mosaicml_mpt-7b_config.json
+++ /dev/null
@@ -1,48 +0,0 @@
-{
-  "architectures": [
-    "MptForCausalLM"
-  ],
-  "attn_config": {
-    "alibi": true,
-    "alibi_bias_max": 8,
-    "attn_impl": "torch",
-    "attn_pdrop": 0,
-    "attn_type": "multihead_attention",
-    "attn_uses_sequence_id": false,
-    "clip_qkv": null,
-    "prefix_lm": false,
-    "qk_ln": false,
-    "softmax_scale": null
-  },
-  "d_model": 4096,
-  "emb_pdrop": 0,
-  "embedding_fraction": 1.0,
-  "expansion_ratio": 4,
-  "init_config": {
-    "emb_init_std": null,
-    "emb_init_uniform_lim": null,
-    "fan_mode": "fan_in",
-    "init_div_is_residual": true,
-    "init_gain": 0,
-    "init_nonlinearity": "relu",
-    "init_std": 0.02,
-    "name": "kaiming_normal_",
-    "verbose": 0
-  },
-  "init_device": "cpu",
-  "learned_pos_emb": true,
-  "logit_scale": null,
-  "max_seq_len": 2048,
-  "model_type": "mpt",
-  "n_heads": 32,
-  "n_layers": 32,
-  "no_bias": true,
-  "norm_type": "low_precision_layernorm",
-  "resid_pdrop": 0,
-  "tokenizer_name": "EleutherAI/gpt-neox-20b",
-  "torch_dtype": "bfloat16",
-  "transformers_version": "4.28.1",
-  "use_cache": false,
-  "verbose": 0,
-  "vocab_size": 50432
-}
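
Note on the change above: the old code replaced the loaded model's config wholesale with a bundled MPT-7B config (hard-coding 7B-specific fields such as d_model=4096 and n_layers=32 for every MPT variant), while the new code keeps the model's own config and only pins the architectures field, presumably so the downstream TorchScript/quantization path can resolve MptForCausalLM. A minimal sketch of the new behavior, assuming a standard transformers MPT checkpoint (the model name below is illustrative, not part of the patch):

    import torch
    from transformers import AutoModelForCausalLM

    # Hypothetical MPT checkpoint; any "mpt" model_type checkpoint behaves the same way.
    model = AutoModelForCausalLM.from_pretrained("mosaicml/mpt-7b")

    # New path: keep the checkpoint's own config (sizes, attn settings, etc.)
    # and only pin the architecture name used by the later export/quantization flow.
    if model.config.model_type == "mpt":
        model.config.architectures = ["MptForCausalLM"]
    model.eval()

Because the bundled JSON is no longer read, MPT checkpoints that differ from the 7B sizes are no longer silently mis-configured, and the static mosaicml_mpt-7b_config.json file can be deleted, as the second hunk does.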