From 80ff72cfedf77f68272f35773579d9d71e5ed10c Mon Sep 17 00:00:00 2001 From: Terry Kong Date: Mon, 11 Nov 2024 19:37:47 -0800 Subject: [PATCH] fix(export): GPT models w/ bias=False convert properly Signed-off-by: Terry Kong --- nemo/export/trt_llm/tensorrt_llm_build.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/nemo/export/trt_llm/tensorrt_llm_build.py b/nemo/export/trt_llm/tensorrt_llm_build.py index cdf8eaac6b1c..4be2d42ebe4d 100755 --- a/nemo/export/trt_llm/tensorrt_llm_build.py +++ b/nemo/export/trt_llm/tensorrt_llm_build.py @@ -118,6 +118,14 @@ def build_and_save_engine( build_config.lora_config = lora_config model = model_cls.from_config(model_config) + if not model_config.bias and model_config.architecture == 'GPTForCausalLM': + # NOTE: GPT models in megatron-core that set bias=False set the bias to False globally, + # whereas bias=False in TRTLLM GPT models sets it to False everywhere except + # LayerNorm. This change makes TRTLLM's implementation match megatron-core. + for name, module in model.named_modules(): + if isinstance(module, tensorrt_llm.layers.normalization.LayerNorm): + module.bias = None + module.register_parameter('bias', None) model = optimize_model( model, use_parallel_embedding=model_config.use_parallel_embedding,