From 37d01f3338133a71bed1394d93eb0a1f527bf6a8 Mon Sep 17 00:00:00 2001
From: zhentaoyu
Date: Fri, 8 Mar 2024 09:45:41 +0800
Subject: [PATCH] fix convert_quantized model bug (#158)

In convert_to_q4_bestla_tensor of the llama, mistral, and mixtral
converters, an equality comparison (==) was written where an assignment
was intended. The bare comparison is a no-op statement, so weight_dtype
was never set to "int4". Replace the comparison with the intended
assignment.

Signed-off-by: Yu, Zhentao
---
 neural_speed/convert/convert_quantized_llama.py   | 2 +-
 neural_speed/convert/convert_quantized_mistral.py | 2 +-
 neural_speed/convert/convert_quantized_mixtral.py | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/neural_speed/convert/convert_quantized_llama.py b/neural_speed/convert/convert_quantized_llama.py
index b3bbfd9c5..77f145add 100644
--- a/neural_speed/convert/convert_quantized_llama.py
+++ b/neural_speed/convert/convert_quantized_llama.py
@@ -71,7 +71,7 @@ def convert_to_q4_bestla_tensor(src_name, dst_name, model, fout, q_config, n_hea
         int_weight = (int_weight - 8) * 16
         gptq_scales = gptq_scales / 16
         gptq_zeros = (gptq_zeros - 8) * 16
-        weight_dtype == "int4"
+        weight_dtype = "int4"
 
     dst = np.zeros((int_weight.shape[0], int_weight.shape[1] * 4), dtype=np.int8)
     int_weight = np.ascontiguousarray(int_weight.numpy())
diff --git a/neural_speed/convert/convert_quantized_mistral.py b/neural_speed/convert/convert_quantized_mistral.py
index a0af0e13a..f036692cb 100644
--- a/neural_speed/convert/convert_quantized_mistral.py
+++ b/neural_speed/convert/convert_quantized_mistral.py
@@ -74,7 +74,7 @@ def convert_to_q4_bestla_tensor(src_name, dst_name, model, fout, q_config, n_hea
         int_weight = (int_weight - 8) * 16
         gptq_scales = gptq_scales / 16
         gptq_zeros = (gptq_zeros - 8) * 16
-        weight_dtype == "int4"
+        weight_dtype = "int4"
 
     dst = np.zeros((int_weight.shape[0], int_weight.shape[1] * 4), dtype=np.int8)
     int_weight = np.ascontiguousarray(int_weight.numpy())
diff --git a/neural_speed/convert/convert_quantized_mixtral.py b/neural_speed/convert/convert_quantized_mixtral.py
index abb209f9a..5aea9c0ca 100644
--- a/neural_speed/convert/convert_quantized_mixtral.py
+++ b/neural_speed/convert/convert_quantized_mixtral.py
@@ -80,7 +80,7 @@ def convert_to_q4_bestla_tensor(src_name, dst_name, model, fout, q_config, n_hea
         int_weight = (int_weight - 8) * 16
         gptq_scales = gptq_scales / 16
         gptq_zeros = (gptq_zeros - 8) * 16
-        weight_dtype == "int4"
+        weight_dtype = "int4"
 
     dst = np.zeros((int_weight.shape[0], int_weight.shape[1] * 4), dtype=np.int8)
     int_weight = np.ascontiguousarray(int_weight.numpy())
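---
Note (trailing commentary, ignored by git am): a minimal Python sketch
of the bug class this patch fixes. The values below are hypothetical
illustrations; only the name weight_dtype and the string "int4" come
from the patch itself.

    weight_dtype = "int8"    # hypothetical prior value
    weight_dtype == "int4"   # bare comparison: evaluates to False and is
                             # discarded as a pointless statement; no
                             # assignment takes place
    print(weight_dtype)      # still "int8"
    weight_dtype = "int4"    # the intended assignment, as in this patch
    print(weight_dtype)      # now "int4"

Static checkers flag this pattern; pylint, for example, reports it as
W0104 (pointless-statement).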