fix bias issue in autogptq (#375)
wenhuach21 authored Dec 6, 2024
1 parent 7acb784 commit 3ac377b
Showing 2 changed files with 3 additions and 3 deletions.
4 changes: 2 additions & 2 deletions auto_round/export/export_to_autogptq/export.py
@@ -88,8 +88,8 @@ def pack_layer(name, model, layer_config, backend, pbar):
         in_features = layer.weight.shape[0]
         out_features = layer.weight.shape[1]
 
-    ##bias = layer.bias is not None and torch.any(layer.bias)
-    bias = True ## if using the above, llama3 lambada RTN will be NAN , TODO why?
+    bias = layer.bias is not None
+    ##bias = True ## if using the above, llama3 lambada RTN will be NAN , TODO why?
     new_layer = QuantLinear( ##pylint: disable=E1123
         bits, group_size, in_features, out_features, bias, weight_dtype=layer.weight.dtype
     )
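The fix replaces the earlier workaround of hard-coding bias = True, which forced a bias slot onto every packed QuantLinear even when the source layer had none. A minimal sketch of what the two predicates return, assuming an nn.Linear whose bias parameter exists but happens to be all zeros (the layer here is illustrative, not from the commit):

import torch
import torch.nn as nn

layer = nn.Linear(4, 4, bias=True)
nn.init.zeros_(layer.bias)  # bias parameter exists but is all zeros

# Old (commented-out) predicate: an all-zero bias is treated as "no bias",
# so the packed QuantLinear would be built without a bias slot even though
# the source module defines one.
old_bias = bool(layer.bias is not None and torch.any(layer.bias))  # False

# New predicate: keep the bias slot whenever the parameter exists.
new_bias = layer.bias is not None  # True

Checking only layer.bias is not None keeps the packed layer's structure aligned with the source module regardless of the bias values.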
2 changes: 1 addition & 1 deletion auto_round/export/export_to_autoround/export.py
@@ -110,7 +110,7 @@ def pack_layer(name, model, layer_config, backend, pbar):
     elif isinstance(layer, transformers.pytorch_utils.Conv1D):
         in_features = layer.weight.shape[0]
         out_features = layer.weight.shape[1]
-        bias = layer.bias is not None and torch.any(layer.bias)
+        bias = layer.bias is not None
 
     if "awq" not in backend:
         new_layer = QuantLinear( ##pylint: disable=E1123
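For context on the unchanged lines above: in_features is read from weight.shape[0] because transformers' Conv1D (used by GPT-2-style models) stores its weight transposed relative to nn.Linear. A small shape check, for illustration only:

from transformers.pytorch_utils import Conv1D

layer = Conv1D(nf=16, nx=8)  # nf = out_features, nx = in_features
print(layer.weight.shape)    # torch.Size([8, 16]), i.e. (in_features, out_features)
in_features = layer.weight.shape[0]   # 8
out_features = layer.weight.shape[1]  # 16

Note that Conv1D allocates its bias initialized to zeros, so under the old torch.any(layer.bias) check a freshly constructed Conv1D would have been packed without its bias slot.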
