GPTQ evaluation on CPU (#1132)
Signed-off-by: YIYANGCAI <[email protected]>
YIYANGCAI authored Aug 15, 2023
1 parent 18706ce commit 36d0bcb
Showing 1 changed file with 2 additions and 12 deletions.
@@ -161,7 +161,7 @@ def skip(*args, **kwargs):
 #     )
 #     q_model = quantization.fit(model, conf, calib_dataloader=dataloader,)
 
-# method 2: directly use build-in function, for some models like falcon, please use this function
+# method 2: directly use INC build-in function, for some models like falcon, please use this function
 conf = {
     ".*":{
         'wbits': args.wbits, # 1-8 bits
@@ -174,18 +174,8 @@ def skip(*args, **kwargs):
 
 results = lm_evaluate(
     model="hf-causal",
-    model_args=f'pretrained="{args.model_name_or_path}",tokenizer="{args.model_name_or_path}",dtype=float32',
+    model_args='pretrained='+args.model_name_or_path+',tokenizer='+args.model_name_or_path+',dtype=float32',
     user_model=q_model.to(DEV), tasks=["lambada_openai"],
     device=DEV.type,
     batch_size=4
 )
-
-# datasets = ['wikitext2']
-
-# for dataset in datasets:
-#     dataloader, testloader = get_loaders(
-#         dataset, seed=0, model=args.model_name_or_path, seqlen=model.seqlen
-#     )
-#     print(dataset, flush=True)
-#     ppl = eval_ppl_with_gptq(model, testloader, device)
-#     results.update({dataset: ppl})
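Note on the second hunk: the old f-string form embedded literal double quotes around the model path inside model_args, while the new concatenated form passes the bare path. A minimal sketch of why that matters, assuming an lm-eval-harness-style comma-separated key=value string; the parser and the example path below are illustrative, not code from this repository:

def parse_model_args(model_args: str) -> dict:
    """Illustrative comma-separated key=value parser, in the style
    lm-eval-harness uses for model_args; a sketch, not the harness code."""
    out = {}
    for pair in model_args.split(","):
        key, value = pair.split("=", 1)
        out[key] = value
    return out

path = "facebook/opt-125m"  # hypothetical model_name_or_path

# Old f-string form: the literal double quotes become part of the parsed value.
old = f'pretrained="{path}",tokenizer="{path}",dtype=float32'
print(parse_model_args(old)["pretrained"])  # prints "facebook/opt-125m" (quotes included)

# New concatenated form: the parsed value is the bare path, as intended.
new = 'pretrained=' + path + ',tokenizer=' + path + ',dtype=float32'
print(parse_model_args(new)["pretrained"])  # prints facebook/opt-125m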

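Note on the first hunk: conf is a per-module GPTQ weight-only configuration keyed by a module-name regex, of which only 'wbits' is visible in this diff. A hedged sketch of a filled-in config of that shape; every key besides 'wbits' is an assumption for illustration, not confirmed by this commit:

conf = {
    ".*": {                 # regex matched against module names; ".*" covers all
        "wbits": 4,         # bit-width, 1-8 bits per the comment in the diff
        "group_size": 128,  # assumed key: per-group weight quantization granularity
        "sym": False,       # assumed key: asymmetric quantization scheme
    }
}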