Skip to content

Commit

Permalink
fix lm_eval import (#1707)
Browse files — browse the repository at this point in the history
Signed-off-by: Kaihui-intel <[email protected]>
  • Loading branch information
Kaihui-intel authored Apr 1, 2024
1 parent 1342063 commit 9d7a052
Show file tree
Hide file tree
Showing 2 changed files with 3 additions and 3 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -362,7 +362,7 @@ def eval_func(model):
args=args,
)
else:
from intel_extension_for_transformers.llm.evaluation.lm_eval import evaluate
from intel_extension_for_transformers.transformers.llm.evaluation.lm_eval import evaluate
results = evaluate(
model="hf-causal",
model_args='pretrained=' + args.model + ',tokenizer=' + args.model + ',dtype=float32',
Expand All @@ -385,7 +385,7 @@ def eval_func(model):

if args.performance:
user_model.eval()
from intel_extension_for_transformers.llm.evaluation.lm_eval import evaluate
from intel_extension_for_transformers.transformers.llm.evaluation.lm_eval import evaluate
import time

samples = args.iters * args.batch_size
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@

def eval_model(model, model_name, tokenizer, tasks=["lambada_openai", "hellaswag", "winogrande", "piqa"], eval_bs=32):
try:
from intel_extension_for_transformers.llm.evaluation.lm_eval import evaluate as lm_evaluate
from intel_extension_for_transformers.transformers.llm.evaluation.lm_eval import evaluate as lm_evaluate
print("evaluation with itrex lm-eval", flush=True)

if str(model.device) == "cpu":
Expand Down

0 comments on commit 9d7a052

Please sign in to comment.