
Commit 42621db

Upgrade dependency packages of INC woq (#1192)
## Describe your changes

Upgrade dependency packages of INC woq:
- lm-eval==0.4.2
- intel-extension-for-transformers (latest version is v1.4.2)

## Checklist before requesting a review

- [ ] Add unit tests for this change.
- [ ] Make sure all tests can pass.
- [ ] Update documents if necessary.
- [x] Lint and apply fixes to your code by running `lintrunner -a`
- [ ] Is this a user-facing change? If yes, give a description of this change to be included in the release notes.
- [ ] Is this PR including examples changes? If yes, please remember to update [example documentation](https://github.com/microsoft/Olive/blob/main/docs/source/examples.md) in a follow-up PR.

## (Optional) Issue link

---------

Signed-off-by: yuwenzho <[email protected]>
1 parent 87760d3 commit 42621db

2 files changed: +16, -13 lines changed


examples/open_llama/requirements-woq.txt (+2, -3)

@@ -1,7 +1,6 @@
 datasets
-# TODO: remove commit hash once update
-git+https://github.com/EleutherAI/lm-evaluation-harness.git@cc9778fbe4fa1a709be2abed9deb6180fd40e7e2
-intel-extension-for-transformers>=1.2.2
+intel-extension-for-transformers
+lm-eval==0.4.2
 neural-compressor>=2.3
 onnxruntime
 sentencepiece
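The pinned git commit of lm-evaluation-harness is replaced by the released lm-eval==0.4.2 package, and the intel-extension-for-transformers floor is dropped so the latest release (v1.4.2 at the time of this commit) is picked up. A minimal verification sketch, not part of the commit, for confirming the upgraded distributions resolve after installing the requirements file; the distribution names are taken from requirements-woq.txt:

```python
# Assumed helper (not from this commit): after
# `pip install -r examples/open_llama/requirements-woq.txt`,
# print the installed versions of the upgraded packages.
from importlib import metadata

for dist in ("lm-eval", "intel-extension-for-transformers", "neural-compressor"):
    try:
        print(f"{dist}=={metadata.version(dist)}")
    except metadata.PackageNotFoundError:
        print(f"{dist} is not installed")
```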

examples/open_llama/user_script.py (+14, -10)

@@ -132,26 +132,30 @@ def calib_dataloader(data_dir, batch_size, *args, **kwargs):
 
 
 def eval_accuracy(model: OliveModelHandler, data_dir, batch_size, device, execution_providers):
-    from intel_extension_for_transformers.llm.evaluation.lm_eval import evaluate
+    from intel_extension_for_transformers.transformers.llm.evaluation.lm_eval import LMEvalParser, evaluate
 
     results = {}
     if model.framework == Framework.PYTORCH:
-        results = evaluate(
-            model="hf-causal",
+        eval_args = LMEvalParser(
+            model="hf",
             model_args=(
                 f"pretrained={model.model_path or model.hf_config.model_name},tokenizer={model_id},dtype=float32"
             ),
             batch_size=batch_size,
-            tasks=["lambada_openai"],
+            tasks="lambada_openai",
+            device="cpu",
         )
+        results = evaluate(eval_args)
+
     elif model.framework == Framework.ONNX:
         output_config_file = Path(model.model_path).resolve().parent / "config.json"
         config.to_json_file(output_config_file, use_diff=False)
-        results = evaluate(
-            model="hf-causal",
-            model_args=f"pretrained={Path(model.model_path).resolve().parent},tokenizer={model_id}",
+        eval_args = LMEvalParser(
+            model="hf",
+            model_args=f"pretrained={Path(model.model_path).resolve().parent},tokenizer={model_id},model_format=onnx",
             batch_size=batch_size,
-            tasks=["lambada_openai"],
-            model_format="onnx",
+            tasks="lambada_openai",
+            device="cpu",
        )
-    return results["results"]["lambada_openai"]["acc"]
+        results = evaluate(eval_args)
+    return results["results"]["lambada_openai"]["acc,none"]
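For context, lm-eval 0.4.x changes the interface used here: the model type is "hf" rather than "hf-causal", the arguments are bundled into an LMEvalParser object passed to evaluate(), tasks is given as a comma-separated string, and metric keys carry a filter suffix (hence "acc,none"). A minimal standalone sketch of that call pattern, mirroring this diff; the pretrained model id and batch size below are placeholders, not values from the commit:

```python
# Sketch of the lm-eval 0.4.2 call pattern used in this diff; model id and
# batch size are placeholders.
from intel_extension_for_transformers.transformers.llm.evaluation.lm_eval import (
    LMEvalParser,
    evaluate,
)

eval_args = LMEvalParser(
    model="hf",
    model_args="pretrained=openlm-research/open_llama_3b,dtype=float32",
    tasks="lambada_openai",
    batch_size=8,
    device="cpu",
)
results = evaluate(eval_args)

# lm-eval 0.4.x reports each metric as "<metric>,<filter>", e.g. "acc,none".
print(results["results"]["lambada_openai"]["acc,none"])
```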
