Add csv check for igpu benchmark workflow #11610

Merged
30 changes: 30 additions & 0 deletions .github/workflows/llm_performance_tests.yml
@@ -659,6 +659,8 @@ jobs:
set PYTHONIOENCODING=utf-8
python run.py >> %CSV_SAVE_PATH%\32-32_int4_fp16\log\%LOG_FILE% 2>&1
if %ERRORLEVEL% neq 0 (exit /b 1)
python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test1
if %ERRORLEVEL% neq 0 (exit /b 1)

call conda deactivate
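Each of the fifteen modified workflow steps gains the same two lines: once `run.py` has appended its output to the log, `check_csv_results.py` validates that the result CSV for that pass contains every expected test case, and a non-zero exit code (`exit /b 1`) fails the step immediately instead of at report time. As a rough sketch of the filename matching the checker relies on (see `check_csv_results.py` later in this diff; the API name and date below are illustrative, not taken from the workflow):

```python
# Hedged sketch: how the checker locates the result file for one
# (in_out_pair, test_api, suffix) combination; names are illustrative.
all_csv_files = ["32-32-transformer_int4_fp16_gpu-results-2024-07-19_test1.csv"]
in_out, api, suffix = "32-32", "transformer_int4_fp16_gpu", "test1"
csv_name_info = f"{in_out}-{api}-results-"
matches = [f for f in all_csv_files
           if csv_name_info in f and f.endswith(f"_{suffix}.csv")]
print(matches[0])  # the checker assumes exactly one match per combination
```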

@@ -682,6 +684,8 @@ jobs:
set PYTHONIOENCODING=utf-8
python run.py >> %CSV_SAVE_PATH%\32-32_int4_fp16\log\%LOG_FILE% 2>&1
if %ERRORLEVEL% neq 0 (exit /b 1)
python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test2
if %ERRORLEVEL% neq 0 (exit /b 1)

call conda deactivate

@@ -705,6 +709,8 @@ jobs:
set PYTHONIOENCODING=utf-8
python run.py >> %CSV_SAVE_PATH%\32-32_int4_fp16\log\%LOG_FILE% 2>&1
if %ERRORLEVEL% neq 0 if %ERRORLEVEL% neq -1073740791 (exit /b 1)
python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test3
if %ERRORLEVEL% neq 0 (exit /b 1)

call conda deactivate
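Note the looser error handling in this step: `run.py` is allowed to exit with -1073740791, which as an unsigned 32-bit NTSTATUS value is 0xC0000409 (STATUS_STACK_BUFFER_OVERRUN, the Windows fail-fast code), presumably a known crash-on-exit that does not invalidate the results. The CSV check that follows still fails the step if any expected case is missing. A one-line sanity check of the conversion:

```python
# -1073740791 viewed as an unsigned 32-bit NTSTATUS value
print(hex(-1073740791 & 0xFFFFFFFF))  # 0xc0000409 (STATUS_STACK_BUFFER_OVERRUN)
```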

@@ -750,6 +756,8 @@ jobs:
set PYTHONIOENCODING=utf-8
python run.py >> %CSV_SAVE_PATH%\1024-128_int4_fp16\log\%LOG_FILE% 2>&1
if %ERRORLEVEL% neq 0 (exit /b 1)
python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test1
if %ERRORLEVEL% neq 0 (exit /b 1)

call conda deactivate

@@ -773,6 +781,8 @@ jobs:
set PYTHONIOENCODING=utf-8
python run.py >> %CSV_SAVE_PATH%\1024-128_int4_fp16\log\%LOG_FILE% 2>&1
if %ERRORLEVEL% neq 0 (exit /b 1)
python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test2
if %ERRORLEVEL% neq 0 (exit /b 1)

call conda deactivate

@@ -796,6 +806,8 @@ jobs:
set PYTHONIOENCODING=utf-8
python run.py >> %CSV_SAVE_PATH%\1024-128_int4_fp16\log\%LOG_FILE% 2>&1
if %ERRORLEVEL% neq 0 (exit /b 1)
python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test3
if %ERRORLEVEL% neq 0 (exit /b 1)

call conda deactivate

@@ -840,6 +852,8 @@ jobs:
set PYTHONIOENCODING=utf-8
python run.py >> %CSV_SAVE_PATH%\2048-256_int4_fp16\log\%LOG_FILE% 2>&1
if %ERRORLEVEL% neq 0 (exit /b 1)
python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test1
if %ERRORLEVEL% neq 0 (exit /b 1)

call conda deactivate

@@ -863,6 +877,8 @@ jobs:
set PYTHONIOENCODING=utf-8
python run.py >> %CSV_SAVE_PATH%\2048-256_int4_fp16\log\%LOG_FILE% 2>&1
if %ERRORLEVEL% neq 0 (exit /b 1)
python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test2
if %ERRORLEVEL% neq 0 (exit /b 1)

call conda deactivate

@@ -886,6 +902,8 @@ jobs:
set PYTHONIOENCODING=utf-8
python run.py >> %CSV_SAVE_PATH%\2048-256_int4_fp16\log\%LOG_FILE% 2>&1
if %ERRORLEVEL% neq 0 (exit /b 1)
python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test3
if %ERRORLEVEL% neq 0 (exit /b 1)

call conda deactivate

@@ -930,6 +948,8 @@ jobs:
set PYTHONIOENCODING=utf-8
python run.py >> %CSV_SAVE_PATH%\1024-128_int4_fp16_loadlowbit\log\%LOG_FILE% 2>&1
if %ERRORLEVEL% neq 0 (exit /b 1)
python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test1
if %ERRORLEVEL% neq 0 (exit /b 1)

call conda deactivate

@@ -953,6 +973,8 @@ jobs:
set PYTHONIOENCODING=utf-8
python run.py >> %CSV_SAVE_PATH%\1024-128_int4_fp16_loadlowbit\log\%LOG_FILE% 2>&1
if %ERRORLEVEL% neq 0 (exit /b 1)
python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test2
if %ERRORLEVEL% neq 0 (exit /b 1)

call conda deactivate

@@ -976,6 +998,8 @@ jobs:
set PYTHONIOENCODING=utf-8
python run.py >> %CSV_SAVE_PATH%\1024-128_int4_fp16_loadlowbit\log\%LOG_FILE% 2>&1
if %ERRORLEVEL% neq 0 (exit /b 1)
python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test3
if %ERRORLEVEL% neq 0 (exit /b 1)

call conda deactivate

@@ -1019,6 +1043,8 @@ jobs:
set PYTHONIOENCODING=utf-8
python run.py >> %CSV_SAVE_PATH%\1024-128\log\%LOG_FILE% 2>&1
if %ERRORLEVEL% neq 0 (exit /b 1)
python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test1
if %ERRORLEVEL% neq 0 (exit /b 1)

call conda deactivate

@@ -1042,6 +1068,8 @@ jobs:
set PYTHONIOENCODING=utf-8
python run.py >> %CSV_SAVE_PATH%\1024-128\log\%LOG_FILE% 2>&1
if %ERRORLEVEL% neq 0 (exit /b 1)
python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test2
if %ERRORLEVEL% neq 0 (exit /b 1)

call conda deactivate

@@ -1065,6 +1093,8 @@ jobs:
set PYTHONIOENCODING=utf-8
python run.py >> %CSV_SAVE_PATH%\1024-128\log\%LOG_FILE% 2>&1
if %ERRORLEVEL% neq 0 (exit /b 1)
python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test3
if %ERRORLEVEL% neq 0 (exit /b 1)

call conda deactivate

26 changes: 13 additions & 13 deletions python/llm/test/benchmark/igpu-perf/1024-128.yaml
@@ -1,18 +1,18 @@
repo_id:
- 'THUDM/chatglm3-6b'
- 'THUDM/glm-4-9b-chat'
- 'baichuan-inc/Baichuan2-7B-Chat'
- 'baichuan-inc/Baichuan2-13B-Chat'
- 'meta-llama/Llama-2-7b-chat-hf'
- 'meta-llama/Llama-2-13b-chat-hf'
- 'meta-llama/Meta-Llama-3-8B-Instruct'
- 'mistralai/Mistral-7B-Instruct-v0.2'
- 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
- 'RWKV/v5-Eagle-7B-HF'
- '01-ai/Yi-6B-Chat'
- 'Qwen/Qwen-VL-Chat'
- 'openbmb/MiniCPM-1B-sft-bf16'
- 'openbmb/MiniCPM-2B-sft-bf16'
#!- 'THUDM/glm-4-9b-chat'
#!- 'baichuan-inc/Baichuan2-7B-Chat'
#!- 'baichuan-inc/Baichuan2-13B-Chat'
#!- 'meta-llama/Llama-2-7b-chat-hf'
#!- 'meta-llama/Llama-2-13b-chat-hf'
#!- 'meta-llama/Meta-Llama-3-8B-Instruct'
#!- 'mistralai/Mistral-7B-Instruct-v0.2'
#!- 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
#!- 'RWKV/v5-Eagle-7B-HF'
#!- '01-ai/Yi-6B-Chat'
#!- 'Qwen/Qwen-VL-Chat'
#!- 'openbmb/MiniCPM-1B-sft-bf16'
#!- 'openbmb/MiniCPM-2B-sft-bf16'
local_model_hub: 'path to your local model hub'
warm_up: 1
num_trials: 3
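The yaml changes in this PR comment out most entries under `repo_id`, leaving only `THUDM/chatglm3-6b` active, which looks like a temporary reduction to exercise the new check quickly in CI. The checker derives the expected row count per `in_out_pairs` entry as every `repo_id` × `batch_size` combination minus any `exclude` entries. A toy example of that arithmetic (values are illustrative, not this repository's defaults):

```python
# Toy example of the expected-case arithmetic in check_csv_results.py;
# batch_size may also be a single scalar in the yaml.
repo_id = ["THUDM/chatglm3-6b"]
batch_size = [1, 2]
exclude = ["THUDM/chatglm3-6b:1024:2"]   # "model:input_tokens:batch_size"
in_out = "1024-128"

all_test_cases = [f"{m}:{in_out.split('-')[0]}:{b}"
                  for m in repo_id for b in batch_size]
expected_test_num = len(all_test_cases) - len(exclude)
print(expected_test_num)  # 1
```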
8 changes: 4 additions & 4 deletions python/llm/test/benchmark/igpu-perf/1024-128_437.yaml
@@ -1,9 +1,9 @@
repo_id:
- 'Qwen/Qwen1.5-7B-Chat'
- 'Qwen/Qwen2-7B-Instruct'
- 'microsoft/Phi-3-mini-4k-instruct'
- 'microsoft/Phi-3-mini-128k-instruct'
- 'microsoft/phi-3-vision-128k-instruct'
#!- 'Qwen/Qwen2-7B-Instruct'
#!- 'microsoft/Phi-3-mini-4k-instruct'
#!- 'microsoft/Phi-3-mini-128k-instruct'
#!- 'microsoft/phi-3-vision-128k-instruct'
local_model_hub: 'path to your local model hub'
warm_up: 1
num_trials: 3
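The `_437` config variants appear to group the models that require a newer transformers release (4.37+), such as Qwen2 and the Phi-3 family, so each pass can run in an environment pinned to the right version; this reading of the naming is an assumption, not something stated in the diff. A sketch under that assumption:

```python
# Assumption: the "_437" suffix marks configs for transformers >= 4.37.
# The version string would normally come from transformers.__version__.
from packaging import version

installed = "4.37.0"
config = ("1024-128_437.yaml"
          if version.parse(installed) >= version.parse("4.37.0")
          else "1024-128.yaml")
print(config)
```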
24 changes: 12 additions & 12 deletions python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml
@@ -1,17 +1,17 @@
repo_id:
- 'THUDM/chatglm3-6b'
- 'THUDM/glm-4-9b-chat'
- 'baichuan-inc/Baichuan2-7B-Chat'
- 'baichuan-inc/Baichuan2-13B-Chat'
- 'meta-llama/Llama-2-7b-chat-hf'
- 'meta-llama/Llama-2-13b-chat-hf'
- 'meta-llama/Meta-Llama-3-8B-Instruct'
- 'mistralai/Mistral-7B-Instruct-v0.2'
- 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
- '01-ai/Yi-6B-Chat'
- 'Qwen/Qwen-VL-Chat'
- 'openbmb/MiniCPM-1B-sft-bf16'
- 'openbmb/MiniCPM-2B-sft-bf16'
#!- 'THUDM/glm-4-9b-chat'
#!- 'baichuan-inc/Baichuan2-7B-Chat'
#!- 'baichuan-inc/Baichuan2-13B-Chat'
#!- 'meta-llama/Llama-2-7b-chat-hf'
#!- 'meta-llama/Llama-2-13b-chat-hf'
#!- 'meta-llama/Meta-Llama-3-8B-Instruct'
#!- 'mistralai/Mistral-7B-Instruct-v0.2'
#!- 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
#!- '01-ai/Yi-6B-Chat'
#!- 'Qwen/Qwen-VL-Chat'
#!- 'openbmb/MiniCPM-1B-sft-bf16'
#!- 'openbmb/MiniCPM-2B-sft-bf16'
local_model_hub: 'path to your local model hub'
warm_up: 1
num_trials: 3
@@ -1,9 +1,9 @@
repo_id:
- 'Qwen/Qwen1.5-7B-Chat'
- 'Qwen/Qwen2-7B-Instruct'
- 'microsoft/Phi-3-mini-4k-instruct'
- 'microsoft/Phi-3-mini-128k-instruct'
- 'microsoft/phi-3-vision-128k-instruct'
#!- 'Qwen/Qwen2-7B-Instruct'
#!- 'microsoft/Phi-3-mini-4k-instruct'
#!- 'microsoft/Phi-3-mini-128k-instruct'
#!- 'microsoft/phi-3-vision-128k-instruct'
local_model_hub: 'path to your local model hub'
warm_up: 1
num_trials: 3
@@ -1,17 +1,17 @@
repo_id:
- 'THUDM/chatglm3-6b'
- 'THUDM/glm-4-9b-chat'
- 'baichuan-inc/Baichuan2-7B-Chat'
- 'baichuan-inc/Baichuan2-13B-Chat'
- 'meta-llama/Llama-2-7b-chat-hf'
- 'meta-llama/Llama-2-13b-chat-hf'
- 'meta-llama/Meta-Llama-3-8B-Instruct'
- 'mistralai/Mistral-7B-Instruct-v0.2'
- 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
- '01-ai/Yi-6B-Chat'
- 'Qwen/Qwen-VL-Chat'
- 'openbmb/MiniCPM-1B-sft-bf16'
- 'openbmb/MiniCPM-2B-sft-bf16'
#!- 'THUDM/glm-4-9b-chat'
#!- 'baichuan-inc/Baichuan2-7B-Chat'
#!- 'baichuan-inc/Baichuan2-13B-Chat'
#!- 'meta-llama/Llama-2-7b-chat-hf'
#!- 'meta-llama/Llama-2-13b-chat-hf'
#!- 'meta-llama/Meta-Llama-3-8B-Instruct'
#!- 'mistralai/Mistral-7B-Instruct-v0.2'
#!- 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
#!- '01-ai/Yi-6B-Chat'
#!- 'Qwen/Qwen-VL-Chat'
#!- 'openbmb/MiniCPM-1B-sft-bf16'
#!- 'openbmb/MiniCPM-2B-sft-bf16'
local_model_hub: 'path to your local model hub'
warm_up: 1
num_trials: 3
@@ -1,9 +1,9 @@
repo_id:
- 'Qwen/Qwen1.5-7B-Chat'
- 'Qwen/Qwen2-7B-Instruct'
- 'microsoft/Phi-3-mini-4k-instruct'
- 'microsoft/Phi-3-mini-128k-instruct'
- 'microsoft/phi-3-vision-128k-instruct'
#!- 'Qwen/Qwen2-7B-Instruct'
#!- 'microsoft/Phi-3-mini-4k-instruct'
#!- 'microsoft/Phi-3-mini-128k-instruct'
#!- 'microsoft/phi-3-vision-128k-instruct'
local_model_hub: 'path to your local model hub'
warm_up: 1
num_trials: 3
24 changes: 12 additions & 12 deletions python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16.yaml
@@ -1,17 +1,17 @@
repo_id:
- 'THUDM/chatglm3-6b'
- 'THUDM/glm-4-9b-chat'
- 'baichuan-inc/Baichuan2-7B-Chat'
- 'baichuan-inc/Baichuan2-13B-Chat'
- 'meta-llama/Llama-2-7b-chat-hf'
- 'meta-llama/Llama-2-13b-chat-hf'
- 'meta-llama/Meta-Llama-3-8B-Instruct'
- 'mistralai/Mistral-7B-Instruct-v0.2'
- 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
- '01-ai/Yi-6B-Chat'
- 'Qwen/Qwen-VL-Chat'
- 'openbmb/MiniCPM-1B-sft-bf16'
- 'openbmb/MiniCPM-2B-sft-bf16'
#!- 'THUDM/glm-4-9b-chat'
#!- 'baichuan-inc/Baichuan2-7B-Chat'
#!- 'baichuan-inc/Baichuan2-13B-Chat'
#!- 'meta-llama/Llama-2-7b-chat-hf'
#!- 'meta-llama/Llama-2-13b-chat-hf'
#!- 'meta-llama/Meta-Llama-3-8B-Instruct'
#!- 'mistralai/Mistral-7B-Instruct-v0.2'
#!- 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
#!- '01-ai/Yi-6B-Chat'
#!- 'Qwen/Qwen-VL-Chat'
#!- 'openbmb/MiniCPM-1B-sft-bf16'
#!- 'openbmb/MiniCPM-2B-sft-bf16'
local_model_hub: 'path to your local model hub'
warm_up: 1
num_trials: 3
@@ -1,9 +1,9 @@
repo_id:
- 'Qwen/Qwen1.5-7B-Chat'
- 'Qwen/Qwen2-7B-Instruct'
- 'microsoft/Phi-3-mini-4k-instruct'
- 'microsoft/Phi-3-mini-128k-instruct'
- 'microsoft/phi-3-vision-128k-instruct'
#!- 'Qwen/Qwen2-7B-Instruct'
#!- 'microsoft/Phi-3-mini-4k-instruct'
#!- 'microsoft/Phi-3-mini-128k-instruct'
#!- 'microsoft/phi-3-vision-128k-instruct'
local_model_hub: 'path to your local model hub'
warm_up: 1
num_trials: 3
74 changes: 74 additions & 0 deletions python/llm/test/benchmark/igpu-perf/check_csv_results.py
@@ -0,0 +1,74 @@
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Python program to check if the number of test cases in the result csv files meets expectation

import os
import sys
import argparse
import pandas as pd
from omegaconf import OmegaConf

def main():
    parser = argparse.ArgumentParser(description="check if the number of test cases in the result csv files meets expectation")
    parser.add_argument("-y", "--yaml-file", type=str, dest="yaml_name", help="name of yaml", required=True)
    parser.add_argument("--suffix", type=str, dest="file_suffix", help="the suffix of the csv file")
    args = parser.parse_args()

    # Collect every csv file in the current working directory
    all_csv_files: list[str] = [file for file in os.listdir() if file.endswith('.csv')]

    conf = OmegaConf.load(args.yaml_name)
    test_apis: list[str] = conf['test_api']
    in_out_pairs: list[str] = conf['in_out_pairs']
    for api in test_apis:
        for in_out in in_out_pairs:
            # Result files are named "<in_out>-<api>-results-<date>_<suffix>.csv";
            # this assumes exactly one matching file per (in_out, api, suffix)
            csv_name_info: str = f"{in_out}-{api}-results-"
            csv_file = [file for file in all_csv_files
                        if (csv_name_info in file) and file.endswith(f"_{args.file_suffix}.csv")][0]

            # Actual test cases recorded in the csv, keyed as "model:input_tokens:batch_size"
            csv_dataframe = pd.read_csv(csv_file, index_col=0)
            actual_test_num = len(csv_dataframe)
            actual_test_cases = []
            for index, row in csv_dataframe.iterrows():
                actual_test_cases.append(row['model'] + ":" + row['input/output tokens'].split('-')[0] + ":" + str(row['batch_size']))

            # Expected test cases: every repo_id x batch_size combination from the yaml
            all_test_cases = []
            for model in conf.repo_id:
                if not OmegaConf.is_list(conf["batch_size"]):
                    batch_list = [conf["batch_size"]]
                else:
                    batch_list = conf["batch_size"]
                for batch_size in batch_list:
                    model_id_input = model + ':' + in_out.split('-')[0] + ':' + str(batch_size)
                    all_test_cases.append(model_id_input)
            exclude_test_cases = []
            if 'exclude' in conf and conf['exclude'] is not None:
                exclude_test_cases = conf['exclude']
            expected_test_num = len(all_test_cases) - len(exclude_test_cases)
            if actual_test_num != expected_test_num:
                print("---------------The test cases below should have been tested!------------")
                for test_case in all_test_cases:
                    if test_case not in actual_test_cases and test_case not in exclude_test_cases:
                        print(test_case)
                raise ValueError("The above tests failed. Please check the errors in the log.")

if __name__ == "__main__":
    sys.exit(main())
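A minimal way to exercise the checker locally is to build a tiny config and a matching result CSV, then invoke the script the same way the workflow does (all file names and the API value here are made up for illustration):

```python
# Hedged end-to-end example for check_csv_results.py; names are illustrative.
import pandas as pd
from omegaconf import OmegaConf

conf = OmegaConf.create({
    "repo_id": ["THUDM/chatglm3-6b"],
    "test_api": ["transformer_int4_fp16_gpu_win"],
    "in_out_pairs": ["1024-128"],
    "batch_size": 1,
})
OmegaConf.save(conf, "config.yaml")

pd.DataFrame({
    "model": ["THUDM/chatglm3-6b"],
    "input/output tokens": ["1024-128"],
    "batch_size": [1],
}).to_csv("1024-128-transformer_int4_fp16_gpu_win-results-2024-07-19_test1.csv")

# Then, from the same directory:
#   python check_csv_results.py --yaml-file config.yaml --suffix test1
# The script exits quietly when every expected case is present and raises
# ValueError (failing the workflow step) when rows are missing.
```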