[Benchmark] Get benchmark reports. (#155)
* added output_folders

* updated subprocess run for output folders

* get report done

* fixed the output_folder issues

* add the func of run_benchmark

* add return

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Zhenzhong1 and pre-commit-ci[bot] authored Oct 10, 2024
1 parent deb35d9 commit 946c439
Showing 3 changed files with 55 additions and 4 deletions.
37 changes: 33 additions & 4 deletions evals/benchmark/benchmark.py
@@ -1,7 +1,9 @@
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

+import argparse
 import os
 import subprocess
+from datetime import datetime

 import yaml
@@ -251,18 +253,21 @@ def run_service_test(example, service_type, service, test_suite_config):
     )

     # Run the test using locust_runtests function
+    output_folders = []
     for index, run_yaml_path in enumerate(run_yaml_paths, start=1):
         print(f"[OPEA BENCHMARK] 🚀 The {index} time test is running, run yaml: {run_yaml_path}...")
-        locust_runtests(None, run_yaml_path)
+        output_folders.append(locust_runtests(None, run_yaml_path))

     print(f"[OPEA BENCHMARK] 🚀 Test completed for {service_name} at {url}")

+    return output_folders
+

 def process_service(example, service_type, case_data, test_suite_config):
     service = case_data.get(service_type)
     if service and service.get("run_test"):
         print(f"[OPEA BENCHMARK] 🚀 Example: {example} Service: {service.get('service_name')}, Running test...")
-        run_service_test(example, service_type, service, test_suite_config)
+        return run_service_test(example, service_type, service, test_suite_config)


 def check_test_suite_config(test_suite_config):
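With this change, run_service_test collects the folder returned by each locust run, and process_service passes the list back to its caller (or returns None when run_test is disabled). Below is a minimal sketch of the new return contract, not repo code — the "visualqna"/"e2e" names come from the diff's example_service_map, and case_data/test_suite_config are assumed to be loaded from benchmark.yaml:

    # Hypothetical illustration of the return shape only.
    folders = process_service("visualqna", "e2e", case_data, test_suite_config)
    if folders is not None:  # None when the service is absent or run_test is false
        print(f"{len(folders)} locust output folder(s): {folders}")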
@@ -284,7 +289,7 @@ def check_test_suite_config(test_suite_config):
         raise ValueError("Must specify either run_time or user_queries.")


-if __name__ == "__main__":
+def run_benchmark(report=False):
     # Load test suit configuration
     yaml_content = load_yaml("./benchmark.yaml")
     # Extract data
@@ -324,9 +329,33 @@ def check_test_suite_config(test_suite_config):
         "visualqna": ["lvm", "lvmserve", "e2e"],
     }

+    all_output_folders = []
     # Process each example's services
     for example in parsed_data["examples"]:
         case_data = parsed_data["all_case_data"].get(example, {})
         service_types = example_service_map.get(example, [])
         for service_type in service_types:
-            process_service(example, service_type, case_data, test_suite_config)
+            output_folder = process_service(example, service_type, case_data, test_suite_config)
+            if output_folder is not None:
+                all_output_folders.append(output_folder)
+
+    if report:
+        print(all_output_folders)
+        all_results = dict()
+        for each_bench_folders in all_output_folders:
+            for folder in each_bench_folders:
+                from stresscli.commands.report import get_report_results
+
+                results = get_report_results(folder)
+                all_results[folder] = results
+                print(f"results = {results}\n")
+
+        return all_results
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="Read and parse JSON/YAML files and output JSON file")
+    parser.add_argument("--report", help="Return the perf", action="store_true")
+    args = parser.parse_args()
+
+    run_benchmark(report=args.report)
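The former module-level script body is now wrapped in run_benchmark(report=False), with a thin argparse entry point on top (python benchmark.py --report). A hedged sketch of driving it programmatically — it assumes benchmark.yaml is present in the working directory, and relies on run_benchmark returning the results dict only when report is requested (None otherwise):

    from benchmark import run_benchmark

    # Maps each output folder to its per-testcase results when report=True.
    all_results = run_benchmark(report=True)
    for folder, results in (all_results or {}).items():
        print(folder, "->", list(results))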
2 changes: 2 additions & 0 deletions evals/benchmark/stresscli/commands/load_test.py
@@ -74,6 +74,8 @@ def locust_runtests(kubeconfig, profile):

     click.echo(f"Load test results saved to {base_folder}")

+    return base_folder
+

 def collect_metrics(collector, services, output_dir, namespace=None):
     """Collect metrics from the specified services and output directory.
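locust_runtests previously returned nothing; it now surfaces base_folder so callers such as run_service_test can record where the artifacts landed. A minimal usage sketch — kubeconfig is passed as None as in benchmark.py, and "run.yaml" is a placeholder profile path, not a file from the repo:

    from stresscli.commands.load_test import locust_runtests

    base_folder = locust_runtests(None, "run.yaml")  # placeholder profile path
    print(f"load test artifacts written under {base_folder}")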
20 changes: 20 additions & 0 deletions evals/benchmark/stresscli/commands/report.py
@@ -74,6 +74,26 @@ def report(ctx, folder, format, output):
            csvwriter.writerow(row)


+def get_report_results(folder):
+    """Print the test report."""
+    print(f"Get report results from: {folder}")
+    output_data = {}
+    testcases = get_testcases(folder)
+    for testcase in testcases:
+        include = "|".join([TESTSPEC_SECTION_NAME, CSV_SECTION_NAME, METRICS_SECTION_NAME])
+        extracted_data = export_testdata(testcase, folder, include)
+        if extracted_data:
+            output_data[testcase] = extracted_data
+
+    result = {}
+    for testcase, data in output_data.items():
+        testcase_result = {}
+        for key, value in data.items():
+            testcase_result[key] = value
+        result[testcase] = testcase_result
+    return result
+
+
 def export_testspec(testcase, folder):
     testspec_path = os.path.join(folder, f"{testcase}_testspec.yaml")
     extracted_data = {}
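get_report_results reuses the existing get_testcases and export_testdata helpers and returns a dict keyed by testcase name. A hedged sketch of calling it directly — the folder path below is invented; in practice you would pass a folder returned by locust_runtests:

    from stresscli.commands.report import get_report_results

    results = get_report_results("/path/to/output_folder")  # invented path
    for testcase, data in results.items():
        print(testcase, data)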
