autotuning-uperf: new workload visualization #25

Open · wants to merge 6 commits into base: main
1 change: 1 addition & 0 deletions autotuning-uperf/.env
@@ -0,0 +1 @@
MATBENCH_RESULTS_DIRNAME=results
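For context: `MATBENCH_RESULTS_DIRNAME` tells the MatrixBenchmarking tooling where to find the results to parse. A minimal sketch of how a parser could resolve it, assuming the variable simply names the results directory (the fallback value mirrors this .env):

```python
# Hedged sketch: resolve the results directory from MATBENCH_RESULTS_DIRNAME,
# defaulting to "results" as set in the .env above.
import os
import pathlib

results_dir = pathlib.Path(os.environ.get("MATBENCH_RESULTS_DIRNAME", "results"))
```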
2 changes: 2 additions & 0 deletions autotuning-uperf/README.md
@@ -0,0 +1,2 @@
# matrix-benchmarking-plugins
Workload plugins for the MatrixBenchmarking project
55 changes: 55 additions & 0 deletions autotuning-uperf/exec/run_benchmark.sh
@@ -0,0 +1,55 @@
#! /bin/bash

set -e
set -o pipefail
set -o nounset

SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
BENCHMARK_NAME=sample
if tty -s; then
    ARTIFACT_BASE="/tmp/matrix-benchmarking_$(date +%Y%m%d)"
    mkdir -p "$ARTIFACT_BASE"

    ARTIFACT_DIR="$ARTIFACT_BASE/$(printf '%03d' $(ls "${ARTIFACT_BASE}/" | grep __ | wc -l))__benchmark__$BENCHMARK_NAME"

    mkdir -p "$ARTIFACT_DIR"

    echo "Running interactively."
    echo "Using '$ARTIFACT_DIR' to store the test artifacts."
else
    echo "Running non-interactively."
    ARTIFACT_DIR="$(pwd)"
    echo "Using the current directory to store the test artifacts ($ARTIFACT_DIR)."
fi

for i in "$@"; do
    key=$(echo $i | cut -d= -f1)
    val=$(echo $i | cut -d= -f2)
    declare "$key=$val" # defines a shell variable named after '$key' (e.g. 'mode')
    echo "$key ==> $val"
done

echo
echo "Running in mode '$mode/operation': $*"
echo

sleep 1

# Collect the sample metric selected by $mode
if [[ "$mode" == "date" ]]; then
    echo "Saving the date ..."
    date +%s > "$ARTIFACT_DIR"/date
elif [[ "$mode" == "procs" ]]; then
    echo "Saving the number of processes ..."
    ps aux | wc -l > "$ARTIFACT_DIR"/procs
elif [[ "$mode" == "memfree" ]]; then
    echo "Saving the free memory ..."
    cat /proc/meminfo | grep MemFree | awk '{ print $2}' > "$ARTIFACT_DIR"/memfree
else
    echo "Invalid mode: $mode"
    exit 1
fi

echo "Done"

exit 0
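The script above is driven by `key=value` arguments (e.g. `mode=date`, `mode=procs`, `mode=memfree`), each of which becomes a shell variable via `declare`. The artifact directory is numbered by counting the existing `*__*` entries under the base directory and zero-padding that count. A minimal Python sketch of the same numbering scheme, for illustration only; `next_artifact_dir` is a hypothetical helper name, not part of the plugin:

```python
# Hedged sketch of the artifact-directory numbering used in run_benchmark.sh:
# count the existing "NNN__..." entries, then create the next zero-padded one.
import pathlib

def next_artifact_dir(base: pathlib.Path, benchmark_name: str = "sample") -> pathlib.Path:
    base.mkdir(parents=True, exist_ok=True)
    index = sum(1 for p in base.iterdir() if "__" in p.name)  # same count as `ls | grep __ | wc -l`
    artifact_dir = base / f"{index:03d}__benchmark__{benchmark_name}"
    artifact_dir.mkdir(parents=True, exist_ok=True)
    return artifact_dir
```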
57 changes: 57 additions & 0 deletions autotuning-uperf/parse_draft.py
@@ -0,0 +1,57 @@
import types, datetime
import yaml
import os, pathlib


def _parse_trial(dir_name, trial_name):
    study_name = dir_name.split("/")[-1]
    trial_num = trial_name.split("-")[1]
    print("Parsing trial: {} in study: {}".format(trial_num, study_name))

    # TODO: fill tuning_dict with all the tuning parameters
    result_file = pathlib.Path(dir_name) / trial_name / "result.csv"

    # Each trial repeats the run n times and stores the results of all runs in
    # result.csv. Each run is registered with MatrixBenchmarking separately:
    results_list = []
    # Some results may be pruned or incomplete; for now, record those as 0.
    if not result_file.exists():
        results_list = [0]
    else:
        with result_file.open() as f:
            results_list = [int(x.strip()) for x in f.readline().split(",")]

    for i, val in enumerate(results_list):
        results = types.SimpleNamespace()
        results.nopm = val
        results.trial_num = trial_num
        entry_import_settings = {
            "system": study_name,
            "trial": trial_num,
            "benchmark": "hammerdb",
            #"argument": tuning_dict,
            #"id": results.Identifier,
            "@repeat": i,
        }
        print("entry_import_settings: {}".format(entry_import_settings))
        print("results: {}".format(str(results)))
        # NOTE: 'store', 'elt' and '_duplicated_entry' are not defined in this draft;
        # they are expected to come from the MatrixBenchmarking store integration.
        store.add_to_matrix(entry_import_settings, elt, results, _duplicated_entry)


def parse_data(results_dir):
    #store.register_custom_rewrite_settings(lambda x : x)

    for study in os.listdir(results_dir):
        # Go through each autotuning "study", i.e. a set of experiments with
        # different tunables, converging on an optimum.
        if os.path.isfile(os.path.join(results_dir, study)) or not study.startswith("study-"):
            continue

        print("Parsing study: {}".format(study))
        study_dir = pathlib.Path(results_dir) / study
        for trial in os.listdir(study_dir):
            if os.path.isfile(study_dir / trial) or not trial.startswith("trial-"):
                continue
            _parse_trial(str(study_dir), trial)


parse_data("./results")
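For reference, the parser above expects a `results/study-*/trial-*/result.csv` layout, where each `result.csv` holds a single comma-separated line with one value per repeated run. A hedged sketch of a minimal tree the parser would walk; the study and trial names and the values are purely illustrative:

```python
# Build a tiny results tree matching the layout parse_draft.py expects:
# results/<study-*>/<trial-*>/result.csv, one line, one integer per repeated run.
import pathlib

trial_dir = pathlib.Path("results") / "study-demo" / "trial-0"
trial_dir.mkdir(parents=True, exist_ok=True)
(trial_dir / "result.csv").write_text("1200,1180,1215\n")
```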
128 changes: 128 additions & 0 deletions autotuning-uperf/plotting/__init__.py
@@ -0,0 +1,128 @@
from collections import defaultdict
import statistics as stats
import datetime
from collections import OrderedDict

import plotly.graph_objs as go

import matrix_benchmarking.plotting.table_stats as table_stats
from matrix_benchmarking.common import Matrix
from matrix_benchmarking.plotting import COLORS


def register():
    Plot("Plot")

    table_stats.TableStats.ValueDev(
        "latency", "Latency",
        lambda entry: entry.results.latency,
        ".2f", "us (?)",
        higher_better=False,
    )


class Plot():
    def __init__(self, name):
        self.name = name
        self.id_name = name

        table_stats.TableStats._register_stat(self)
        Matrix.settings["stats"].add(self.name)

    def do_hover(self, meta_value, variables, figure, data, click_info):
        return "nothing"

    def do_plot(self, ordered_vars, settings, param_lists, variables, cfg):
        fig = go.Figure()
        cfg__remove_details = cfg.get('perf.rm_details', False)
        cfg__legend_pos = cfg.get('perf.legend_pos', False)

        XY = defaultdict(dict)
        XYerr_pos = defaultdict(dict)
        XYerr_neg = defaultdict(dict)

        plot_title = None
        plot_legend = None

        x_key = ordered_vars.pop()

        for entry in Matrix.all_records(settings, param_lists):
            if plot_title is None:
                results = entry.results[0].results if entry.is_gathered else entry.results
                plot_title = "uperf 95th percentile latency over 60s with varying kernel tunables."
                plot_legend = x_key, "Latency (95th percentile)"

            legend_name = " ".join([f"{key}={entry.settings.__dict__[key]}" for key in reversed(ordered_vars)])

            if entry.is_gathered:
                gather_xy = defaultdict(list)
                for _entry in entry.results:
                    x = _entry.settings.__dict__[x_key]
                    gather_xy[x].append(_entry.results.latency)

                legend_name = entry.settings.study
                for x, gather_y in gather_xy.items():
                    if gather_y[0] is None: continue

                    XY[legend_name][x] = y = stats.mean(gather_y)
                    err = stats.stdev(gather_y) if len(gather_y) > 2 else 0
                    XYerr_pos[legend_name][x] = y + err
                    XYerr_neg[legend_name][x] = y - err
            else:
                gather_key_name = [k for k in entry.settings.__dict__.keys() if k.startswith("@")][0]
                if entry.results.latency is None: continue
                x = entry.settings.__dict__[x_key]
                XY[legend_name][x] = entry.results.latency

        if not XY:
            print("Nothing to plot ...", settings)
            return None, "Nothing to plot ..."

        data = []
        y_max = 0
        for legend_name in sorted(XY):
            x = list(sorted(XY[legend_name].keys()))
            y = list([XY[legend_name][_x] for _x in x])
            y_max = max(y + [y_max])

            color = COLORS(list(XY.keys()).index(legend_name))

            data.append(go.Scatter(name=legend_name,
                                   x=x, y=y,
                                   mode="markers+lines",
                                   line=dict(color=color, width=2),
                                   hoverlabel={'namelength': -1},
                                   legendgroup=legend_name,
                                   ))

            if not XYerr_pos: continue

            y_err_pos = list([XYerr_pos[legend_name][_x] for _x in x])
            y_err_neg = list([XYerr_neg[legend_name][_x] for _x in x])

            y_max = max(y_err_pos + [y_max])

            data.append(go.Scatter(name=legend_name,
                                   x=x, y=y_err_pos,
                                   line=dict(color=color, width=0),
                                   mode="lines",
                                   showlegend=False,
                                   legendgroup=legend_name,
                                   ))
            data.append(go.Scatter(name=legend_name,
                                   x=x, y=y_err_neg,
                                   showlegend=False,
                                   mode="lines",
                                   fill='tonexty',
                                   line=dict(color=color, width=0),
                                   legendgroup=legend_name,
                                   ))

        fig = go.Figure(data=data)

        # Edit the layout
        x_title, y_title = plot_legend
        fig.update_layout(title=plot_title, title_x=0.5,
                          xaxis_title=x_title,
                          yaxis_range=[0, y_max],
                          yaxis_title=y_title)

        return fig, ""
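The error band built in `do_plot` follows the usual Plotly pattern: one visible `markers+lines` trace for the mean, then two hidden zero-width traces for the upper and lower bounds, with the lower one using `fill='tonexty'` so the area back to the previous trace is shaded. A standalone sketch of that pattern with invented data, for illustration only:

```python
# Standalone illustration of the mean + shaded-stdev band assembled in do_plot above.
# The x/y/err values are invented purely to show the three-trace 'tonexty' pattern.
import plotly.graph_objs as go

x = [1, 2, 4, 8]
y = [110.0, 95.0, 90.0, 102.0]   # mean latency per x value
err = [5.0, 4.0, 6.0, 7.0]       # one stdev per x value

mean_trace = go.Scatter(name="demo", x=x, y=y, mode="markers+lines",
                        line=dict(color="blue", width=2), legendgroup="demo")
upper = go.Scatter(name="demo", x=x, y=[v + e for v, e in zip(y, err)],
                   mode="lines", line=dict(color="blue", width=0),
                   showlegend=False, legendgroup="demo")
lower = go.Scatter(name="demo", x=x, y=[v - e for v, e in zip(y, err)],
                   mode="lines", line=dict(color="blue", width=0),
                   fill="tonexty",  # shade the area back to the previous trace (the upper bound)
                   showlegend=False, legendgroup="demo")

fig = go.Figure(data=[mean_trace, upper, lower])
```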
Binary file not shown.