-
Notifications
You must be signed in to change notification settings - Fork 4
/
Copy pathgenerate.py
executable file
·287 lines (244 loc) · 9.7 KB
/
generate.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
#!/usr/bin/env python3
import os
import sys
import shutil
import itertools
import string
import argparse
import json
import textwrap
import re
import unicodedata
# https://stackoverflow.com/a/295466/2570605
def slugify(value, allow_unicode=False):
    """
    Normalize *value* into a filesystem- and URL-friendly slug.

    Converts to ASCII unless 'allow_unicode' is True, drops every character
    that is not alphanumeric, underscore, whitespace, or hyphen, lowercases,
    strips surrounding whitespace, and collapses runs of whitespace/hyphens
    into single hyphens.
    """
    text = str(value)
    if allow_unicode:
        text = unicodedata.normalize('NFKC', text)
    else:
        # NFKD decomposition lets the ascii codec drop just the accents.
        decomposed = unicodedata.normalize('NFKD', text)
        text = decomposed.encode('ascii', 'ignore').decode('ascii')
    text = re.sub(r'[^\w\s-]', '', text)
    text = text.strip().lower()
    return re.sub(r'[-\s]+', '-', text)
class Args(dict):
    """
    A dict whose keys are also readable and writable as attributes.

    str(Args) joins the slugified values with '.' to build a unique,
    filesystem-safe experiment name used for script/result file names.
    """
    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError:
            # The data model requires __getattr__ to raise AttributeError
            # (not KeyError) so hasattr(), copy, and pickling work correctly.
            raise AttributeError(key)
    def __setattr__(self, key, value):
        self[key] = value
    def __str__(self):
        # One slug per value, dot-separated, in insertion order.
        return ".".join(slugify(v) for v in self.values())
def as_list(x):
    """Return *x* unchanged if it is a list (or list subclass), else [x]."""
    # isinstance (not `type(x) is list`) is the idiomatic check and also
    # accepts list subclasses, which behave like lists for our purposes.
    return x if isinstance(x, list) else [x]
def iterSuite(suite):
    """
    Yield every permutation of configuration parameters for a suite.

    'suite' is either a dict mapping parameter names to a value (or a list
    of candidate values), or a list of such dicts. Each yielded Args binds
    every parameter name to one concrete value.
    """
    if isinstance(suite, dict):
        # Single suite: cross-product of all candidate values.
        names = list(suite.keys())
        choices = [as_list(v) for v in suite.values()]
        for combo in itertools.product(*choices):
            yield Args(dict(zip(names, combo)))
    elif isinstance(suite, list):
        # List of suites: flatten recursively.
        for sub_suite in suite:
            yield from iterSuite(sub_suite)
def check_local_config(local_config):
    """Make sure paths to input sets and executables are valid"""
    # Hardware platforms run remotely; there are no local paths to check.
    if local_config["platform"] in ("emu", "emuchick"):
        return
    supported_platforms = [
        "native", "emusim", "emusim_profile", "emu",
        "emusim-chick-box", "emusim-validation",
    ]
    try:
        for benchmark, exe_path in local_config["binaries"].items():
            if not os.path.isfile(exe_path):
                raise Exception("Invalid path for {} executable: {}".format(benchmark, exe_path))
        if local_config["platform"] not in supported_platforms:
            raise Exception("Platform {} not supported".format(local_config["platform"]))
    except KeyError as e:
        # A missing top-level field means local_config.json is malformed.
        raise Exception("Missing {} field in local_config.json".format(e))
def check_args(args, local_config):
    """Validate one experiment's args against local_config.

    Placeholder hook: currently performs no checks. Called once per
    generated experiment by generate_suite, so per-benchmark parameter
    sanity checks belong here.
    """
    pass
def pass_args(arg_names):
    """Generate a template for a command line"""
    # One "--name {name} \" continuation line per argument name.
    option_lines = ["--{0} {{{0}}} \\".format(name) for name in arg_names]
    return "\n".join(option_lines)
def generate_script(args, script_dir, out_dir, local_config, no_redirect, no_algs):
    """Generate a script to run the experiment specified by the independent variables in args

    Parameters:
        args         : Args holding this experiment's independent variables
        script_dir   : directory the generated .sh script is written to
        out_dir      : directory for result (.txt) and log (.log) files
        local_config : machine-local settings (platform, binary paths, ...)
        no_redirect  : if True, program output goes to stderr, not a log file
        no_algs      : accepted for interface compatibility; unused here

    Returns the path to the generated, executable script.
    Raises Exception for a benchmark with no command-line template.
    """
    # Add platform name to args
    args.platform = local_config["platform"]
    script_name = os.path.join(script_dir, "{}.sh".format(args))
    # Derived params are calculated from 'args' when the script is generated
    args.update({
        "name" : str(args),
        # 'args' encoded as json
        "json" : json.dumps(args),
        # Path to the executable
        "exe" : local_config["binaries"][args.benchmark],
        # Results from performance hooks go here
        "outfile" : "{0}/{1}.txt".format(out_dir, str(args)),
        # Additional result files can be created here
        "outdir" : out_dir,
        # All other program output goes here
        "logfile" : "/dev/stderr" if no_redirect else "{0}/{1}.log".format(out_dir, str(args))
    })
    # Add params from local_config
    args.update(local_config)
    # Set slurm queue requirements
    template = """\
#!/bin/bash -e
#SBATCH --job-name={name}
#SBATCH --output=/dev/null
"""
    # Set up output files
    template += """
export LOGFILE="{logfile}"
export OUTFILE="{outfile}"
export HOOKS_FILENAME=$OUTFILE
echo '{json}' | tee $LOGFILE $OUTFILE >/dev/null
echo `hostname` | tee -a $LOGFILE $OUTFILE >/dev/null
"""
    # Run multiple trials
    # template += """for trial in $(seq {num_trials}); do """
    # Native (single node) command line
    if local_config["platform"] == "native":
        template += """
{exe} \\"""
    # Emu hardware (single node) command line
    elif local_config["platform"] == "emu":
        template += """
emu_handler_and_loader 0 0 {exe} -- \\"""
    # Emu hardware (multi node) command line
    elif local_config["platform"] == "emuchick":
        template += """
emu_multinode_exec 0 --thread_quit_off -- {exe} \\"""
    # Emu profiler command line
    elif "emusim_profile" in local_config["platform"]:
        template += """
{emusim_profile_exe} \\
{outdir}/profile.{name} \\
{emusim_flags} \\
-- \\
{exe} \\"""
    # Emu simulator command line
    elif "emusim" in local_config["platform"]:
        template += """
{emusim_exe} \\
{emusim_flags} \\
-o {outdir}/{name} \\
-- {exe} \\"""
    if args.benchmark in ["local_stream", "global_stream", "global_stream_1d", "local_stream_cxx"]:
        # Generate the benchmark command line
        template += """
{spawn_mode} {log2_num_elements} {num_threads} 1 \\
&>> $LOGFILE
"""
    elif args.benchmark in ["global_stream_cxx"]:
        # Generate the benchmark command line
        template += """
{spawn_mode} {layout} {log2_num_elements} {num_threads} 1 \\
&>> $LOGFILE
"""
    elif args.benchmark == "pointer_chase":
        # Generate the benchmark command line
        template += """
--log2_num_elements {log2_num_elements} \\
--num_threads {num_threads} \\
--block_size {block_size} \\
--spawn_mode {spawn_mode} \\
--sort_mode {sort_mode} \\
--num_trials {num_trials} \\
&>> $LOGFILE
"""
    elif args.benchmark == "ping_pong":
        # Generate the benchmark command line
        template += """
{mode} {log2_num_migrations} {num_threads} {num_trials} \\
&>> $LOGFILE
"""
    else:
        raise Exception("Unsupported benchmark {}".format(args.benchmark))
    # template += "done\n"
    # Fill in the blanks
    command = textwrap.dedent(template).format(**args)
    # Write the script to file
    with open(script_name, "w") as f:
        f.write(command)
    # Mark the script executable in-process. os.system("chmod +x ...") would
    # spawn a shell, break on names containing shell metacharacters, and
    # silently ignore failures.
    os.chmod(script_name, os.stat(script_name).st_mode | 0o111)
    # Return the path to the generated script
    return script_name
def generate_suite(suite, script_dir, out_dir, local_config, no_redirect, no_algs):
    """Generate a script for each permutation of parameters in the test suite"""
    # Fail fast on a bad machine-local configuration before generating anything.
    check_local_config(local_config)
    generated = []
    for experiment in iterSuite(suite):
        check_args(experiment, local_config)
        generated.append(
            generate_script(experiment, script_dir, out_dir, local_config,
                            no_redirect, no_algs))
    return generated
def main():
    """Parse command-line options, prepare the output/script directories,
    generate one run script per experiment permutation, and write a
    'joblist' file listing every generated script path."""
    parser = argparse.ArgumentParser()
    parser.add_argument("platform", help="Hardware platform to generate scripts for")
    parser.add_argument("suite", help="Path to json file containing test suite definition")
    parser.add_argument("dir", help="Output directory for generated scripts and results")
    parser.add_argument("-f", "--force", default=False, action="store_true", help="Continue even if the results directory is not empty")
    parser.add_argument("--clean", default=False, action="store_true", help="Delete generated results before regenerating scripts")
    parser.add_argument("--no-redirect", default=False, action="store_true", help="Don't redirect output to file")
    parser.add_argument("--no-algs", default=False, action="store_true", help="Don't run any algorithms, just do incremental graph construction")
    args = parser.parse_args()
    # Prepare directories
    if not os.path.exists(args.dir):
        os.makedirs(args.dir)
    # Results and scripts live in fixed subdirectories of args.dir.
    out_dir = os.path.join(args.dir, "results")
    script_dir = os.path.join(args.dir, "scripts")
    # Create output dir if it doesn't exist
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    # Output dir not empty?
    elif os.listdir(out_dir) != []:
        # Clean out results
        if args.clean:
            # os.remove(os.path.join(args.dir, "launched"))
            shutil.rmtree(out_dir)
            os.makedirs(out_dir)
        # Just generate new ones over the top
        elif args.force:
            pass
        # Quit and complain
        else:
            sys.stderr.write("Found existing results in {}, pass --clean to remove them\n".format(out_dir))
            sys.exit(-1)
    # Create script dir if it doesn't exist
    if not os.path.exists(script_dir):
        os.makedirs(script_dir)
    # Script dir not empty?
    elif os.listdir(script_dir) != []:
        # Clean out generated scripts (scripts are always regenerated,
        # unlike results, so no --clean/--force gating here)
        shutil.rmtree(script_dir)
        os.makedirs(script_dir)
    # Load the suite definition
    with open(args.suite) as f:
        suite = json.load(f)
    # Load paths to benchmarks/inputs for this machine
    # NOTE: local_config.json is read from the current working directory,
    # and only the section for the requested platform is kept.
    with open("local_config.json") as f:
        local_config = json.load(f)[args.platform]
    local_config["platform"] = args.platform
    check_local_config(local_config)
    # Generate the scripts
    script_names = generate_suite(
        suite=suite,
        script_dir=script_dir,
        out_dir=out_dir,
        local_config=local_config,
        no_redirect=args.no_redirect,
        no_algs=args.no_algs
    )
    # Write paths to all generated scripts to a file
    joblist_file = os.path.join(args.dir, "joblist")
    with open(joblist_file, "w") as f:
        for script_name in script_names:
            f.write(script_name + "\n")
if __name__ == "__main__":
    main()