tada_a_bigoh.py (forked from Tada-Project/tada)
"""Run doubling experiments and 'Tada!' you get the time complexity"""
import sys
import perf
from tada.util import arguments
from tada.util import configuration
from tada.util import constants
from tada.util import display
from tada.util import package
from tada.util import run
from tada.util import save
if __name__ == "__main__":
    current_size = constants.SIZE_START
    # display the welcome message
    display.display_welcome_message()
    # read and verify the command-line arguments
    tada_arguments = arguments.parse(sys.argv[1:])
    did_verify_arguments = arguments.verify(tada_arguments)
    # incorrect arguments, so exit the program
    if did_verify_arguments is False:
        print("Incorrect command-line arguments.")
        sys.exit(constants.INCORRECT_ARGUMENTS)
    # correct arguments, so run the doubling experiment
    else:
        # add the directory to sys.path
        package.add_sys_path(tada_arguments.directory)
        # create and save a configuration dictionary from the arguments
        configuration.save(constants.CONFIGURATION, vars(tada_arguments))
        # save the size of the experiment in the constants file
        save.save_experiment_size(constants.SIZE, current_size)
        # save the directory containing the functions to be analyzed
        save.save_directory(constants.DIRECTORY, tada_arguments.directory)
        # perform the doubling experiment
        while current_size <= constants.SIZE_STOP:
            # run the benchmark through the Python interpreter
            display.display_start_message(current_size)
            current_output, current_error = run.run_command(
                constants.PYTHON_EXEC
                + constants.SPACE
                + constants.PERF_BENCHMARK
                + constants.PYTHON_EXT
            )
            # display the standard output and standard error
            display.display_output(current_output.decode(constants.UTF8))
            display.display_output(current_error.decode(constants.UTF8))
            # read the JSON file containing the benchmark results
            current_benchmark = perf.Benchmark.load(
                constants.RESULTS
                + constants.SEPARATOR
                + configuration.get_experiment_name(vars(tada_arguments), current_size)
                + constants.JSON_EXT
            )
            # perform additional analysis of the results
            # reminder: print('Values {0}'.format(current_benchmark.get_values()))
            print("Mean {0}".format(current_benchmark.mean()))
            print("Median {0}".format(current_benchmark.median()))
            # show that the run for this size is complete
            display.display_end_message(current_size)
            # double the size for the next round of the experiment
            current_size = current_size * constants.FACTOR
            # write the next doubling experiment size to the file
            save.save_experiment_size(constants.SIZE, current_size)
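        # Sketch (not part of the original tool): the ratio of mean timings
        # between consecutive doubled sizes hints at the growth order, e.g. a
        # ratio near 2 suggests O(n), near 4 suggests O(n^2), and near 1
        # suggests O(1). A minimal post-hoc check, assuming the means were
        # appended to a hypothetical list named `mean_timings` inside the loop
        # above:
        #
        #     import math
        #     ratios = [b / a for a, b in zip(mean_timings, mean_timings[1:])]
        #     orders = [math.log(r, constants.FACTOR) for r in ratios]
        #     print("Estimated polynomial order per doubling: {0}".format(orders))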