run-tests.py

#! /usr/bin/python2
"""
This is the main test script which identifies all forks of a particular
project (or several projects) and runs the test algorithms on each fork.
"""
import fileinput
import fcntl
import json
import sys
import requests
import ConfigParser
import os.path
import datetime
import shutil
from pygithub3 import Github
from subprocess import call
from string import Template
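
# Take an exclusive, non-blocking lock on .videorooter.lock so that only one
# test run is active at a time; if another instance already holds the lock,
# exit quietly.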
flock = open('.videorooter.lock', 'w')
try:
    fcntl.lockf(flock, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
    sys.exit(0)
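
# Credentials and paths come from the [tests] section of tests.conf
# (username, password, cachepath, outpath).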
config = ConfigParser.ConfigParser()
config.read('tests.conf')

gh = Github(login=config.get('tests', 'username'),
            password=config.get('tests', 'password'))

base_repo = [ u'videorooter/algo-repository-template',
              u'commonsmachinery/blockhash' ]
derived_repos = []
derived_branches = []

#
# Iterate over all forks (and forks of forks) of the above repositories
# to find the complete list of forks in any generation. Add each fork
# to derived_repos.
#
while base_repo:
    repo = base_repo.pop()
    i = repo.split('/')
    forks = gh.repos.forks.list(user=i[0], repo=i[1])
    repo_meta = gh.repos.get(user=i[0], repo=i[1])
    derived_repos.append("%s/%s" % (repo, repo_meta.fork))
    for i in forks.iterator():
        base_repo.append("%s/%s" % (i.owner.login, i.name))
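
# Each entry in derived_repos now has the form "owner/name/True" or
# "owner/name/False", where the last component is the repository's fork flag.
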
#
# For each repo identified in derived_repos as not a fork, add
# all of its branches to the list of derived branches.
#
for repo in list(set(derived_repos)):
    r = repo.split('/')
    if r[2] == "False":
        branches = gh.repos.list_branches(user=r[0], repo=r[1])
        for i in branches.iterator():
            derived_branches.append({'base': "%s/%s" % (r[0], r[1]),
                                     'branch': i.name,
                                     'sha': i.commit.sha})

#
# For each fork, only add its branches to the derived branches if the
# last commit is different from the origin.
#
for repo in list(set(derived_repos)):
    r = repo.split('/')
    if r[2] == "True":
        branches = gh.repos.list_branches(user=r[0], repo=r[1])
        for i in branches.iterator():
            found = 0
            for l in derived_branches:
                if l['sha'] == i.commit.sha:
                    found = 1
            if found == 0:
                derived_branches.append({'base': "%s/%s" % (r[0], r[1]),
                                         'branch': i.name,
                                         'sha': i.commit.sha})
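
# derived_branches now holds one {'base': 'owner/name', 'branch': ..., 'sha': ...}
# entry per branch that is worth testing.
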
#
# For each branch, check if we have a cached copy of tests run against
# its latest commit. If we don't have a cached copy, call
# single-test-image.sh to run the tests. The script writes its results
# to src/output.html, so copy that file over to the cache.
#
for i in derived_branches:
    if i['base'] == 'videorooter/algo-repository-template':
        continue
    if not os.path.isfile("%s/%s" % (config.get('tests', 'cachepath'), i['sha'])):
        #
        # Check if we actually have a videorooter.conf to work from
        #
        r = i['base'].split('/')
        l = gh.repos.commits.list(user=r[0], repo=r[1], sha=i['branch'], path='videorooter.conf')
        if len(l.all()) > 0:
            res = call(['./single-test-image.sh', "http://github.com/%s" % i['base'], i['branch'], i['sha']])
            if not res:
                shutil.copyfile('src/output.html', "%s/%s" % (config.get('tests', 'cachepath'), i['sha']))
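
# Results are cached under cachepath, keyed by the branch head's sha, so a
# branch is only re-tested when its head commit changes. The template
# repository itself is skipped.
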
#
# Go through all branches again and collect statistics from the output files.
# The "best" result is the one with the highest spread at a given threshold,
# i.e. accuracy minus the false positive rate. A higher spread means high
# accuracy combined with few false positives.
#
# This is ugly, isn't there a better way to parse formatted strings?
#
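# The parsing below assumes result lines roughly of the form
#   IMAGE ACC t=10 95.2%
#   IMAGE CC t=10 1.3%
# (media type, metric, threshold, percentage); other lines are ignored.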
content = ""
for i in derived_branches:
    if i['base'] == 'videorooter/algo-repository-template':
        continue
    if os.path.isfile("%s/%s" % (config.get('tests', 'cachepath'), i['sha'])):
        f = open("%s/%s" % (config.get('tests', 'cachepath'), i['sha']))
        acc = {'IMAGE': {}, 'MOVIE': {}}
        cc = {'IMAGE': {}, 'MOVIE': {}}
        best_result = {'IMAGE': 0, 'MOVIE': 0}
        best_t = {'IMAGE': 0, 'MOVIE': 0}
        for line in f:
            if 'ACC' in line:
                l = line.split()
                acc[l[0]][int(l[2].strip('t').strip('='))] = float(l[3].strip('%'))
            elif 'CC' in line:
                l = line.split()
                cc[l[0]][int(l[2].strip('t').strip('='))] = float(l[3].strip('%'))
        for foo in acc.keys():
            for j in acc[foo]:
                if (acc[foo][j]-cc[foo][j]) > best_result[foo]:
                    best_result[foo] = acc[foo][j]-cc[foo][j]
                    best_t[foo] = j
        f.seek(0)
        template = open('tmpl/test.html.tmpl')
        src = Template(template.read())
        subst = { 'testname': "%s/%s" % (i['base'], i['branch']),
                  'id': i['sha'],
                  'result_i': best_result['IMAGE'],
                  't_i': best_t['IMAGE'],
                  'result_m': best_result['MOVIE'],
                  't_m': best_t['MOVIE'],
                  'output': f.read() }
        f.close()
        i['output'] = src.substitute(subst)
        content += i['output']
        template.close()
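
# content now holds one rendered tmpl/test.html.tmpl fragment per tested
# branch; the index template below drops it into its $content placeholder.
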
#
# Now we do the actual output!
#
template = open('tmpl/index.html.tmpl')
src = Template(template.read())
subst = { 'runtime': datetime.datetime.utcnow(),
          'content': content }
f = open("%s/index.html" % config.get('tests', 'outpath'), 'w')
f.write(src.substitute(subst))
f.close()
template.close()
flock.close()