#!/usr/bin/env python3
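"""Run lsif-clang over a compile_commands.json one translation unit at a time.

Each entry of the given compilation database is written to its own single-entry
compile_commands.json in a temporary directory and handed to a separate
lsif-clang process (up to --concurrency at once). Entries whose process crashes
or prints a stack trace are reported as failures, and their single-entry
database is copied to a '*-repro' temporary directory for easy reproduction.
"""
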
import argparse
import os
import shutil
import json
import multiprocessing as mp
import pathlib
import tempfile
import subprocess
import sys
import time


def run_lsif_clang(q, sema, lsif_clang_abspath, compile_commands_abspath):
    """Run lsif-clang on a single-entry compile_commands.json and report the result on q."""
    err = None
    exitcode, output = 0, ''
    try:
        proc = subprocess.run([lsif_clang_abspath, compile_commands_abspath],
                              stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        output = proc.stdout.decode('utf-8')
        # Looks like lsif-clang's exit code is always 0 :(
        if proc.returncode != 0 or 'Stack trace (most recent call last) in thread' in output:
            exitcode = 1
    except Exception as e:
        # Record the failure so the parent doesn't misreport this entry as a success.
        exitcode, output = 1, repr(e)
        err = e
    finally:
        q.put((exitcode, output, compile_commands_abspath))
        sema.release()
    if err:
        raise err


def failure_message(output, lsif_clang_abspath, compile_commands_abspath, quiet):
    msg = ""
    if not quiet:
        msg = 'Found lsif-clang failure (stdout+stderr below):'
        msg += '\n--------------------------------------------------------------\n'
        msg += output
        if msg[-1] != '\n':
            msg += '\n'
        msg += '--------------------------------------------------------------\n'
        msg += '\n'
    msg += 'Reproduce the failure by running:\n {} {}\n'.format(lsif_clang_abspath, compile_commands_abspath)
    return msg


# From https://stackoverflow.com/a/34736291/2682729
class NegateAction(argparse.Action):
    def __call__(self, parser, ns, values, option):
        setattr(ns, self.dest, option[2:4] != 'no')
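
# How the negation works with the flags registered in default_main below:
# '--fail-fast'[2:4] == 'fa', so the destination is set to True, while
# '--no-fail-fast'[2:4] == 'no', so it is set to False.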


def default_main():
    parser = argparse.ArgumentParser()
    parser.add_argument('lsif_clang_path', help='Path to lsif-clang')
    parser.add_argument('compile_commands_path',
                        help='Path to compile_commands.json file, intended to be passed to lsif-clang')
    parser.add_argument('--fail-fast', '--no-fail-fast', action=NegateAction, nargs=0,
                        help='Should we exit after finding the first failure? (default: true)')
    parser.add_argument('--concurrency', type=int, default=os.cpu_count(),
                        help='Number of lsif-clang processes to spawn at once')
    parser.add_argument('--suppress-clang-output', default=False, action="store_true",
                        help="Suppress lsif-clang's output on failure")
    parser.set_defaults(fail_fast=True)
    args = parser.parse_args()

    assert os.path.exists(args.lsif_clang_path)
    lsif_clang_path = pathlib.Path(args.lsif_clang_path)
    if not lsif_clang_path.is_absolute():
        lsif_clang_path = pathlib.Path.cwd().joinpath(lsif_clang_path)

    workdir = pathlib.Path(args.compile_commands_path).parent
    if not workdir.is_absolute():
        workdir = pathlib.Path.cwd().joinpath(workdir)

    jobs = []
    with open(args.compile_commands_path) as f:
        entries = json.load(f)
        for (i, entry) in enumerate(entries):
            # Overwrite the working directory so that we can create
            # temporary compile_commands.json elsewhere and still have
            # everything else work as-is.
            entry['command'] += ' -working-directory={}'.format(workdir)
            jobs.append(entry)
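
    # For reference, each entry loaded above follows the Clang JSON compilation
    # database format: an object with "directory", "command" (or "arguments"),
    # and "file" keys. The paths in this sketch are purely illustrative:
    #
    #   {
    #     "directory": "/home/user/project/build",
    #     "command": "clang++ -Iinclude -c ../src/foo.cpp -o foo.o",
    #     "file": "../src/foo.cpp"
    #   }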

    concurrency = min(args.concurrency, len(jobs))
    mp.set_start_method('spawn')
    status_queue = mp.Queue()
    sema = mp.Semaphore(value=concurrency)
    completed = 0

    def drain_queue(q):
        nonlocal completed
        count = 0
        while not q.empty():  # Did any processes complete?
            exitcode, output, compile_commands_abspath = q.get()
            completed += 1
            if exitcode != 0:
                tmpdir = tempfile.mkdtemp('-repro')
                json_copy = '{}/compile_commands.json'.format(tmpdir)
                shutil.copyfile(compile_commands_abspath, json_copy)
                print(failure_message(output, str(lsif_clang_path), json_copy, args.suppress_clang_output),
                      file=sys.stderr)
                count += 1
                if args.fail_fast:
                    sys.exit(1)
            else:
                with open(compile_commands_abspath, 'r') as compdb:
                    entries = json.load(compdb)
                    entry = entries[0]
                    print("[{}/{}] Indexed {}".format(completed, len(jobs), entry['file']))
            sys.stdout.flush()
        return count

    num_failures = 0
    with tempfile.TemporaryDirectory('-bisect-lsif-clang') as tempdir:
        shutil.rmtree(tempdir)
        os.mkdir(tempdir)
        for (i, job) in enumerate(jobs):
            sema.acquire()  # TODO: Add timeout here
            num_failures += drain_queue(status_queue)
            os.mkdir('{}/{}'.format(tempdir, i))
            json_file_path = '{}/{}/compile_commands.json'.format(tempdir, i)
            with open(json_file_path, 'w') as json_file:
                json.dump([job], json_file)
            proc = mp.Process(target=run_lsif_clang,
                              args=(status_queue, sema, str(lsif_clang_path), json_file_path))
            proc.start()
        for _ in range(args.concurrency):
            # Make sure to wait for any processes that were spawned at the end
            sema.acquire()
        num_failures += drain_queue(status_queue)

    # There seems to be an off-by-one error sometimes in counting failures.
    # One plausible explanation: multiprocessing.Queue hands items to a
    # background feeder thread, so a child can release the semaphore before its
    # q.put() becomes visible to the parent, and the final drain_queue()'s
    # q.empty() check may miss an in-flight result.
    if num_failures > 0:
        print('{}/{} lsif-clang commands failed. 😭'.format(num_failures, len(jobs)))
    else:
        print('All lsif-clang commands ran successfully! 🎉')


if __name__ == '__main__':
    default_main()
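
# Example invocation (the paths and flag values here are illustrative, not taken
# from this repository):
#
#   ./lsif-clang-driver.py /usr/local/bin/lsif-clang build/compile_commands.json \
#       --no-fail-fast --concurrency 4
#
# Each entry of the database is indexed by its own lsif-clang process; failing
# entries get their single-entry compile_commands.json copied to a '*-repro'
# temporary directory so the failure can be reproduced by hand.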