#!/usr/bin/python
#
# Copyright 2009 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""top-tests.py - Report about slowest tests in the test suite.

It parses the output of the testrunner run with -vvv and collects
statistics about the test run.
"""

__metaclass__ = type

import operator
import os
import re
import sys

LEN = 20


class ParseException(Exception):
    """Exception raised when there is an error while parsing a log file."""


class TestRunnerStats:
    """Encapsulates information about the time it took to run a testsuite."""

    LAYER_STARTS_RE = re.compile(r'Running (.+) tests:')

    LAYER_ENDS_RE = re.compile(
        r'  Ran (\d+) tests with (\d+) failures and (\d+) errors in ([\d.]+) '
        'seconds.')

    SETUP_RE = re.compile(r'  Set up ([\w.]+) in ([\d.]+) seconds.')

    TEARDOWN_RE = re.compile(r'  Tear down ([\w.]+) in ([\d.]+) seconds.')

    UNSUPPORTED_TEARDOWN_RE = re.compile(
        r'  Tear down ([\w.]+) ... not supported')

    # We are not restricting this to the standard python identifiers because
    # some doctest, unittest or generated tests could contain funky names.
    PYTHON_TEST_RE = re.compile(r'([^\( ]+) ?\(([^\)]+)\)')

    MS_RE = re.compile(r'\s*\(([\d.]+) ms\)$')

    TOTAL_RE = re.compile(r'Total: (\d+) tests, (\d+) failures, (\d+) errors')

    # List of strings/patterns to attempt to match.
    # The second element in the tuple is the method to call when the start of
    # the current line matches the string or the pattern.
    MATCH_LIST = [
        ('Running tests at level', 'handleStartTestRunner'),
        (LAYER_STARTS_RE, 'handleLayerStart'),
        (LAYER_ENDS_RE, 'handleLayerEnd'),
        (SETUP_RE, 'handleLayerSetUp'),
        (TEARDOWN_RE, 'handleLayerTearDown'),
        (UNSUPPORTED_TEARDOWN_RE, 'handleUnsupportedTearDown'),
        ('  Running:', None),
        ('Tearing down left over layers:', 'handleFinalTearDown'),
        (MS_RE, 'handleTestRuntime'),
        (LAYER_ENDS_RE, 'handleLayerEnd'),
        (TEARDOWN_RE, 'handleLayerTearDown'),
        (TOTAL_RE, 'handleTotal'),
        ('    ', 'handleTestRun'),
        (None, 'handleGarbage'),
        ]
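
    # For reference, a sketch of the testrunner -vvv lines these patterns
    # target (names and numbers are illustrative, reconstructed from the
    # regexes above rather than copied from a real log):
    #
    #   Running tests at level 1
    #   Running canonical.testing.FooLayer tests:
    #     Set up canonical.testing.FooLayer in 3.142 seconds.
    #     Running:
    #       test_bar (canonical.launchpad.tests.test_foo.TestFoo) (0.003 ms)
    #     Ran 42 tests with 0 failures and 0 errors in 95.333 seconds.
    #   Tearing down left over layers:
    #     Tear down canonical.testing.FooLayer in 0.124 seconds.
    #   Total: 42 tests, 0 failures, 0 errors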

    def __init__(self, logfile):
        """Create a new TestRunnerStats from a log file.

        :param logfile: Open file-like object containing the log of the test
            suite. That should have been generated at -vvv for maximum
            information.
        :raise ParseException: when the log file doesn't contain a testrunner
            log, or couldn't be parsed for some other reason.
        """
        self.logfile = logfile
        self._parse()

    def _parse(self):
        """Extract timing information from the log file."""
        self.layers = {}
        self.ignored_lines = []
        self.current_layer = None
        self.last_test = None

        end_of_tests = False
        while not end_of_tests:
            line = self.logfile.readline()
            if not line:
                break
            for match, action in self.MATCH_LIST:
                found = False
                if isinstance(match, basestring):
                    if line.startswith(match):
                        found = True
                elif match is None:
                    # None indicates the default action.
                    found = True
                elif getattr(match, 'match', None):
                    found = match.match(line)
                if found:
                    # Action is the name of the method to call.
                    # If it returns True, stop parsing.
                    if action is not None:
                        end_of_tests = getattr(self, action)(line, found)
                    break

        if not end_of_tests:
            raise ParseException('End of file before end of test run.')

    def handleStartTestRunner(self, line, ignored):
        """Switch to the layer state."""

    def handleLayerStart(self, line, match):
        """Create a new stats container for the layer."""
        layer_name = match.group(1)
        self.current_layer = self.getLayer(layer_name)

    def handleLayerEnd(self, line, match):
        """Collect the total runtime for the layer tests."""
        tests_run = match.group(1)
        runtime = match.group(4)
        self.current_layer.collectEndResults(tests_run, runtime)

    def handleLayerSetUp(self, line, match):
        """Collect the runtime for the layer set up."""
        layer_name = match.group(1)
        runtime = float(match.group(2))
        self.getLayer(layer_name).collectSetUp(runtime)

    def handleLayerTearDown(self, line, match):
        """Collect the runtime for the layer tear down."""
        layer_name = match.group(1)
        runtime = float(match.group(2))
        self.getLayer(layer_name).collectTearDown(runtime)

    def handleUnsupportedTearDown(self, line, match):
        """Flag that tear down was unsupported."""
        layer_name = match.group(1)
        self.getLayer(layer_name).collectUnsupportedTearDown()

    def handleFinalTearDown(self, line, match):
        """Switch to the teardown state."""

    def handleTestRun(self, line, ignored):
        """Collect that a test was run."""
        # If we didn't see the last test's runtime, we are probably
        # in a stack trace or something like that. So treat it as garbage.
        if self.last_test is not None and not self.last_test_complete:
            if self.MS_RE.search(line) is None:
                self.handleGarbage(line, ignored)
                return
            else:
                # It happens that a test doesn't output timing information,
                # but other tests after that will. We are probably
                # encountering such a case.
                pass
        line = line[4:]
        if '/' in line:
            if ' ' in line:
                doctest, line = line.split(' ', 1)
            else:
                doctest = line
                line = '\n'
            self.last_test = DocTestStats(doctest)
        else:
            match = self.PYTHON_TEST_RE.match(line)
            if match:
                self.last_test = PythonTestStats(
                    match.group(1), match.group(2))
            else:
                raise ParseException("can't parse test name: %s" % line)
            line = line[match.end():]
        self.current_layer.collectTest(self.last_test)

        # If the runtime isn't on this line, it means that there was output
        # by the test, so we'll find the runtime info later on.
        match = self.MS_RE.search(line)
        if match:
            self.last_test_complete = True
            self.last_test.runtime = float(match.group(1))
        else:
            self.last_test_complete = False
            self.last_test.collectGarbage(line)

    def handleGarbage(self, line, ignored):
        """Save the log output by the test."""
        if self.last_test is not None:
            self.last_test.collectGarbage(line)
        else:
            self.ignored_lines.append(line)

    def handleTestRuntime(self, line, match):
        """Collect the runtime of a test whose timing appeared after its
        output."""
        if self.last_test is not None:
            self.last_test.runtime = float(match.group(1))
            self.last_test_complete = True
        else:
            self.ignored_lines.append(line)

    def handleTotal(self, line, match):
        """Action invoked when the final line is encountered."""
        self.current_layer = None
        self.last_test = None
        return True

    def getLayer(self, layer_name):
        """Return the layer with that name.

        Create and return an empty layer if it doesn't exist.
        """
        if layer_name not in self.layers:
            self.layers[layer_name] = TestLayerStats(layer_name)
        return self.layers[layer_name]

    def getTestsIter(self):
        """Return an iterator over all tests."""
        for layer in self.layers.values():
            for test in layer.tests:
                yield test

    @property
    def total_runtime(self):
        """Number of seconds used to run the whole test suite."""
        return sum([layer.total_runtime for layer in self.layers.values()])

    @property
    def tests_count(self):
        """Number of tests in the test suite."""
        return sum([len(layer.tests) for layer in self.layers.values()])
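
# A minimal usage sketch for TestRunnerStats (the log file name is
# hypothetical):
#
#     stats = TestRunnerStats(open('pqm-log.txt'))
#     print '%d tests in %.1fs' % (stats.tests_count, stats.total_runtime)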


class TestLayerStats:
    """Contains all the tests that were run in the layer."""

    name = None
    unsupported_tear_downs = 0

    tests_runtime = 0

    def __init__(self, name):
        """Create a new stats container."""
        self.name = name
        self.tests = []
        self.set_ups = []
        self.tear_downs = []

    @property
    def total_runtime(self):
        """Return the runtime (including fixture) in this layer."""
        return self.tests_runtime + sum(self.set_ups) + sum(self.tear_downs)

    def collectTest(self, test):
        """Called when a test was run in the layer."""
        self.tests.append(test)

    def collectEndResults(self, tests_run, runtime):
        """Called when all the tests in the layer were run."""
        self.tests_runtime = float(runtime)
        self.tests_count = int(tests_run)

    def collectSetUp(self, runtime):
        """Called when the layer was set up."""
        self.set_ups.append(runtime)

    def collectTearDown(self, runtime):
        """Called when the layer was torn down."""
        self.tear_downs.append(runtime)

    def collectUnsupportedTearDown(self):
        """Called when the layer couldn't be torn down."""
        self.unsupported_tear_downs += 1

    def __iter__(self):
        """Return an iterator over the tests run in this layer."""
        return iter(self.tests)


class TestStats:
    """Base class for test stats."""

    name = None
    runtime = 0

    def __init__(self):
        self._garbage = []

    @property
    def garbage(self):
        """Return the garbage output by the test."""
        return "".join(self._garbage)

    def collectGarbage(self, line):
        """Record a line of output produced by the test."""
        self._garbage.append(line)


class PythonTestStats(TestStats):
    """Stats for a regular python unit test."""

    def __init__(self, method, module):
        super(PythonTestStats, self).__init__()
        self.method = method
        self.module = module

    @property
    def name(self):
        """Return the full name of the test."""
        return "%s.%s" % (self.module, self.method)
class DocTestStats(TestStats): |
|
314 |
"""Stats for a doctest."""
|
|
315 |
||
316 |
def __init__(self, filename): |
|
317 |
super(DocTestStats, self).__init__() |
|
318 |
self.filename = filename |
|
319 |
||
320 |
@property
|
|
321 |
def name(self): |
|
322 |
"""Remove the PQM directory from the name."""
|
|
323 |
index = self.filename.find("lib/canonical") |
|
324 |
if index != -1: |
|
325 |
filename = self.filename[index:] |
|
326 |
else: |
|
327 |
filename = self.filename |
|
328 |
return os.path.normpath(filename) |
|
329 |
||
330 |
||
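
# For example, a DocTestStats for the hypothetical path
# '/home/pqm/branch/lib/canonical/launchpad/doc/foo.txt' would report
# 'lib/canonical/launchpad/doc/foo.txt' as its name.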


class PQMLog:
    """Encapsulates information about a PQM log."""

    def __init__(self, logfile):
        """Create a new PQMLog instance.

        :param logfile: Path to the PQM log.
        """
        self.logfile = logfile
        self.fixtures_profile = []

        self._parse()

    def _parse(self):
        """Parse a PQM log file.

        Extract the branch name, the time each test took, as well as the
        time spent in the layers.
        """
        self.branch = "Unknown"
        profile = self.fixtures_profile

        logfile = open(self.logfile)
        while True:
            line = logfile.readline()
            if not line:
                break
            line = line.strip()
            if not line:
                continue
            if line.startswith("Executing star-merge"):
                self.branch = line.split(" ")[2]
            elif " calls taking " in line:
                if "s." not in line:
                    continue
                values = line.split(" ")
                runtime = float(values[-1][:-2])
                profile.append((runtime, values[0]))
            elif line.startswith("Executing pre-commit hook"):
                self.testrunner_stats = TestRunnerStats(logfile)
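
# The fixture-profile lines handled by _parse above are assumed to look
# roughly like (the method name is hypothetical):
#
#     FooLayer.setUp ... 12 calls taking 3.45s.
#
# values[0] is the method name and values[-1][:-2] strips the trailing
# 's.' to recover the runtime in seconds.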


def main(argv):
    """Parse a PQM log file and print the report."""
    if len(argv) > 1:
        logfile = argv[1]
    else:
        logfile = find_latest_successful_merge()
    print_report(PQMLog(logfile))
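
# Example invocation (the log file name is hypothetical):
#
#     python top-tests.py pqm-log.txt
#
# With no argument, the most recent successful merge log found in the
# current directory is used.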


def find_latest_successful_merge():
    """Return the latest PQM log that contains a successful merge.

    Look in the current directory for the log files.
    """
    # Grep the ten most recently modified files for the marker of a
    # successful merge and take the first match.
    cmd = "ls -at | head -10 | xargs grep -l 'star-merge succeeded'"
    p = os.popen(cmd)
    logfile_name = p.readlines()[0].strip()
    p.close()
    return logfile_name


def print_report(pqm_log, out=sys.stdout):
    """Print the report to `out` (standard output by default)."""

    print >>out, "Log: %s" % pqm_log.logfile
    print >>out, "Branch: %s" % pqm_log.branch

    stats = pqm_log.testrunner_stats

    top_tests = list(stats.getTestsIter())
    top_tests.sort(key=operator.attrgetter('runtime'), reverse=True)

    total_runtime = stats.total_runtime
    tests_count = stats.tests_count

    print >>out
    print >>out, "Top %d tests taking the longest time" % LEN
    print >>out, "===================================="
    print >>out
    top_runtime = 0.0
    for test in top_tests[:LEN]:
        percent = test.runtime / total_runtime * 100
        top_runtime += test.runtime
        print >>out, "%6.2f (%.1f%%) %s" % (test.runtime, percent, test.name)
    print >>out
    test_percent = LEN / float(tests_count) * 100
    time_percent = top_runtime / total_runtime * 100
    print >>out, (
        "Top %s of %s (%.1f%%) tests taking %ss of %ss (%.1f%%)"
        % (LEN, tests_count, test_percent, top_runtime, total_runtime,
           time_percent))
    print >>out

    print >>out, "Tests and runtime by layer"
    print >>out, "=========================="
    print >>out

    layers = stats.layers.values()
    layers.sort(key=operator.attrgetter('total_runtime'), reverse=True)
    for layer in layers:
        if len(layer.tests) == 0:
            continue
        runtime_percent = layer.tests_runtime / total_runtime * 100
        layer_name = layer.name.split('.')[-1]
        print >>out, "%7.2f (%4.1f%%) %4d tests (%5.2fs/t) %s" % (
            layer.tests_runtime, runtime_percent, len(layer.tests),
            layer.tests_runtime / len(layer.tests), layer_name)

    print >>out
    print >>out, "Slowest fixture methods"
    print >>out, "======================="
    print >>out

    profile = list(pqm_log.fixtures_profile)
    profile.sort(reverse=True)
    fixture_runtime = 0
    for runtime, method in profile:
        runtime_percent = runtime / total_runtime * 100
        print >>out, "%7.2f (%4.1f%%) %s" % (runtime, runtime_percent, method)
        fixture_runtime += runtime

    print >>out
    print >>out, "Fixture overhead %ss (%.1f%%)" % (
        fixture_runtime, fixture_runtime / total_runtime * 100)
    print >>out

    tests_with_garbage = 0
    garbage_lines_count = 0
    for test in stats.getTestsIter():
        if len(test.garbage):
            tests_with_garbage += 1
            garbage_lines_count += test.garbage.strip().count('\n') + 1

    print >>out, "%d tests output %d warning lines." % (
        tests_with_garbage, garbage_lines_count)
    print >>out, "Ignored %d lines in the testrunner output." % len(
        stats.ignored_lines)
    print >>out
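
# The report printed above looks roughly like this (all names and numbers
# are illustrative):
#
#     Log: pqm-log.txt
#     Branch: foo-branch
#
#     Top 20 tests taking the longest time
#     ====================================
#
#      12.34 (1.2%) lib/canonical/launchpad/doc/foo.txt
#       9.87 (0.9%) canonical.launchpad.tests.test_foo.TestFoo.test_bar
#     ...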


if __name__ == '__main__':
    main(sys.argv)