Clean up src-side runtest.py, patch #2 (remove log scraping)
BUG=506498

Review URL: https://codereview.chromium.org/1225563004

Cr-Commit-Position: refs/heads/master@{#337381}
phajdan.jr authored and JulienIsorce committed Jul 17, 2015
1 parent ecf5fe3 commit a8ca453
Showing 8 changed files with 30 additions and 1,745 deletions.
404 changes: 0 additions & 404 deletions infra/scripts/legacy/scripts/common/gtest_utils.py

Large diffs are not rendered by default.
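
For context, the deleted common/gtest_utils.py is where the gtest log parser (the GTestLogParser referenced by the deprecation note in gtest_slave_utils.py below) lived. "Log scraping" here means recognizing gtest's per-test status markers in raw console output. A minimal sketch of that idea, reusing the [  FAILED  ] pattern that also appears in GTestUnexpectedDeathTracker further down; scrape_failed_tests is a made-up name for illustration, not part of the deleted module:

import re

# gtest prints one status marker per test case; a log scraper keys off these.
_TEST_FAIL = re.compile(r'\[\s+FAILED\s+\] (\w+\.\w+)')

def scrape_failed_tests(log_lines):
  """Illustrative only: collect the tests that printed a [  FAILED  ] line."""
  failed = []
  for line in log_lines:
    match = _TEST_FAIL.search(line)
    if match:
      failed.append(match.group(1))
  return failed

print(scrape_failed_tests(['[ RUN      ] FooTest.Bar',
                           '[  FAILED  ] FooTest.Bar (12 ms)']))
# -> ['FooTest.Bar']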

21 changes: 13 additions & 8 deletions infra/scripts/legacy/scripts/slave/annotation_utils.py
@@ -11,10 +11,15 @@
 
 import re
 
-from slave import performance_log_processor
 from slave import slave_utils
 
 
+# Status codes that can be returned by the evaluateCommand method.
+# From buildbot.status.builder.
+# See: http://docs.buildbot.net/current/developer/results.html
+SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION, RETRY = range(6)
+
+
 def getText(result, observer, name):
   """Generate a text summary for the waterfall.
@@ -46,9 +51,9 @@ def getText(result, observer, name):
 
   failed_test_count = len(observer.FailedTests())
   if failed_test_count == 0:
-    if result == performance_log_processor.SUCCESS:
+    if result == SUCCESS:
       return basic_info
-    elif result == performance_log_processor.WARNINGS:
+    elif result == WARNINGS:
       return basic_info + ['warnings']
 
   if observer.RunningTests():
@@ -80,7 +85,7 @@ def annotate(test_name, result, log_processor, perf_dashboard_id=None):
   # with no output (exit code can have some clues, especially on Windows).
   print 'exit code (as seen by runtest.py): %d' % result
 
-  get_text_result = performance_log_processor.SUCCESS
+  get_text_result = SUCCESS
 
   for failure in sorted(log_processor.FailedTests()):
     clean_test_name = re.sub(r'[^\w\.\-]', '_', failure)
@@ -102,18 +107,18 @@ def annotate(test_name, result, log_processor, perf_dashboard_id=None):
     if parser_result > result:
       result = parser_result
 
-  if result == performance_log_processor.SUCCESS:
+  if result == SUCCESS:
     if (len(log_processor.ParsingErrors()) or
         len(log_processor.FailedTests()) or
         len(log_processor.MemoryToolReportHashes())):
       print '@@@STEP_WARNINGS@@@'
-      get_text_result = performance_log_processor.WARNINGS
+      get_text_result = WARNINGS
   elif result == slave_utils.WARNING_EXIT_CODE:
     print '@@@STEP_WARNINGS@@@'
-    get_text_result = performance_log_processor.WARNINGS
+    get_text_result = WARNINGS
   else:
     print '@@@STEP_FAILURE@@@'
-    get_text_result = performance_log_processor.FAILURE
+    get_text_result = FAILURE
 
   for desc in getText(get_text_result, log_processor, test_name):
     print '@@@STEP_TEXT@%s@@@' % desc
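
For context on the hunks above: annotate() reports results to the buildbot waterfall by printing annotator directives (@@@STEP_WARNINGS@@@, @@@STEP_FAILURE@@@, @@@STEP_TEXT@...@@@) on stdout, and this change simply inlines the buildbot result codes instead of importing them from performance_log_processor. A stripped-down sketch of that mapping; emit_step_status is a made-up name, and the real annotate() also folds in parsing errors, failed tests, and the warning exit code as the diff shows:

# Buildbot result codes, as inlined by this change.
SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION, RETRY = range(6)

def emit_step_status(result):
  # Illustrative only: map a result code to a buildbot annotator directive.
  if result == SUCCESS:
    return  # step stays green; no annotation needed
  elif result == WARNINGS:
    print('@@@STEP_WARNINGS@@@')
  else:
    print('@@@STEP_FAILURE@@@')

emit_step_status(WARNINGS)  # prints @@@STEP_WARNINGS@@@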
65 changes: 0 additions & 65 deletions infra/scripts/legacy/scripts/slave/gtest_slave_utils.py
@@ -25,71 +25,6 @@
 TIMES_MS_FILENAME = 'times_ms.json'
 
 
-# Note: GTestUnexpectedDeathTracker is being deprecated in favor of
-# common.gtest_utils.GTestLogParser. See scripts/slave/runtest.py for details.
-class GTestUnexpectedDeathTracker(object):
-  """A lightweight version of log parser that keeps track of running tests
-  for unexpected timeout or crash."""
-
-  def __init__(self):
-    self._current_test = None
-    self._completed = False
-    self._test_start = re.compile(r'\[\s+RUN\s+\] (\w+\.\w+)')
-    self._test_ok = re.compile(r'\[\s+OK\s+\] (\w+\.\w+)')
-    self._test_fail = re.compile(r'\[\s+FAILED\s+\] (\w+\.\w+)')
-    self._test_passed = re.compile(r'\[\s+PASSED\s+\] \d+ tests?.')
-
-    self._failed_tests = set()
-
-  def OnReceiveLine(self, line):
-    results = self._test_start.search(line)
-    if results:
-      self._current_test = results.group(1)
-      return
-
-    results = self._test_ok.search(line)
-    if results:
-      self._current_test = ''
-      return
-
-    results = self._test_fail.search(line)
-    if results:
-      self._failed_tests.add(results.group(1))
-      self._current_test = ''
-      return
-
-    results = self._test_passed.search(line)
-    if results:
-      self._completed = True
-      self._current_test = ''
-      return
-
-  def GetResultsMap(self):
-    """Returns a map of TestResults."""
-
-    if self._current_test:
-      self._failed_tests.add(self._current_test)
-
-    test_results_map = dict()
-    for test in self._failed_tests:
-      test_results_map[canonical_name(test)] = [TestResult(test, failed=True)]
-
-    return test_results_map
-
-  def CompletedWithoutFailure(self):
-    """Returns True if all tests completed and no tests failed unexpectedly."""
-
-    if not self._completed:
-      return False
-
-    for test in self._failed_tests:
-      test_modifier = TestResult(test, failed=True).modifier
-      if test_modifier not in (TestResult.FAILS, TestResult.FLAKY):
-        return False
-
-    return True
-
-
 def GetResultsMap(observer):
   """Returns a map of TestResults."""
 
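
For reference, the deleted GTestUnexpectedDeathTracker consumed raw gtest output line by line: a crash or hang shows up as a test that was started with [ RUN ] but never reported [ OK ] or [ FAILED ], and as a run with no final [ PASSED ] summary. A small usage sketch, assuming the class (and the TestResult/canonical_name helpers it relies on) as they existed before this commit; the test names are made up:

tracker = GTestUnexpectedDeathTracker()

# Hypothetical gtest console output: the second test crashes, so it never
# prints an [  OK ] / [  FAILED  ] line and no [  PASSED  ] summary follows.
for line in ['[ RUN      ] FooTest.Passes',
             '[       OK ] FooTest.Passes (3 ms)',
             '[ RUN      ] FooTest.CrashesHard']:
  tracker.OnReceiveLine(line)

print(tracker.CompletedWithoutFailure())  # False: the run never completed
print(tracker.GetResultsMap())            # 'FooTest.CrashesHard' maps to a failed TestResult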
(The remaining 5 changed files are not shown.)
