From e4007707d5f35bf048fafb3f1f25232a53aabd47 Mon Sep 17 00:00:00 2001
From: Franck Chauvel
Date: Wed, 10 Apr 2019 19:57:47 +0200
Subject: [PATCH] Generate JUnit/XML test report (#204)

fchauvel's excellent JUnit report feature from #204. Resolves #104
---
 .gitignore                 |   5 +
 green/cmdline.py           |   8 +
 green/config.py            |   8 +-
 green/junit.py             | 169 ++++++++++++++++++++++
 green/test/test_cmdline.py |  17 +++
 green/test/test_junit.py   | 289 +++++++++++++++++++++++++++++++++++++
 requirements.txt           |   1 +
 setup.py                   |   1 +
 8 files changed, 497 insertions(+), 1 deletion(-)
 create mode 100644 green/junit.py
 create mode 100644 green/test/test_junit.py

diff --git a/.gitignore b/.gitignore
index 4b1d8bd..6e41acb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -43,3 +43,8 @@
 venv*
 env*
 *.sublime-workspace
+
+# Emacs
+\#*\#
+*~
+.\#*
\ No newline at end of file
diff --git a/green/cmdline.py b/green/cmdline.py
index d8fb404..8a40bd3 100644
--- a/green/cmdline.py
+++ b/green/cmdline.py
@@ -6,6 +6,7 @@
 import green.config as config
 
 
+
 def main(argv=None, testing=False):
     args = config.parseArguments(argv)
     args = config.mergeConfig(args, testing)
@@ -68,6 +69,13 @@ def main(argv=None, testing=False):
 
     # Actually run the test_suite
     result = run(test_suite, stream, args, testing)
+    # Generate a test report if required
+    if args.junit_report:
+        from green.junit import JUnitXML
+        adapter = JUnitXML()
+        with open(args.junit_report, "w") as report_file:
+            adapter.save_as(result, report_file)
+
     return(int(not result.wasSuccessful()))
 
 
diff --git a/green/config.py b/green/config.py
index 3b7fb35..023de6c 100644
--- a/green/config.py
+++ b/green/config.py
@@ -46,6 +46,7 @@
         config          = None,  # Not in configs
         file_pattern    = 'test*.py',
         test_pattern    = '*',
+        junit_report    = False,
         run_coverage    = False,
         quiet_coverage  = False,
         clear_omit      = False,
@@ -251,6 +252,11 @@ def parseArguments(argv=None): # pragma: no cover
         metavar='PATTERN',
         help="Pattern to match test method names after "
         "'test'. Default is '*', meaning match methods named 'test*'.",
         default=argparse.SUPPRESS))
+    store_opt(other_args.add_argument('-j', '--junit-report',
+        action='store',
+        metavar="FILENAME",
+        help=("Generate a JUnit XML report."),
+        default=argparse.SUPPRESS))
     cov_args = parser.add_argument_group(
         "Coverage Options ({})".format(coverage_version))
@@ -422,7 +428,7 @@ def mergeConfig(args, testing=False): # pragma: no cover
                 'help', 'logging', 'version', 'disable_unidecode',
                 'failfast', 'run_coverage', 'options', 'completions',
                 'completion_file', 'clear_omit', 'no_skip_report', 'no_tracebacks',
-                'disable_windows', 'quiet_coverage']:
+                'disable_windows', 'quiet_coverage', 'junit_report']:
             config_getter = config.getboolean
         elif name in ['processes', 'debug', 'verbose']:
             config_getter = config.getint
diff --git a/green/junit.py b/green/junit.py
new file mode 100644
index 0000000..e48a462
--- /dev/null
+++ b/green/junit.py
@@ -0,0 +1,169 @@
+from __future__ import unicode_literals
+
+from lxml.etree import Element, SubElement, tostring as to_xml
+
+
+
+class JUnitDialect(object):
+    """
+    Hold the name of the elements defined in the JUnit XML schema (for JUnit 4).
+    """
+    CLASS_NAME = "classname"
+    ERROR = "error"
+    ERROR_COUNT = "errors"
+    FAILURE = "failure"
+    FAILURE_COUNT = "failures"
+    NAME = "name"
+    SKIPPED = "skipped"
+    SKIPPED_COUNT = "skipped"
+    SYSTEM_ERR = "system-err"
+    SYSTEM_OUT= "system-out"
+    TEST_CASE = "testcase"
+    TEST_COUNT = "tests"
+    TEST_SUITE = "testsuite"
+    TEST_SUITES = "testsuites"
+
+
+
+class Verdict(object):
+    """
+    Enumeration of possible test verdicts
+    """
+    PASSED=0
+    FAILED=1
+    ERROR=2
+    SKIPPED=3
+
+
+
+class JUnitXML(object):
+    """
+    Serialize a GreenTestResult object into a JUnit XML file, that can
+    be read by continuous integration servers, for example.
+
+    See GitHub Issue #104
+    See Option '-j' / '--junit-report'
+    """
+
+    def save_as(self, test_results, destination):
+        xml_root = Element(JUnitDialect.TEST_SUITES)
+        tests_by_class = self._group_tests_by_class(test_results)
+        for name, suite in tests_by_class.items():
+            xml_suite = self._convert_suite(test_results, name, suite)
+            xml_root.append(xml_suite)
+        xml = to_xml(xml_root,
+                     xml_declaration=True,
+                     pretty_print=True,
+                     encoding="utf-8",
+                     method="xml")
+        destination.write(xml.decode())
+
+
+    def _group_tests_by_class(self, test_results):
+        result = {}
+        self._add_passing_tests(result, test_results)
+        self._add_failures(result, test_results)
+        self._add_errors(result, test_results)
+        self._add_skipped_tests(result, test_results)
+        return result
+
+
+    @staticmethod
+    def _add_passing_tests(collection, test_results):
+        for each_test in test_results.passing:
+            key = JUnitXML._suite_name(each_test)
+            if key not in collection:
+                collection[key] = []
+            collection[key].append((Verdict.PASSED, each_test))
+
+
+    @staticmethod
+    def _suite_name(test):
+        return "%s.%s" % (test.module, test.class_name)
+
+
+    @staticmethod
+    def _add_failures(collection, test_results):
+        for each_test, failure in test_results.failures:
+            key = JUnitXML._suite_name(each_test)
+            if key not in collection:
+                collection[key] = []
+            collection[key].append((Verdict.FAILED, each_test, failure))
+
+
+    @staticmethod
+    def _add_errors(collection, test_results):
+        for each_test, error in test_results.errors:
+            key = JUnitXML._suite_name(each_test)
+            if key not in collection:
+                collection[key] = []
+            collection[key].append((Verdict.ERROR, each_test, error))
+
+
+    @staticmethod
+    def _add_skipped_tests(collection, test_results):
+        for each_test, reason in test_results.skipped:
+            key = JUnitXML._suite_name(each_test)
+            if key not in collection:
+                collection[key] = []
+            collection[key].append((Verdict.SKIPPED, each_test, reason))
+
+
+    def _convert_suite(self, results, name, suite):
+        xml_suite = Element(JUnitDialect.TEST_SUITE)
+        xml_suite.set(JUnitDialect.NAME, name)
+        xml_suite.set(JUnitDialect.TEST_COUNT,
+                      str(len(suite)))
+        xml_suite.set(JUnitDialect.FAILURE_COUNT,
+                      str(self._count_test_with_verdict(Verdict.FAILED, suite)))
+        xml_suite.set(JUnitDialect.ERROR_COUNT,
+                      str(self._count_test_with_verdict(Verdict.ERROR, suite)))
+        xml_suite.set(JUnitDialect.SKIPPED_COUNT,
+                      str(self._count_test_with_verdict(Verdict.SKIPPED, suite)))
+        for each_test in suite:
+            xml_test = self._convert_test(results, *each_test)
+            xml_suite.append(xml_test)
+        return xml_suite
+
+
+    @staticmethod
+    def _count_test_with_verdict(verdict, suite):
+        return sum(1 for entry in suite if entry[0] == verdict)
+
+
+    def _convert_test(self, results, verdict, test, *details):
+        xml_test = Element(JUnitDialect.TEST_CASE)
+        xml_test.set(JUnitDialect.NAME, test.method_name)
+        xml_test.set(JUnitDialect.CLASS_NAME,
+                     test.class_name)
+
+        xml_verdict = self._convert_verdict(verdict, test, details)
+        if verdict:
+            xml_test.append(xml_verdict)
+
+        if test in results.stdout_output:
+            system_out = Element(JUnitDialect.SYSTEM_OUT)
+            system_out.text = results.stdout_output[test]
+            xml_test.append(system_out)
+
+        if test in results.stderr_errput:
+            system_err = Element(JUnitDialect.SYSTEM_ERR)
+            system_err.text = results.stderr_errput[test]
+            xml_test.append(system_err)
+
+        return xml_test
+
+
+    def _convert_verdict(self, verdict, test, details):
+        if verdict == Verdict.FAILED:
+            failure = Element(JUnitDialect.FAILURE)
+            failure.text = str(details[0])
+            return failure
+        if verdict == Verdict.ERROR:
+            error = Element(JUnitDialect.ERROR)
+            error.text = str(details[0])
+            return error
+        if verdict == Verdict.SKIPPED:
+            skipped = Element(JUnitDialect.SKIPPED)
+            skipped.text = str(details[0])
+            return skipped
+        return None
diff --git a/green/test/test_cmdline.py b/green/test/test_cmdline.py
index e38813b..f661641 100644
--- a/green/test/test_cmdline.py
+++ b/green/test/test_cmdline.py
@@ -15,6 +15,8 @@
 except:
     from StringIO import StringIO
 
+from os.path import isfile, join
+
 try:
     from unittest.mock import MagicMock
 except:
@@ -148,3 +150,18 @@ def test_import_cmdline_module(self):
         except:
             pass # Python 2.7's reload is builtin
         reload(cmdline)
+
+
+    def test_generate_junit_test_report(self):
+        """
+        Test that a report is generated when we use the '--junit-report' option.
+        """
+        tmpdir = tempfile.mkdtemp()
+        report = join(tmpdir, "test_report.xml")
+        self.assertFalse(isfile(report))
+
+        argv = ["--junit-report", report, "example/proj" ]
+        cmdline.main(argv)
+
+        self.assertTrue(isfile(report))
+        shutil.rmtree(tmpdir)
diff --git a/green/test/test_junit.py b/green/test/test_junit.py
new file mode 100644
index 0000000..c1664da
--- /dev/null
+++ b/green/test/test_junit.py
@@ -0,0 +1,289 @@
+from __future__ import unicode_literals
+
+from green.config import default_args
+from green.output import GreenStream
+from green.junit import JUnitXML, JUnitDialect, Verdict
+from green.result import GreenTestResult, BaseTestResult, ProtoTest, proto_error
+
+from io import StringIO
+
+from sys import exc_info
+
+from unittest import TestCase
+
+from xml.etree.ElementTree import fromstring as from_xml, tostring as to_xml
+
+
+
+def test(module, class_name, method_name):
+    test = ProtoTest()
+    test.module = module
+    test.class_name = class_name
+    test.method_name = method_name
+    return test
+
+
+
+class JUnitXMLReportIsGenerated(TestCase):
+
+
+    def setUp(self):
+        self._destination = StringIO()
+        self._test_results = GreenTestResult(default_args,
+                                             GreenStream(StringIO()))
+        self._adapter = JUnitXML()
+
+        self._test = ProtoTest()
+        self._test.module = "my_module"
+        self._test.class_name = "MyClass"
+        self._test.method_name = "my_method"
+
+
+
+    def test_when_the_results_contain_only_one_successful_test(self):
+        self._test_results.addSuccess(self._test)
+
+        self._adapter.save_as(self._test_results, self._destination)
+
+        self._assert_report_is({
+            "my_module.MyClass": {
+                "tests": {
+                    "my_method": {"verdict": Verdict.PASSED}
+                }
+            }
+        })
+
+
+    def test_when_the_results_contain_tests_with_various_verdict(self):
+        self._test_results.addSuccess(
+            test("my.module", "MyClass", "test_method1"))
+        self._test_results.addSuccess(
+            test("my.module", "MyClass", "test_method2"))
+        self._record_failure(
+            test("my.module", "MyClass", "test_method3"))
+        self._record_failure(
+            test("my.module", "MyClass", "test_method4"))
+        self._record_error(
+            test("my.module", "MyClass", "test_method5"))
+        self._test_results.addSkip(
+            test("my.module", "MyClass", "test_method6"),
+            "Take too long")
+
+        self._adapter.save_as(self._test_results, self._destination)
+
+        self._assert_report_is({
+            "my.module.MyClass": {
+                "#tests": "6",
+                "#failures": "2",
+                "#errors": "1",
+                "#skipped": "1",
+                "tests": {
+                    "test_method1": { "verdict": Verdict.PASSED },
+                    "test_method2": { "verdict": Verdict.PASSED },
+                    "test_method3": { "verdict": Verdict.FAILED },
+                    "test_method4": { "verdict": Verdict.FAILED },
+                    "test_method5": { "verdict": Verdict.ERROR },
+                    "test_method6": { "verdict": Verdict.SKIPPED }
+                }
+            },
+        })
+
+
+    def _record_failure(self, test):
+        try:
+            raise ValueError("Wrong value")
+        except:
+            error = proto_error(exc_info())
+        self._test_results.addFailure(test, error)
+
+
+    def _record_error(self, test):
+        try:
+            raise ValueError("Wrong value")
+        except:
+            error = proto_error(exc_info())
+        self._test_results.addError(test, error)
+
+
+
+    def test_when_the_results_contain_only_one_test_with_output(self):
+        output = "This is the output of the test"
+        self._test_results.recordStdout(self._test, output)
+        self._test_results.addSuccess(self._test)
+
+        self._adapter.save_as(self._test_results, self._destination)
+
+        self._assert_report_is({
+            "my_module.MyClass": {
+                "tests": {
+                    "my_method": {
+                        "verdict": Verdict.PASSED,
+                        "stdout": output
+                    }
+                }
+            }
+        })
+
+
+    def test_when_the_results_contain_only_one_test_with_errput(self):
+        errput = "This is the errput of the test"
+        self._test_results.recordStderr(self._test, errput)
+        self._test_results.addSuccess(self._test)
+
+        self._adapter.save_as(self._test_results, self._destination)
+
+        self._assert_report_is({
+            "my_module.MyClass": {
+                "tests": {
+                    "my_method": {
+                        "verdict": Verdict.PASSED,
+                        "stderr": errput
+                    }
+                }
+            }
+        })
+
+
+
+    def test_when_the_results_contain_only_one_failed_test(self):
+        self._record_failure(
+            test("my_module", "MyClass", "my_method"))
+
+        self._adapter.save_as(self._test_results, self._destination)
+
+        self._assert_report_is({
+            "my_module.MyClass": {
+                "tests" : {
+                    "my_method": {"verdict": Verdict.FAILED}
+                }
+            }
+        })
+
+
+    def test_when_the_results_contain_only_one_erroneous_test(self):
+        self._record_error(
+            test("my_module", "MyClass", "my_method"))
+
+        self._adapter.save_as(self._test_results, self._destination)
+
+        self._assert_report_is({
+            "my_module.MyClass": {
+                "tests": {
+                    "my_method": {"verdict": Verdict.ERROR}
+                }
+            }
+        })
+
+
+    def test_when_the_results_contain_only_one_skipped_test(self):
+        self._test_results.addSkip(self._test, "reason for skipping")
+
+        self._adapter.save_as(self._test_results, self._destination)
+
+        self._assert_report_is({
+            "my_module.MyClass": {
+                "tests": {
+                    "my_method": {"verdict": Verdict.SKIPPED}
+                }
+            }
+        })
+
+
+    def _assert_report_is(self, report):
+        """
+        Verify the structure of the generated XML text against the given
+        'report' structure.
+        """
+        root = from_xml(self._destination.getvalue())
+        test_suites = root.findall(JUnitDialect.TEST_SUITE)
+        self.assertEqual(len(report), len(test_suites))
+        for each_suite in test_suites:
+            self._assert_suite(report, each_suite)
+
+
+    def _assert_suite(self, expected_report, suite):
+        """
+        Verify that the given 'suite' matches one in the expected test report.
+        """
+        name = suite.get(JUnitDialect.NAME)
+        self.assertIsNotNone(name)
+        self.assertIn(name, expected_report)
+        expected_suite = expected_report[name]
+
+        # Check the count of tests
+        if "#tests" in expected_suite:
+            self.assertEqual(expected_suite["#tests"],
+                             suite.get(JUnitDialect.TEST_COUNT))
+
+        # Check the count of failures
+        if "#failures" in expected_suite:
+            self.assertEqual(expected_suite["#failures"],
+                             suite.get(JUnitDialect.FAILURE_COUNT))
+
+        # Check the count of errors
+        if "#errors" in expected_suite:
+            self.assertEqual(expected_suite["#errors"],
+                             suite.get(JUnitDialect.ERROR_COUNT))
+
+        # Check the count of skipped tests
+        if "#skipped" in expected_suite:
+            self.assertEqual(expected_suite["#skipped"],
+                             suite.get(JUnitDialect.SKIPPED_COUNT))
+
+        # Check individual test reports
+        self.assertEqual(len(expected_suite["tests"]), len(suite))
+        for each_test in suite:
+            self._assert_test(expected_suite["tests"], each_test)
+
+
+    def _assert_test(self, expected_suite, test):
+        """
+        Verify that the given 'test' matches one in the expected test suite.
+        """
+        name = test.get(JUnitDialect.NAME)
+        self.assertIsNotNone(test)
+        self.assertIn(name, expected_suite)
+        expected_test = expected_suite[name]
+
+        test_passed = True
+
+        for key, expected in expected_test.items():
+            if key == "verdict":
+                self._assert_verdict(expected, test)
+
+            elif key == "stdout":
+                system_out = test.find(JUnitDialect.SYSTEM_OUT)
+                self.assertIsNotNone(system_out)
+                self.assertEqual(expected, system_out.text)
+
+            elif key == "stderr":
+                system_err = test.find(JUnitDialect.SYSTEM_ERR)
+                self.assertIsNotNone(system_err)
+                self.assertEqual(expected, system_err.text)
+
+
+
+    def _assert_verdict(self, expected_verdict, test):
+        failure = test.find(JUnitDialect.FAILURE)
+        error = test.find(JUnitDialect.ERROR)
+        skipped = test.find(JUnitDialect.SKIPPED)
+
+        if expected_verdict == Verdict.FAILED:
+            self.assertIsNotNone(failure)
+            self.assertIsNone(error)
+            self.assertIsNone(skipped)
+
+        elif expected_verdict == Verdict.ERROR:
+            self.assertIsNone(failure)
+            self.assertIsNotNone(error)
+            self.assertIsNone(skipped)
+
+        elif expected_verdict == Verdict.SKIPPED:
+            self.assertIsNone(failure)
+            self.assertIsNone(error)
+            self.assertIsNotNone(skipped)
+
+        else: # Verdict == PASSED
+            self.assertIsNone(failure)
+            self.assertIsNone(error)
+            self.assertIsNone(skipped)
diff --git a/requirements.txt b/requirements.txt
index 29c1cc7..000456e 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,3 +3,4 @@ coverage
 mock
 unidecode
 backports.shutil_get_terminal_size; python_version < '3.3'
+lxml
diff --git a/setup.py b/setup.py
index de9ae20..0d83c2b 100644
--- a/setup.py
+++ b/setup.py
@@ -13,6 +13,7 @@
     'colorama',
    'coverage',
     'unidecode',
+    'lxml'
 ]
 if sys.version_info[0] == 2:
     dependencies.append('mock')