diff --git a/pytest_subtests.py b/pytest_subtests.py
index e66bca9..599801a 100644
--- a/pytest_subtests.py
+++ b/pytest_subtests.py
@@ -71,6 +71,10 @@ def _from_json(cls, reportdict):
         )
         return report
 
+    @classmethod
+    def from_test_report(cls, test_report):
+        return super()._from_json(test_report._to_json())
+
 
 def _addSubTest(self, test_case, test, exc_info):
     if exc_info is not None:
@@ -78,7 +82,8 @@ def _addSubTest(self, test_case, test, exc_info):
         call_info = make_call_info(
             ExceptionInfo(exc_info), start=0, stop=0, duration=0, when="call"
         )
-        sub_report = SubTestReport.from_item_and_call(item=self, call=call_info)
+        report = self.ihook.pytest_runtest_makereport(item=self, call=call_info)
+        sub_report = SubTestReport.from_test_report(report)
         sub_report.context = SubTestContext(msg, dict(test.params))
         self.ihook.pytest_runtest_logreport(report=sub_report)
         if check_interactive_exception(call_info, sub_report):
@@ -171,7 +176,8 @@ def test(self, msg=None, **kwargs):
         call_info = make_call_info(
             exc_info, start=start, stop=stop, duration=duration, when="call"
         )
-        sub_report = SubTestReport.from_item_and_call(item=self.item, call=call_info)
+        report = self.ihook.pytest_runtest_makereport(item=self.item, call=call_info)
+        sub_report = SubTestReport.from_test_report(report)
         sub_report.context = SubTestContext(msg, kwargs.copy())
 
         captured.update_report(sub_report)
diff --git a/tests/test_subtests.py b/tests/test_subtests.py
index 101fd35..8f4a7d9 100644
--- a/tests/test_subtests.py
+++ b/tests/test_subtests.py
@@ -103,6 +103,27 @@ def test_foo(subtests):
         expected_lines += ["* 1 passed, 3 skipped in *"]
         result.stdout.fnmatch_lines(expected_lines)
 
+    def test_xfail(self, testdir, mode):
+        testdir.makepyfile(
+            """
+            import pytest
+            def test_foo(subtests):
+                for i in range(5):
+                    with subtests.test(msg="custom", i=i):
+                        if i % 2 == 0:
+                            pytest.xfail('even number')
+            """
+        )
+        if mode == "normal":
+            result = testdir.runpytest()
+            expected_lines = ["collected 1 item"]
+        else:
+            pytest.importorskip("xdist")
+            result = testdir.runpytest("-n1")
+            expected_lines = ["gw0 [1]"]
+        expected_lines += ["* 1 passed, 3 xfailed in *"]
+        result.stdout.fnmatch_lines(expected_lines)
+
 
 class TestSubTest:
     """
@@ -234,6 +255,35 @@ def test_foo(self):
             ["collected 1 item", "* 3 skipped, 1 passed in *"]
         )
 
+    @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"])
+    @pytest.mark.xfail(reason="Not producing the expected results (#5)")
+    def test_xfail(self, testdir, runner):
+        p = testdir.makepyfile(
+            """
+            import pytest
+            from unittest import expectedFailure, TestCase, main
+
+            class T(TestCase):
+                @expectedFailure
+                def test_foo(self):
+                    for i in range(5):
+                        with self.subTest(msg="custom", i=i):
+                            if i % 2 == 0:
+                                raise pytest.xfail('even number')
+
+            if __name__ == '__main__':
+                main()
+            """
+        )
+        if runner == "unittest":
+            result = testdir.runpython(p)
+            result.stderr.fnmatch_lines(["Ran 1 test in *", "OK (expected failures=3)"])
+        else:
+            result = testdir.runpytest(p)
+            result.stdout.fnmatch_lines(
+                ["collected 1 item", "* 3 xfailed, 1 passed in *"]
+            )
+
 
 class TestCapture:
     def create_file(self, testdir):