From 4fc20d09feb39d31d6c13d48deae717c2d816cf3 Mon Sep 17 00:00:00 2001
From: Raphael Pierzina
Date: Fri, 5 Aug 2016 19:25:55 +0100
Subject: [PATCH 01/15] Change outcome to 'passed' for xfail unless it's strict

---
 _pytest/skipping.py | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/_pytest/skipping.py b/_pytest/skipping.py
index 18e038d2c84..66964f94566 100644
--- a/_pytest/skipping.py
+++ b/_pytest/skipping.py
@@ -230,7 +230,8 @@ def pytest_runtest_makereport(item, call):
     if hasattr(item, '_unexpectedsuccess') and rep.when == "call":
         # we need to translate into how pytest encodes xpass
         rep.wasxfail = "reason: " + repr(item._unexpectedsuccess)
-        rep.outcome = "failed"
+        # TODO: Do we need to check for strict xfail here as well?
+        rep.outcome = "passed"
     elif item.config.option.runxfail:
         pass   # don't interefere
     elif call.excinfo and call.excinfo.errisinstance(pytest.xfail.Exception):
@@ -245,7 +246,12 @@ def pytest_runtest_makereport(item, call):
                 rep.outcome = "skipped"
                 rep.wasxfail = evalxfail.getexplanation()
         elif call.when == "call":
-            rep.outcome = "failed"  # xpass outcome
+            strict_default = item.config.getini('xfail_strict')
+            is_strict_xfail = evalxfail.get('strict', strict_default)
+            if is_strict_xfail:
+                rep.outcome = "failed"
+            else:
+                rep.outcome = "passed"
             rep.wasxfail = evalxfail.getexplanation()
     elif evalskip is not None and rep.skipped and type(rep.longrepr) is tuple:
         # skipped by mark.skipif; change the location of the failure
@@ -260,7 +266,7 @@ def pytest_report_teststatus(report):
     if hasattr(report, "wasxfail"):
         if report.skipped:
             return "xfailed", "x", "xfail"
-        elif report.failed:
+        elif report.passed:
             return "xpassed", "X", ("XPASS", {'yellow': True})

 # called by the terminalreporter instance/plugin

From 10a6ed17071c2e416a917ecaac8f4b5cf246a4bc Mon Sep 17 00:00:00 2001
From: Raphael Pierzina
Date: Fri, 12 Aug 2016 23:18:02 +0100
Subject: [PATCH 02/15] Update unittest test to expect failure for an unexpected success

---
 testing/test_unittest.py | 30 ++++++++++++++++++++++--------
 1 file changed, 22 insertions(+), 8 deletions(-)

diff --git a/testing/test_unittest.py b/testing/test_unittest.py
index 144aad79bf4..39a31cfeaa6 100644
--- a/testing/test_unittest.py
+++ b/testing/test_unittest.py
@@ -588,23 +588,37 @@ def test_hello(self, arg1):
     assert result.ret == 1

 @pytest.mark.skipif("sys.version_info < (2,7)")
-def test_unittest_unexpected_failure(testdir):
+def test_unittest_expected_failure_for_failing_test_is_xfail(testdir):
     testdir.makepyfile("""
         import unittest
         class MyTestCase(unittest.TestCase):
             @unittest.expectedFailure
-            def test_func1(self):
-                assert 0
+            def test_failing_test_is_xfail(self):
+                assert False
+    """)
+    result = testdir.runpytest("-rxX")
+    result.stdout.fnmatch_lines([
+        "*XFAIL*MyTestCase*test_failing_test_is_xfail*",
+        "*1 xfailed*",
+    ])
+    assert result.ret == 0
+
+@pytest.mark.skipif("sys.version_info < (2,7)")
+def test_unittest_expected_failure_for_passing_test_is_fail(testdir):
+    testdir.makepyfile("""
+        import unittest
+        class MyTestCase(unittest.TestCase):
             @unittest.expectedFailure
-            def test_func2(self):
-                assert 1
+            def test_passing_test_is_fail(self):
+                assert True
     """)
     result = testdir.runpytest("-rxX")
     result.stdout.fnmatch_lines([
-        "*XFAIL*MyTestCase*test_func1*",
-        "*XPASS*MyTestCase*test_func2*",
-        "*1 xfailed*1 xpass*",
+        "*FAILURES*",
+        "*MyTestCase*test_passing_test_is_fail*",
+        "*1 failed*",
    ])
+    assert result.ret == 1

 @pytest.mark.parametrize('fix_type, stmt', [
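[Note on patches 01-02] The user-visible effect so far: a test marked xfail
(non-strict, the default) that unexpectedly passes now gets outcome "passed"
and is reported as XPASS instead of failing the run. A minimal sketch of that
behavior, assuming a pytest with these patches applied; the file name is
illustrative:

    # test_xpass_sketch.py
    import pytest

    @pytest.mark.xfail(reason="tracked bug, expected to fail")
    def test_unexpectedly_passing():
        # Non-strict xfail: if this passes, the report outcome becomes
        # "passed" and the terminal shows 'X' / XPASS rather than a failure.
        assert 1 + 1 == 2

Running `pytest -rxX test_xpass_sketch.py` should end with `1 xpassed` and
exit code 0 under these patches.
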
From 296f42a2c9f0870c793444837ae58c2da816f9e7 Mon Sep 17 00:00:00 2001
From: Raphael Pierzina
Date: Fri, 12 Aug 2016 23:18:36 +0100
Subject: [PATCH 03/15] Treat unittest.expectedFailure pass as a failure

---
 _pytest/skipping.py | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/_pytest/skipping.py b/_pytest/skipping.py
index 66964f94566..bd27870d6ea 100644
--- a/_pytest/skipping.py
+++ b/_pytest/skipping.py
@@ -228,10 +228,13 @@ def pytest_runtest_makereport(item, call):
     evalskip = getattr(item, '_evalskip', None)
     # unitttest special case, see setting of _unexpectedsuccess
     if hasattr(item, '_unexpectedsuccess') and rep.when == "call":
-        # we need to translate into how pytest encodes xpass
-        rep.wasxfail = "reason: " + repr(item._unexpectedsuccess)
-        # TODO: Do we need to check for strict xfail here as well?
-        rep.outcome = "passed"
+        # unittest treats an 'unexpected successes' as a failure
+        # which means pytest needs to handle it like a 'xfail(strict=True)'
+        rep.outcome = "failed"
+        if item._unexpectedsuccess:
+            rep.longrepr = "Unexpected success: {0}".format(item._unexpectedsuccess)
+        else:
+            rep.longrepr = "Unexpected success"
     elif item.config.option.runxfail:
         pass   # don't interefere
     elif call.excinfo and call.excinfo.errisinstance(pytest.xfail.Exception):

From 14a4dd0697db7be6043d0715c5b44e9d004dd73c Mon Sep 17 00:00:00 2001
From: Raphael Pierzina
Date: Fri, 12 Aug 2016 23:31:38 +0100
Subject: [PATCH 04/15] Extend test to verify longrepr in stdout

---
 testing/test_unittest.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/testing/test_unittest.py b/testing/test_unittest.py
index 39a31cfeaa6..1390ed95cf4 100644
--- a/testing/test_unittest.py
+++ b/testing/test_unittest.py
@@ -616,6 +616,7 @@ def test_passing_test_is_fail(self):
     result.stdout.fnmatch_lines([
         "*FAILURES*",
         "*MyTestCase*test_passing_test_is_fail*",
+        "*Unexpected success*",
         "*1 failed*",
     ])
     assert result.ret == 1

From 225341cf2ca4733a739ec7ba2fe63cb834e0bb34 Mon Sep 17 00:00:00 2001
From: Raphael Pierzina
Date: Sat, 13 Aug 2016 00:00:51 +0100
Subject: [PATCH 05/15] Set wasxfail only for xpass w/o strict and else set longrepr

---
 _pytest/skipping.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/_pytest/skipping.py b/_pytest/skipping.py
index bd27870d6ea..651aa1a1970 100644
--- a/_pytest/skipping.py
+++ b/_pytest/skipping.py
@@ -251,11 +251,13 @@ def pytest_runtest_makereport(item, call):
         elif call.when == "call":
             strict_default = item.config.getini('xfail_strict')
             is_strict_xfail = evalxfail.get('strict', strict_default)
+            explanation = evalxfail.getexplanation()
             if is_strict_xfail:
                 rep.outcome = "failed"
+                rep.longrepr = "[XPASS(strict)] {0}".format(explanation)
             else:
                 rep.outcome = "passed"
-            rep.wasxfail = evalxfail.getexplanation()
+                rep.wasxfail = explanation
     elif evalskip is not None and rep.skipped and type(rep.longrepr) is tuple:
         # skipped by mark.skipif; change the location of the failure
         # to point to the item definition, otherwise it will display
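[Note on patches 03-05] Patch 05 is where the strict/non-strict split becomes
visible to users: a strict xfail that passes fails the test with a
"[XPASS(strict)] <reason>" longrepr, and the project-wide default comes from
the `xfail_strict` ini option read via `config.getini`. A minimal sketch,
assuming a pytest with these patches; file names are illustrative:

    # pytest.ini
    [pytest]
    xfail_strict = true

    # test_strict_sketch.py
    import pytest

    @pytest.mark.xfail(reason="must fail")
    def test_picks_up_ini_default():
        # With xfail_strict = true, this unexpected pass becomes a hard
        # failure whose longrepr reads "[XPASS(strict)] must fail".
        assert True

    @pytest.mark.xfail(strict=False, reason="flaky")
    def test_opts_out_per_test():
        # strict=False on the marker overrides the ini default, so this
        # is reported as xpassed with wasxfail set to the reason.
        assert True

The design choice in patch 05 is that strict xpasses carry `longrepr` (like
any failure) while only non-strict xpasses keep the `wasxfail` attribute.
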
From 55ec1d7f56d56902e1a3fb4cbd1e9796f9420fe6 Mon Sep 17 00:00:00 2001
From: Raphael Pierzina
Date: Mon, 15 Aug 2016 23:58:16 +0100
Subject: [PATCH 06/15] Update test_junitxml.py to interpret XPASS as passed

---
 testing/test_junitxml.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/testing/test_junitxml.py b/testing/test_junitxml.py
index a4f10dec506..1d2a5853b05 100644
--- a/testing/test_junitxml.py
+++ b/testing/test_junitxml.py
@@ -100,7 +100,7 @@ def test_xpass():
         result, dom = runandparse(testdir)
         assert result.ret
         node = dom.find_first_by_tag("testsuite")
-        node.assert_attr(name="pytest", errors=0, failures=1, skips=3, tests=5)
+        node.assert_attr(name="pytest", errors=0, failures=1, skips=2, tests=5)

     def test_timing_function(self, testdir):
         testdir.makepyfile("""

From ea379e0e4f07182ebb10b1a4002f29f8ab36e5c2 Mon Sep 17 00:00:00 2001
From: Raphael Pierzina
Date: Wed, 17 Aug 2016 22:02:54 +0100
Subject: [PATCH 07/15] Fix test in test_junitxml and add one for strict

---
 testing/test_junitxml.py | 25 +++++++++++++++++++++----
 1 file changed, 21 insertions(+), 4 deletions(-)

diff --git a/testing/test_junitxml.py b/testing/test_junitxml.py
index 1d2a5853b05..d5d6aa69532 100644
--- a/testing/test_junitxml.py
+++ b/testing/test_junitxml.py
@@ -325,16 +325,33 @@ def test_xpass():
         result, dom = runandparse(testdir)
         # assert result.ret
         node = dom.find_first_by_tag("testsuite")
-        node.assert_attr(skips=1, tests=1)
+        node.assert_attr(skips=0, tests=1)
         tnode = node.find_first_by_tag("testcase")
         tnode.assert_attr(
             file="test_xfailure_xpass.py",
             line="1",
             classname="test_xfailure_xpass",
             name="test_xpass")
-        fnode = tnode.find_first_by_tag("skipped")
-        fnode.assert_attr(message="xfail-marked test passes unexpectedly")
-        # assert "ValueError" in fnode.toxml()
+
+    def test_xfailure_xpass_strict(self, testdir):
+        testdir.makepyfile("""
+            import pytest
+            @pytest.mark.xfail(strict=True, reason="This needs to fail!")
+            def test_xpass():
+                pass
+        """)
+        result, dom = runandparse(testdir)
+        # assert result.ret
+        node = dom.find_first_by_tag("testsuite")
+        node.assert_attr(skips=0, tests=1)
+        tnode = node.find_first_by_tag("testcase")
+        tnode.assert_attr(
+            file="test_xfailure_xpass_strict.py",
+            line="1",
+            classname="test_xfailure_xpass_strict",
+            name="test_xpass")
+        fnode = tnode.find_first_by_tag("failure")
+        fnode.assert_attr(message="[XPASS(strict)] This needs to fail!")

     def test_collect_error(self, testdir):
         testdir.makepyfile("syntax error")

From 018197d72aa903c29cb874e155dbdff0a2ce5803 Mon Sep 17 00:00:00 2001
From: Raphael Pierzina
Date: Wed, 17 Aug 2016 22:14:51 +0100
Subject: [PATCH 08/15] Fix broken test in test_skipping and add one for strict xfail

---
 testing/test_skipping.py | 18 ++++++++++++++++--
 1 file changed, 16 insertions(+), 2 deletions(-)

diff --git a/testing/test_skipping.py b/testing/test_skipping.py
index 2bfb6a8dc58..4339da7086d 100644
--- a/testing/test_skipping.py
+++ b/testing/test_skipping.py
@@ -145,7 +145,20 @@ def test_func():
     def test_xfail_xpassed(self, testdir):
         item = testdir.getitem("""
             import pytest
-            @pytest.mark.xfail
+            @pytest.mark.xfail(reason="nope")
+            def test_func():
+                assert 1
+        """)
+        reports = runtestprotocol(item, log=False)
+        assert len(reports) == 3
+        callreport = reports[1]
+        assert callreport.passed
+        assert callreport.wasxfail == "nope"
+
+    def test_xfail_xpassed_strict(self, testdir):
+        item = testdir.getitem("""
+            import pytest
+            @pytest.mark.xfail(strict=True, reason="nope")
             def test_func():
                 assert 1
         """)
@@ -153,7 +166,8 @@ def test_func():
         reports = runtestprotocol(item, log=False)
         assert len(reports) == 3
         callreport = reports[1]
         assert callreport.failed
-        assert callreport.wasxfail == ""
+        assert callreport.longrepr == "[XPASS(strict)] nope"
+        assert not hasattr(callreport, "wasxfail")

     def test_xfail_run_anyway(self, testdir):
         testdir.makepyfile("""
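[Note on patches 06-08] The junitxml changes boil down to: a non-strict XPASS
is now serialized as a plain <testcase> with no child element (counted as a
pass), while a strict XPASS gets a <failure> child whose message carries the
"[XPASS(strict)]" longrepr. A sketch of inspecting such a report, assuming a
pytest of this era with these patches; the report path is illustrative:

    # Produce the report first, e.g.: pytest --junitxml=report.xml
    import xml.etree.ElementTree as ET

    root = ET.parse("report.xml").getroot()
    # pytest of this era writes a single <testsuite> root whose attributes
    # carry the totals these tests assert on (note the non-standard "skips").
    suite = root if root.tag == "testsuite" else root.find("testsuite")
    print("failures:", suite.get("failures"), "skips:", suite.get("skips"))
    for case in suite.iter("testcase"):
        failure = case.find("failure")
        if failure is not None:
            # A strict XPASS shows up here with the "[XPASS(strict)]" message.
            print(case.get("name"), "->", failure.get("message"))
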
From d1f2f779ee701838a2182d57e31535dc3c37d000 Mon Sep 17 00:00:00 2001
From: Raphael Pierzina
Date: Wed, 17 Aug 2016 22:31:56 +0100
Subject: [PATCH 09/15] Use a better xfail reason

---
 testing/test_skipping.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/testing/test_skipping.py b/testing/test_skipping.py
index 4339da7086d..3b4bc7bd2e1 100644
--- a/testing/test_skipping.py
+++ b/testing/test_skipping.py
@@ -145,7 +145,7 @@ def test_func():
     def test_xfail_xpassed(self, testdir):
         item = testdir.getitem("""
             import pytest
-            @pytest.mark.xfail(reason="nope")
+            @pytest.mark.xfail(reason="this is an xfail")
             def test_func():
                 assert 1
         """)
@@ -153,7 +153,7 @@ def test_func():
         assert len(reports) == 3
         callreport = reports[1]
         assert callreport.passed
-        assert callreport.wasxfail == "nope"
+        assert callreport.wasxfail == "this is an xfail"

     def test_xfail_xpassed_strict(self, testdir):
         item = testdir.getitem("""

From 767c28d42257a3ddedd70dcfe429d9333eb5f9cb Mon Sep 17 00:00:00 2001
From: Raphael Pierzina
Date: Wed, 17 Aug 2016 22:32:27 +0100
Subject: [PATCH 10/15] Fix broken test in test_junitxml

---
 testing/test_junitxml.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/testing/test_junitxml.py b/testing/test_junitxml.py
index f4bf2e63166..899cc5880fa 100644
--- a/testing/test_junitxml.py
+++ b/testing/test_junitxml.py
@@ -115,13 +115,16 @@ def test_fail():
             def test_error(fixture):
                 pass
             @pytest.mark.xfail
+            def test_xfail():
+                assert False
+            @pytest.mark.xfail(strict=True)
             def test_xpass():
-                assert 1
+                assert True
        """)
        result, dom = runandparse(testdir)
        assert result.ret
        node = dom.find_first_by_tag("testsuite")
-       node.assert_attr(name="pytest", errors=1, failures=1, skips=1, tests=4)
+       node.assert_attr(name="pytest", errors=1, failures=2, skips=1, tests=5)

    def test_timing_function(self, testdir):
        testdir.makepyfile("""

From 01739529611c433f7f3b91fb0734e428b84335b7 Mon Sep 17 00:00:00 2001
From: Raphael Pierzina
Date: Wed, 17 Aug 2016 23:15:14 +0100
Subject: [PATCH 11/15] Fix py3 xfail expression evaluation and parametrize strict

---
 testing/python/metafunc.py | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/testing/python/metafunc.py b/testing/python/metafunc.py
index d6e45384d91..09d54e47821 100644
--- a/testing/python/metafunc.py
+++ b/testing/python/metafunc.py
@@ -1080,22 +1080,23 @@ def test_increment(n, expected):
         reprec = testdir.inline_run()
         reprec.assertoutcome(passed=2, skipped=1)

-    def test_xfail_passing_is_xpass(self, testdir):
+    @pytest.mark.parametrize('strict', [True, False])
+    def test_xfail_passing_is_xpass(self, testdir, strict):
         s = """
             import pytest

             @pytest.mark.parametrize(("n", "expected"), [
                 (1, 2),
-                pytest.mark.xfail("sys.version > 0", reason="some bug")((2, 3)),
+                pytest.mark.xfail("sys.version_info.major > 0", reason="some bug", strict={strict})((2, 3)),
                 (3, 4),
             ])
             def test_increment(n, expected):
                 assert n + 1 == expected
-        """
+        """.format(strict=strict)
         testdir.makepyfile(s)
         reprec = testdir.inline_run()
-        # xpass is fail, obviously :)
-        reprec.assertoutcome(passed=2, failed=1)
+        passed, failed = (2, 1) if strict else (3, 0)
+        reprec.assertoutcome(passed=passed, failed=failed)

     def test_parametrize_called_in_generate_tests(self, testdir):
         s = """
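[Note on patches 09-11] Patch 11 leans on the era's idiom of attaching a mark
to a single parameter set by calling the created mark with the parameter
tuple. A sketch of the same pattern on its own, assuming a pytest of that
vintage; the reason string is illustrative:

    import pytest

    @pytest.mark.parametrize(("n", "expected"), [
        (1, 2),
        # Calling the MarkDecorator with a parameter tuple marks just that
        # test instance. (Later pytest spells this
        # pytest.param(2, 3, marks=pytest.mark.xfail(...)) instead.)
        # 2 + 1 == 3 holds, so this xfail passes; strict=True turns that
        # unexpected pass into a failure.
        pytest.mark.xfail(reason="known off-by-one bug", strict=True)((2, 3)),
        (3, 4),
    ])
    def test_increment(n, expected):
        assert n + 1 == expected
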
From dfc659f7810284a8607d02a1e0052ff4af002300 Mon Sep 17 00:00:00 2001
From: Raphael Pierzina
Date: Wed, 17 Aug 2016 23:32:56 +0100
Subject: [PATCH 12/15] Fix sys.version_info expression in xfail marker

---
 testing/python/metafunc.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/testing/python/metafunc.py b/testing/python/metafunc.py
index 09d54e47821..249983ff5cb 100644
--- a/testing/python/metafunc.py
+++ b/testing/python/metafunc.py
@@ -1087,7 +1087,7 @@ def test_xfail_passing_is_xpass(self, testdir, strict):

             @pytest.mark.parametrize(("n", "expected"), [
                 (1, 2),
-                pytest.mark.xfail("sys.version_info.major > 0", reason="some bug", strict={strict})((2, 3)),
+                pytest.mark.xfail("sys.version_info > (0, 0, 0)", reason="some bug", strict={strict})((2, 3)),
                 (3, 4),
             ])
             def test_increment(n, expected):

From 4ed412eb5930455e0b14b012e7cd171f0c32752b Mon Sep 17 00:00:00 2001
From: Bruno Oliveira
Date: Wed, 17 Aug 2016 20:29:26 -0300
Subject: [PATCH 13/15] unittest's unexpectedSuccess should work as non-strict xpass

Make sure tests for that behavior obtain the same return code using
either pytest or unittest to run the same file
---
 _pytest/skipping.py      | 20 +++++++++++--
 testing/test_unittest.py | 63 ++++++++++++++++++++++++++++------------
 2 files changed, 61 insertions(+), 22 deletions(-)

diff --git a/_pytest/skipping.py b/_pytest/skipping.py
index 651aa1a1970..7f4d927d9e5 100644
--- a/_pytest/skipping.py
+++ b/_pytest/skipping.py
@@ -220,6 +220,18 @@ def check_strict_xfail(pyfuncitem):
         pytest.fail('[XPASS(strict)] ' + explanation, pytrace=False)


+def _is_unittest_unexpected_success_a_failure():
+    """Return if the test suite should fail if a @expectedFailure unittest test PASSES.
+
+    From https://docs.python.org/3/library/unittest.html?highlight=unittest#unittest.TestResult.wasSuccessful:
+        Changed in version 3.4: Returns False if there were any
+        unexpectedSuccesses from tests marked with the expectedFailure() decorator.
+
+    TODO: this should be moved to the "compat" module.
+    """
+    return sys.version_info >= (3, 4)
+
+
 @pytest.hookimpl(hookwrapper=True)
 def pytest_runtest_makereport(item, call):
     outcome = yield
@@ -228,13 +240,15 @@ def pytest_runtest_makereport(item, call):
     evalskip = getattr(item, '_evalskip', None)
     # unitttest special case, see setting of _unexpectedsuccess
     if hasattr(item, '_unexpectedsuccess') and rep.when == "call":
-        # unittest treats an 'unexpected successes' as a failure
-        # which means pytest needs to handle it like a 'xfail(strict=True)'
-        rep.outcome = "failed"
         if item._unexpectedsuccess:
             rep.longrepr = "Unexpected success: {0}".format(item._unexpectedsuccess)
         else:
             rep.longrepr = "Unexpected success"
+        if _is_unittest_unexpected_success_a_failure():
+            rep.outcome = "failed"
+        else:
+            rep.outcome = "passed"
+            rep.wasxfail = rep.longrepr
     elif item.config.option.runxfail:
         pass   # don't interefere
     elif call.excinfo and call.excinfo.errisinstance(pytest.xfail.Exception):

diff --git a/testing/test_unittest.py b/testing/test_unittest.py
index 1390ed95cf4..9c35e4e3a55 100644
--- a/testing/test_unittest.py
+++ b/testing/test_unittest.py
@@ -419,8 +419,9 @@ def setup_class(cls):
         def test_method(self):
             pass
     """)
+    from _pytest.skipping import _is_unittest_unexpected_success_a_failure
+    should_fail = _is_unittest_unexpected_success_a_failure()
     result = testdir.runpytest("-rxs")
-    assert result.ret == 0
     result.stdout.fnmatch_lines_random([
         "*XFAIL*test_trial_todo*",
         "*trialselfskip*",
@@ -429,8 +430,9 @@ def test_method(self):
         "*i2wanto*",
         "*sys.version_info*",
         "*skip_in_method*",
-        "*4 skipped*3 xfail*1 xpass*",
+        "*1 failed*4 skipped*3 xfailed*" if should_fail else "*4 skipped*3 xfail*1 xpass*",
     ])
+    assert result.ret == (1 if should_fail else 0)

     def test_trial_error(self, testdir):
         testdir.makepyfile("""
@@ -587,39 +589,62 @@ def test_hello(self, arg1):
     assert "TypeError" in result.stdout.str()
     assert result.ret == 1

+
 @pytest.mark.skipif("sys.version_info < (2,7)")
-def test_unittest_expected_failure_for_failing_test_is_xfail(testdir):
-    testdir.makepyfile("""
+@pytest.mark.parametrize('runner', ['pytest', 'unittest'])
+def test_unittest_expected_failure_for_failing_test_is_xfail(testdir, runner):
+    script = testdir.makepyfile("""
         import unittest
         class MyTestCase(unittest.TestCase):
             @unittest.expectedFailure
             def test_failing_test_is_xfail(self):
                 assert False
+        if __name__ == '__main__':
+            unittest.main()
     """)
-    result = testdir.runpytest("-rxX")
-    result.stdout.fnmatch_lines([
-        "*XFAIL*MyTestCase*test_failing_test_is_xfail*",
-        "*1 xfailed*",
-    ])
+    if runner == 'pytest':
+        result = testdir.runpytest("-rxX")
+        result.stdout.fnmatch_lines([
+            "*XFAIL*MyTestCase*test_failing_test_is_xfail*",
+            "*1 xfailed*",
+        ])
+    else:
+        result = testdir.runpython(script)
+        result.stderr.fnmatch_lines([
+            "*1 test in*",
+            "*OK*(expected failures=1)*",
+        ])
     assert result.ret == 0

+
 @pytest.mark.skipif("sys.version_info < (2,7)")
-def test_unittest_expected_failure_for_passing_test_is_fail(testdir):
-    testdir.makepyfile("""
+@pytest.mark.parametrize('runner', ['pytest', 'unittest'])
+def test_unittest_expected_failure_for_passing_test_is_fail(testdir, runner):
+    script = testdir.makepyfile("""
         import unittest
         class MyTestCase(unittest.TestCase):
             @unittest.expectedFailure
             def test_passing_test_is_fail(self):
                 assert True
+        if __name__ == '__main__':
+            unittest.main()
     """)
-    result = testdir.runpytest("-rxX")
-    result.stdout.fnmatch_lines([
-        "*FAILURES*",
-        "*MyTestCase*test_passing_test_is_fail*",
-        "*Unexpected success*",
-        "*1 failed*",
-    ])
-    assert result.ret == 1
+    from _pytest.skipping import _is_unittest_unexpected_success_a_failure
+    should_fail = _is_unittest_unexpected_success_a_failure()
+    if runner == 'pytest':
+        result = testdir.runpytest("-rxX")
+        result.stdout.fnmatch_lines([
+            "*MyTestCase*test_passing_test_is_fail*",
+            "*1 failed*" if should_fail else "*1 xpassed*",
+        ])
+    else:
+        result = testdir.runpython(script)
+        result.stderr.fnmatch_lines([
+            "*1 test in*",
+            "*(unexpected successes=1)*",
+        ])
+
+    assert result.ret == (1 if should_fail else 0)

 @pytest.mark.parametrize('fix_type, stmt', [
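[Note on patches 12-13] The helper added in patch 13 keys everything off a
stdlib change in Python 3.4, where unittest started treating an unexpected
success as a suite failure. A sketch demonstrating the underlying stdlib
behavior the helper mirrors; this is plain unittest, runnable as-is:

    import sys
    import unittest

    class Case(unittest.TestCase):
        @unittest.expectedFailure
        def test_unexpectedly_passes(self):
            self.assertTrue(True)

    loader = unittest.TestLoader()
    result = unittest.TextTestRunner().run(
        loader.loadTestsFromTestCase(Case))
    # On Python >= 3.4, wasSuccessful() is False when there are unexpected
    # successes, so pytest mirrors that by failing the test; on older
    # versions it is True and pytest reports a non-strict xpass instead.
    print(sys.version_info[:2], result.wasSuccessful())
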
From 224ef673740c519ebd1dcbc9f6688529c5ae0a9e Mon Sep 17 00:00:00 2001
From: Bruno Oliveira
Date: Wed, 17 Aug 2016 20:32:25 -0300
Subject: [PATCH 14/15] Quick fix for tests in config depended on the current directory

When running those tests from pytest's root folder, they would fail
because they would end up picking pytest's own pytest.ini
---
 testing/test_config.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/testing/test_config.py b/testing/test_config.py
index 1997ddacdda..5a75f7d60ce 100644
--- a/testing/test_config.py
+++ b/testing/test_config.py
@@ -592,6 +592,7 @@ def test_setuppy_fallback(self, tmpdir):
         assert inicfg == {}

     def test_nothing(self, tmpdir):
+        tmpdir.chdir()
         rootdir, inifile, inicfg = determine_setup(None, [tmpdir])
         assert rootdir == tmpdir
         assert inifile is None
@@ -603,6 +604,7 @@ def test_with_specific_inifile(self, tmpdir):
         assert rootdir == tmpdir

     def test_with_arg_outside_cwd_without_inifile(self, tmpdir):
+        tmpdir.chdir()
         a = tmpdir.mkdir("a")
         b = tmpdir.mkdir("b")
         rootdir, inifile, inicfg = determine_setup(None, [a, b])

From 0fb34cd2a1bddf248c9e9c942e14c80e0991e2ea Mon Sep 17 00:00:00 2001
From: Bruno Oliveira
Date: Wed, 17 Aug 2016 21:10:34 -0300
Subject: [PATCH 15/15] Update CHANGELOG entries

---
 CHANGELOG.rst | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/CHANGELOG.rst b/CHANGELOG.rst
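[Note on patch 14] The fix works because rootdir/inifile detection considers
the current working directory: run from pytest's own checkout, these tests
picked up pytest's pytest.ini instead of the isolated tmpdir. `tmpdir.chdir()`
pins the cwd inside the fixture's directory. A tiny sketch of the same
isolation idiom, assuming the classic `tmpdir` fixture and the `py` library;
the test name is illustrative:

    import py

    def test_cwd_is_isolated(tmpdir):
        # Without this chdir, config files above the invocation directory
        # (such as a project-level pytest.ini) could leak into any lookup
        # that walks upward from the cwd.
        tmpdir.chdir()
        assert py.path.local() == tmpdir
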
index e8aa1982a0e..c26d071e5d6 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -79,6 +79,20 @@

 *

+**Changes**
+
+* Change ``report.outcome`` for ``xpassed`` tests to ``"passed"`` in non-strict
+  mode and ``"failed"`` in strict mode. Thanks to `@hackebrot`_ for the PR
+  (`#1795`_) and `@gprasad84`_ for report (`#1546`_).
+
+* Tests marked with ``xfail(strict=False)`` (the default) now appear in
+  JUnitXML reports as passing tests instead of skipped.
+  Thanks to `@hackebrot`_ for the PR (`#1795`_).
+
+.. _#1795: https://github.com/pytest-dev/pytest/pull/1795
+.. _#1546: https://github.com/pytest-dev/pytest/issues/1546
+.. _@gprasad84: https://github.com/gprasad84
+
 .. _#1210: https://github.com/pytest-dev/pytest/issues/1210
 .. _#1435: https://github.com/pytest-dev/pytest/issues/1435
 .. _#1471: https://github.com/pytest-dev/pytest/issues/1471
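[Note on the series as a whole] Taken together, the fifteen patches give the
following outcome matrix, sketched as a self-check file. This assumes a
pytest with the whole series applied and default `xfail_strict = false`; the
file name and reasons are illustrative:

    # test_matrix_sketch.py
    import pytest

    @pytest.mark.xfail(reason="known bug")
    def test_xfail_that_fails():          # reported: xfailed
        assert False

    @pytest.mark.xfail(reason="known bug")
    def test_xfail_that_passes():         # reported: xpassed, outcome "passed"
        assert True

    @pytest.mark.xfail(strict=True, reason="known bug")
    def test_strict_xfail_that_passes():  # reported: failed, "[XPASS(strict)]"
        assert True

Expected summary for `pytest -rxX test_matrix_sketch.py`: 1 failed, 1 xfailed,
1 xpassed, with exit code 1 (only the strict xpass fails the run).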