WIP Change outcome to 'passed' for xfail unless it's strict #1795

Merged: 17 commits, Aug 18, 2016
14 changes: 14 additions & 0 deletions CHANGELOG.rst
@@ -79,6 +79,20 @@

*

**Changes**
Member:
Deliberately placed it here because this should be moved to the appropriate section on features.


* Change ``report.outcome`` for ``xpassed`` tests to ``"passed"`` in non-strict
  mode and ``"failed"`` in strict mode. Thanks to `@hackebrot`_ for the PR
  (`#1795`_) and `@gprasad84`_ for the report (`#1546`_).

* Tests marked with ``xfail(strict=False)`` (the default) now appear in
  JUnitXML reports as passing tests instead of skipped.
  Thanks to `@hackebrot`_ for the PR (`#1795`_).

.. _#1795: https://github.com/pytest-dev/pytest/pull/1795
.. _#1546: https://github.com/pytest-dev/pytest/issues/1546
.. _@gprasad84: https://github.com/gprasad84

.. _#1210: https://github.com/pytest-dev/pytest/issues/1210
.. _#1435: https://github.com/pytest-dev/pytest/issues/1435
.. _#1471: https://github.com/pytest-dev/pytest/issues/1471
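A minimal sketch of the behavior described in the entries above (the module and test names here are hypothetical and not part of the diff):

    # test_xfail_outcomes.py -- illustrative only
    import pytest

    @pytest.mark.xfail(reason="known bug")
    def test_xpass_non_strict():
        # Unexpectedly passes: the report outcome is now "passed", the test is
        # shown as XPASS, and it is counted as a passing test in JUnit XML.
        assert True

    @pytest.mark.xfail(strict=True, reason="must not pass")
    def test_xpass_strict():
        # Unexpectedly passes in strict mode: the report outcome is "failed"
        # and the failure text is "[XPASS(strict)] must not pass".
        assert True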
37 changes: 31 additions & 6 deletions _pytest/skipping.py
@@ -220,6 +220,18 @@ def check_strict_xfail(pyfuncitem):
            pytest.fail('[XPASS(strict)] ' + explanation, pytrace=False)


def _is_unittest_unexpected_success_a_failure():
    """Return True if the test suite should fail if a @expectedFailure unittest test PASSES.

    From https://docs.python.org/3/library/unittest.html?highlight=unittest#unittest.TestResult.wasSuccessful:
        Changed in version 3.4: Returns False if there were any
        unexpectedSuccesses from tests marked with the expectedFailure() decorator.

    TODO: this should be moved to the "compat" module.
    """
    return sys.version_info >= (3, 4)
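For reference, a small standalone unittest sketch of the version difference the docstring describes (illustrative only, not part of the diff):

    import unittest

    class Example(unittest.TestCase):
        @unittest.expectedFailure
        def test_unexpectedly_passes(self):
            self.assertTrue(True)  # passes although a failure was expected

    suite = unittest.defaultTestLoader.loadTestsFromTestCase(Example)
    result = unittest.TextTestRunner(verbosity=0).run(suite)
    # On Python >= 3.4 the unexpected success makes wasSuccessful() return
    # False; on earlier versions it still returns True.
    print(result.wasSuccessful())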


@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
    outcome = yield
@@ -228,9 +240,15 @@ def pytest_runtest_makereport(item, call):
    evalskip = getattr(item, '_evalskip', None)
    # unittest special case, see setting of _unexpectedsuccess
    if hasattr(item, '_unexpectedsuccess') and rep.when == "call":
        # we need to translate into how pytest encodes xpass
        rep.wasxfail = "reason: " + repr(item._unexpectedsuccess)
        rep.outcome = "failed"
        if item._unexpectedsuccess:
            rep.longrepr = "Unexpected success: {0}".format(item._unexpectedsuccess)
        else:
            rep.longrepr = "Unexpected success"
        if _is_unittest_unexpected_success_a_failure():
            rep.outcome = "failed"
        else:
            rep.outcome = "passed"
        rep.wasxfail = rep.longrepr
    elif item.config.option.runxfail:
        pass   # don't interfere
    elif call.excinfo and call.excinfo.errisinstance(pytest.xfail.Exception):
@@ -245,8 +263,15 @@ def pytest_runtest_makereport(item, call):
                rep.outcome = "skipped"
                rep.wasxfail = evalxfail.getexplanation()
        elif call.when == "call":
            rep.outcome = "failed"   # xpass outcome
            rep.wasxfail = evalxfail.getexplanation()
            strict_default = item.config.getini('xfail_strict')
Member:
Hmm I think this is the more correct implementation of "strict xfail" than the one I did... with this, I think we don't even need check_strict_xfail (which is used in pytest_pyfunc_call) in this module anymore at all, because failure/passing will be handled here which seems more appropriate now. 😄

            is_strict_xfail = evalxfail.get('strict', strict_default)
            explanation = evalxfail.getexplanation()
            if is_strict_xfail:
                rep.outcome = "failed"
                rep.longrepr = "[XPASS(strict)] {0}".format(explanation)
            else:
                rep.outcome = "passed"
                rep.wasxfail = explanation
    elif evalskip is not None and rep.skipped and type(rep.longrepr) is tuple:
        # skipped by mark.skipif; change the location of the failure
        # to point to the item definition, otherwise it will display
@@ -260,7 +285,7 @@ def pytest_report_teststatus(report):
    if hasattr(report, "wasxfail"):
        if report.skipped:
            return "xfailed", "x", "xfail"
        elif report.failed:
        elif report.passed:
Member:
@fabioz I think this will mean you no longer have to do special handling for this in pydev_runfiles_pytest2.py, correct?

Member Author:
@nicoddemus do you think we need to check for strict_xfail here too?

@nicoddemus (Member), Aug 15, 2016:
No need: strict xfails now have report set to failed and will appear as a normal failure.

Member Author:
okay, that's what I thought. Thanks!

return "xpassed", "X", ("XPASS", {'yellow': True})

# called by the terminalreporter instance/plugin
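Because the hook above falls back to item.config.getini('xfail_strict'), strict behavior can also be enabled project-wide through the existing xfail_strict ini option. A minimal pytest.ini sketch (illustrative, not part of this diff):

    [pytest]
    xfail_strict = true

With that setting, an unexpectedly passing @pytest.mark.xfail test is reported as failed unless its marker explicitly sets strict=False.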
11 changes: 6 additions & 5 deletions testing/python/metafunc.py
@@ -1080,22 +1080,23 @@ def test_increment(n, expected):
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=2, skipped=1)

    def test_xfail_passing_is_xpass(self, testdir):
    @pytest.mark.parametrize('strict', [True, False])
    def test_xfail_passing_is_xpass(self, testdir, strict):
        s = """
            import pytest

            @pytest.mark.parametrize(("n", "expected"), [
                (1, 2),
                pytest.mark.xfail("sys.version > 0", reason="some bug")((2, 3)),
                pytest.mark.xfail("sys.version_info > (0, 0, 0)", reason="some bug", strict={strict})((2, 3)),
                (3, 4),
            ])
            def test_increment(n, expected):
                assert n + 1 == expected
        """
        """.format(strict=strict)
        testdir.makepyfile(s)
        reprec = testdir.inline_run()
        # xpass is fail, obviously :)
        reprec.assertoutcome(passed=2, failed=1)
        passed, failed = (2, 1) if strict else (3, 0)
        reprec.assertoutcome(passed=passed, failed=failed)

    def test_parametrize_called_in_generate_tests(self, testdir):
        s = """
2 changes: 2 additions & 0 deletions testing/test_config.py
@@ -592,6 +592,7 @@ def test_setuppy_fallback(self, tmpdir):
        assert inicfg == {}

    def test_nothing(self, tmpdir):
        tmpdir.chdir()
        rootdir, inifile, inicfg = determine_setup(None, [tmpdir])
        assert rootdir == tmpdir
        assert inifile is None
@@ -603,6 +604,7 @@ def test_with_specific_inifile(self, tmpdir):
        assert rootdir == tmpdir

    def test_with_arg_outside_cwd_without_inifile(self, tmpdir):
        tmpdir.chdir()
        a = tmpdir.mkdir("a")
        b = tmpdir.mkdir("b")
        rootdir, inifile, inicfg = determine_setup(None, [a, b])
34 changes: 27 additions & 7 deletions testing/test_junitxml.py
@@ -100,7 +100,7 @@ def test_xpass():
        result, dom = runandparse(testdir)
        assert result.ret
        node = dom.find_first_by_tag("testsuite")
        node.assert_attr(name="pytest", errors=0, failures=1, skips=3, tests=5)
        node.assert_attr(name="pytest", errors=0, failures=1, skips=2, tests=5)

    def test_summing_simple_with_errors(self, testdir):
        testdir.makepyfile("""
@@ -115,13 +115,16 @@ def test_fail():
            def test_error(fixture):
                pass
            @pytest.mark.xfail
            def test_xfail():
                assert False
            @pytest.mark.xfail(strict=True)
            def test_xpass():
                assert 1
                assert True
        """)
        result, dom = runandparse(testdir)
        assert result.ret
        node = dom.find_first_by_tag("testsuite")
        node.assert_attr(name="pytest", errors=1, failures=1, skips=1, tests=4)
        node.assert_attr(name="pytest", errors=1, failures=2, skips=1, tests=5)

    def test_timing_function(self, testdir):
        testdir.makepyfile("""
@@ -346,16 +349,33 @@ def test_xpass():
        result, dom = runandparse(testdir)
        # assert result.ret
        node = dom.find_first_by_tag("testsuite")
        node.assert_attr(skips=1, tests=1)
        node.assert_attr(skips=0, tests=1)
        tnode = node.find_first_by_tag("testcase")
        tnode.assert_attr(
            file="test_xfailure_xpass.py",
            line="1",
            classname="test_xfailure_xpass",
            name="test_xpass")
        fnode = tnode.find_first_by_tag("skipped")
        fnode.assert_attr(message="xfail-marked test passes unexpectedly")
        # assert "ValueError" in fnode.toxml()

    def test_xfailure_xpass_strict(self, testdir):
        testdir.makepyfile("""
            import pytest
            @pytest.mark.xfail(strict=True, reason="This needs to fail!")
            def test_xpass():
                pass
        """)
        result, dom = runandparse(testdir)
        # assert result.ret
        node = dom.find_first_by_tag("testsuite")
        node.assert_attr(skips=0, tests=1)
        tnode = node.find_first_by_tag("testcase")
        tnode.assert_attr(
            file="test_xfailure_xpass_strict.py",
            line="1",
            classname="test_xfailure_xpass_strict",
            name="test_xpass")
        fnode = tnode.find_first_by_tag("failure")
        fnode.assert_attr(message="[XPASS(strict)] This needs to fail!")

    def test_collect_error(self, testdir):
        testdir.makepyfile("syntax error")
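A quick way to inspect a generated report outside the test suite, assuming a report.xml produced with pytest --junitxml=report.xml and, for the pytest version under discussion, a <testsuite> root element as checked by the assertions above:

    import xml.etree.ElementTree as ET

    suite = ET.parse("report.xml").getroot()
    # A non-strict XPASS is counted in "tests" but not in "skips" or "failures".
    print(suite.get("tests"), suite.get("skips"), suite.get("failures"))
    for case in suite.iter("testcase"):
        # A strict XPASS appears as a <failure> child whose message starts
        # with "[XPASS(strict)]".
        for failure in case.findall("failure"):
            print(case.get("name"), failure.get("message"))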
18 changes: 16 additions & 2 deletions testing/test_skipping.py
@@ -145,15 +145,29 @@ def test_func():
    def test_xfail_xpassed(self, testdir):
        item = testdir.getitem("""
            import pytest
            @pytest.mark.xfail
            @pytest.mark.xfail(reason="this is an xfail")
            def test_func():
                assert 1
        """)
        reports = runtestprotocol(item, log=False)
        assert len(reports) == 3
        callreport = reports[1]
        assert callreport.passed
        assert callreport.wasxfail == "this is an xfail"

    def test_xfail_xpassed_strict(self, testdir):
        item = testdir.getitem("""
            import pytest
            @pytest.mark.xfail(strict=True, reason="nope")
            def test_func():
                assert 1
        """)
        reports = runtestprotocol(item, log=False)
        assert len(reports) == 3
        callreport = reports[1]
        assert callreport.failed
        assert callreport.wasxfail == ""
        assert callreport.longrepr == "[XPASS(strict)] nope"
        assert not hasattr(callreport, "wasxfail")

    def test_xfail_run_anyway(self, testdir):
        testdir.makepyfile("""
68 changes: 54 additions & 14 deletions testing/test_unittest.py
@@ -419,8 +419,9 @@ def setup_class(cls):
                def test_method(self):
                    pass
        """)
        from _pytest.skipping import _is_unittest_unexpected_success_a_failure
        should_fail = _is_unittest_unexpected_success_a_failure()
        result = testdir.runpytest("-rxs")
        assert result.ret == 0
        result.stdout.fnmatch_lines_random([
            "*XFAIL*test_trial_todo*",
            "*trialselfskip*",
@@ -429,8 +430,9 @@ def test_method(self):
"*i2wanto*",
"*sys.version_info*",
"*skip_in_method*",
"*4 skipped*3 xfail*1 xpass*",
"*1 failed*4 skipped*3 xfailed*" if should_fail else "*4 skipped*3 xfail*1 xpass*",
])
assert result.ret == (1 if should_fail else 0)

def test_trial_error(self, testdir):
testdir.makepyfile("""
@@ -587,24 +589,62 @@ def test_hello(self, arg1):
assert "TypeError" in result.stdout.str()
assert result.ret == 1


@pytest.mark.skipif("sys.version_info < (2,7)")
def test_unittest_unexpected_failure(testdir):
testdir.makepyfile("""
@pytest.mark.parametrize('runner', ['pytest', 'unittest'])
def test_unittest_expected_failure_for_failing_test_is_xfail(testdir, runner):
script = testdir.makepyfile("""
import unittest
class MyTestCase(unittest.TestCase):
@unittest.expectedFailure
def test_func1(self):
assert 0
def test_failing_test_is_xfail(self):
assert False
if __name__ == '__main__':
unittest.main()
""")
if runner == 'pytest':
result = testdir.runpytest("-rxX")
result.stdout.fnmatch_lines([
"*XFAIL*MyTestCase*test_failing_test_is_xfail*",
"*1 xfailed*",
])
else:
result = testdir.runpython(script)
result.stderr.fnmatch_lines([
"*1 test in*",
"*OK*(expected failures=1)*",
])
assert result.ret == 0


@pytest.mark.skipif("sys.version_info < (2,7)")
@pytest.mark.parametrize('runner', ['pytest', 'unittest'])
def test_unittest_expected_failure_for_passing_test_is_fail(testdir, runner):
Member:
@hackebrot unfortunately the behavior for a passing test marked with @expectedFailure changed in Python 3.4. 😓

    script = testdir.makepyfile("""
        import unittest
        class MyTestCase(unittest.TestCase):
            @unittest.expectedFailure
            def test_func2(self):
                assert 1
            def test_passing_test_is_fail(self):
                assert True
        if __name__ == '__main__':
            unittest.main()
    """)
    result = testdir.runpytest("-rxX")
    result.stdout.fnmatch_lines([
        "*XFAIL*MyTestCase*test_func1*",
        "*XPASS*MyTestCase*test_func2*",
        "*1 xfailed*1 xpass*",
    ])
    from _pytest.skipping import _is_unittest_unexpected_success_a_failure
    should_fail = _is_unittest_unexpected_success_a_failure()
    if runner == 'pytest':
        result = testdir.runpytest("-rxX")
        result.stdout.fnmatch_lines([
            "*MyTestCase*test_passing_test_is_fail*",
            "*1 failed*" if should_fail else "*1 xpassed*",
        ])
    else:
        result = testdir.runpython(script)
        result.stderr.fnmatch_lines([
            "*1 test in*",
            "*(unexpected successes=1)*",
        ])

    assert result.ret == (1 if should_fail else 0)


@pytest.mark.parametrize('fix_type, stmt', [