diff --git a/AUTHORS b/AUTHORS
index 83ca4115983..8f348e0e28c 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -28,6 +28,7 @@ Dave Hunt
 David Mohr
 Edison Gustavo Muenz
 Eduardo Schettino
+Endre Galaczi
 Elizaveta Shashkova
 Eric Hunsberger
 Eric Siegerman
@@ -51,6 +52,7 @@ Marc Schlaich
 Mark Abramowitz
 Markus Unterwaditzer
 Martijn Faassen
+Michael Aquilina
 Michael Droettboom
 Nicolas Delaby
 Pieter Mulder
diff --git a/CHANGELOG b/CHANGELOG
index b0141f25797..a437530d09a 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,3 +1,9 @@
+2.9.0.dev
+---------
+
+- Add unconditional skip mechanism (``pytest.mark.skip``)
+
+2.8.1.dev
 2.8.2.dev
 ---------
 
@@ -25,9 +31,9 @@
   "pytest-xdist" plugin, with test reports being assigned to the wrong tests.
   Thanks Daniel Grunwald for the report and Bruno Oliveira for the PR.
 
-- (experimental) adapt more SEMVER style versioning and change meaning of 
-  master branch in git repo: "master" branch now keeps the bugfixes, changes 
-  aimed for micro releases. "features" branch will only be be released 
+- (experimental) adapt more SEMVER style versioning and change meaning of
+  master branch in git repo: "master" branch now keeps the bugfixes, changes
+  aimed for micro releases. "features" branch will only be be released
   with minor or major pytest releases.
 
 - Fix issue #766 by removing documentation references to distutils.
@@ -42,6 +48,7 @@
 - Fix issue #411: Add __eq__ method to assertion comparison example.
   Thanks Ben Webb.
 
+- Fix issue #653: deprecated_call can be used as context manager.
 - fix issue 877: properly handle assertion explanations with non-ascii repr
   Thanks Mathieu Agopian for the report and Ronny Pfannschmidt for the PR.
 
@@ -52,7 +59,7 @@
 -----------------------------
 
 - new ``--lf`` and ``-ff`` options to run only the last failing tests or
-  "failing tests first" from the last run. This functionality is provided 
+  "failing tests first" from the last run. This functionality is provided
   through porting the formerly external pytest-cache plugin into pytest core.
   BACKWARD INCOMPAT: if you used pytest-cache's functionality to persist data
   between test runs be aware that we don't serialize sets anymore.
@@ -158,9 +165,9 @@
 
 - fix issue735: assertion failures on debug versions of Python 3.4+
 
-- new option ``--import-mode`` to allow to change test module importing 
-  behaviour to append to sys.path instead of prepending. This better allows 
-  to run test modules against installated versions of a package even if the 
+- new option ``--import-mode`` to allow to change test module importing
+  behaviour to append to sys.path instead of prepending. This better allows
+  to run test modules against installated versions of a package even if the
   package under test has the same import root. In this example::
 
       testing/__init__.py
@@ -168,7 +175,7 @@
       pkg_under_test/
 
   the tests will run against the installed version
-  of pkg_under_test when ``--import-mode=append`` is used whereas 
+  of pkg_under_test when ``--import-mode=append`` is used whereas
   by default they would always pick up the local version. Thanks Holger Krekel.
 
 - pytester: add method ``TmpTestdir.delete_loaded_modules()``, and call it
diff --git a/_pytest/__init__.py b/_pytest/__init__.py
index 08dabdc4836..51751401b64 100644
--- a/_pytest/__init__.py
+++ b/_pytest/__init__.py
@@ -1,2 +1,2 @@
 #
-__version__ = '2.8.2.dev1'
+__version__ = '2.9.0.dev1'
diff --git a/_pytest/recwarn.py b/_pytest/recwarn.py
index 601acffdab4..2922e5b3109 100644
--- a/_pytest/recwarn.py
+++ b/_pytest/recwarn.py
@@ -28,9 +28,17 @@ def pytest_namespace():
             'warns': warns}
 
 
-def deprecated_call(func, *args, **kwargs):
+def deprecated_call(func=None, *args, **kwargs):
     """Assert that ``func(*args, **kwargs)`` triggers a DeprecationWarning.
+
+    This function can be used as a context manager::
+
+        >>> with deprecated_call():
+        ...    myobject.deprecated_method()
     """
+    if not func:
+        return WarningsChecker(expected_warning=DeprecationWarning)
+
     wrec = WarningsRecorder()
     with wrec:
         warnings.simplefilter('always')  # ensure all warnings are triggered
@@ -150,8 +158,8 @@ def showwarning(message, category, filename, lineno,
 
         self._module.showwarning = showwarning
 
         # allow the same warning to be raised more than once
-        self._module.simplefilter('always', append=True)
+        self._module.simplefilter('always')
         return self
 
     def __exit__(self, *exc_info):
diff --git a/_pytest/skipping.py b/_pytest/skipping.py
index 36e54d7d8ae..47f789efbdc 100644
--- a/_pytest/skipping.py
+++ b/_pytest/skipping.py
@@ -5,6 +5,8 @@
 
 import py
 import pytest
 
+from _pytest.mark import MarkInfo
+
 def pytest_addoption(parser):
     group = parser.getgroup("general")
@@ -12,6 +14,7 @@
            action="store_true", dest="runxfail", default=False,
            help="run tests even if they are marked xfail")
 
+
 def pytest_configure(config):
     if config.option.runxfail:
         old = pytest.xfail
@@ -38,18 +41,22 @@ def nop(*args, **kwargs):
         "See http://pytest.org/latest/skipping.html"
     )
 
+
 def pytest_namespace():
     return dict(xfail=xfail)
 
+
 class XFailed(pytest.fail.Exception):
     """ raised from an explicit call to pytest.xfail() """
 
+
 def xfail(reason=""):
     """ xfail an executing test or setup functions with the given reason."""
     __tracebackhide__ = True
     raise XFailed(reason)
 xfail.Exception = XFailed
 
+
 class MarkEvaluator:
     def __init__(self, item, name):
         self.item = item
@@ -147,10 +154,25 @@ def getexplanation(self):
 
 @pytest.hookimpl(tryfirst=True)
 def pytest_runtest_setup(item):
-    evalskip = MarkEvaluator(item, 'skipif')
-    if evalskip.istrue():
-        item._evalskip = evalskip
-        pytest.skip(evalskip.getexplanation())
+    # Check if skip or skipif are specified as pytest marks
+
+    skipif_info = item.keywords.get('skipif')
+    if isinstance(skipif_info, MarkInfo):
+        eval_skipif = MarkEvaluator(item, 'skipif')
+        if eval_skipif.istrue():
+            item._evalskip = eval_skipif
+            pytest.skip(eval_skipif.getexplanation())
+
+    skip_info = item.keywords.get('skip')
+    if isinstance(skip_info, MarkInfo):
+        item._evalskip = True
+        if 'reason' in skip_info.kwargs:
+            pytest.skip(skip_info.kwargs['reason'])
+        elif skip_info.args:
+            pytest.skip(skip_info.args[0])
+        else:
+            pytest.skip("unconditional skip")
+
     item._evalxfail = MarkEvaluator(item, 'xfail')
     check_xfail_no_run(item)
 
diff --git a/doc/en/recwarn.rst b/doc/en/recwarn.rst
index c2a1e65fa17..ead162f4ed8 100644
--- a/doc/en/recwarn.rst
+++ b/doc/en/recwarn.rst
@@ -114,3 +114,9 @@ command ``warnings.simplefilter('always')``::
         warnings.warn("deprecated", DeprecationWarning)
         assert len(recwarn) == 1
         assert recwarn.pop(DeprecationWarning)
+
+You can also use it as a context manager::
+
+    def test_global():
+        with pytest.deprecated_call():
+            myobject.deprecated_method()
diff --git a/doc/en/skipping.rst b/doc/en/skipping.rst
index fc4f66e76bb..25e3c81163c 100644
--- a/doc/en/skipping.rst
+++ b/doc/en/skipping.rst
@@ -29,8 +29,18 @@ corresponding to the "short" letters shown in the test progress::
 
 Marking a test function to be skipped
 -------------------------------------------
+.. versionadded:: 2.9
+
+The simplest way to skip a test function is to mark it with the ``skip`` decorator,
+which may be passed an optional ``reason``::
+
+    @pytest.mark.skip(reason="no way of currently testing this")
+    def test_the_unknown():
+        ...
+
 
 .. versionadded:: 2.0, 2.4
 
+If you wish to skip something conditionally then you can use ``skipif`` instead.
 Here is an example of marking a test function to be skipped when run on a
 Python3.3 interpreter::
@@ -168,12 +178,12 @@ Running it with the report-on-xfail option gives this output::
     platform linux -- Python 3.4.3, pytest-2.8.1, py-1.4.30, pluggy-0.3.1
     rootdir: $REGENDOC_TMPDIR/example, inifile:
     collected 7 items
-    
+
     xfail_demo.py xxxxxxx
     ======= short test summary info ========
     XFAIL xfail_demo.py::test_hello
     XFAIL xfail_demo.py::test_hello2
-      reason: [NOTRUN] 
+      reason: [NOTRUN]
     XFAIL xfail_demo.py::test_hello3
       condition: hasattr(os, 'sep')
     XFAIL xfail_demo.py::test_hello4
@@ -183,7 +193,7 @@ Running it with the report-on-xfail option gives this output::
     XFAIL xfail_demo.py::test_hello6
       reason: reason
     XFAIL xfail_demo.py::test_hello7
-    
+
     ======= 7 xfailed in 0.12 seconds ========
 
 .. _`skip/xfail with parametrize`:
diff --git a/testing/test_capture.py b/testing/test_capture.py
index 53933352573..b5b374a726b 100644
--- a/testing/test_capture.py
+++ b/testing/test_capture.py
@@ -556,7 +556,6 @@ def test_a():
             import subprocess
             subprocess.call([sys.executable, __file__])
 
-        @pytest.mark.skip
         def test_foo():
             import os;os.write(1, b'\xc3')
 
diff --git a/testing/test_recwarn.py b/testing/test_recwarn.py
index 03bbd1eb42d..97a0f25050d 100644
--- a/testing/test_recwarn.py
+++ b/testing/test_recwarn.py
@@ -79,6 +79,7 @@ def dep_explicit(i):
                                       filename="hello", lineno=3)
 
 class TestDeprecatedCall(object):
+
     def test_deprecated_call_raises(self):
         excinfo = pytest.raises(AssertionError,
                                 "pytest.deprecated_call(dep, 3)")
@@ -111,6 +112,16 @@ def test_deprecated_explicit_call(self):
         pytest.deprecated_call(dep_explicit, 0)
         pytest.deprecated_call(dep_explicit, 0)
 
+    def test_deprecated_call_as_context_manager_no_warning(self):
+        with pytest.raises(pytest.fail.Exception) as ex:
+            with pytest.deprecated_call():
+                dep(1)
+        assert str(ex.value) == "DID NOT WARN"
+
+    def test_deprecated_call_as_context_manager(self):
+        with pytest.deprecated_call():
+            dep(0)
+
     def test_deprecated_call_pending(self):
         f = lambda: py.std.warnings.warn(PendingDeprecationWarning("hi"))
         pytest.deprecated_call(f)
diff --git a/testing/test_skipping.py b/testing/test_skipping.py
index 1048c9455fb..66bfd68c20b 100644
--- a/testing/test_skipping.py
+++ b/testing/test_skipping.py
@@ -4,6 +4,7 @@
 from _pytest.skipping import MarkEvaluator, folded_skips, pytest_runtest_setup
 from _pytest.runner import runtestprotocol
 
+
 class TestEvaluator:
     def test_no_marker(self, testdir):
         item = testdir.getitem("def test_func(): pass")
@@ -382,6 +383,90 @@ def test_func():
         ])
 
+
+class TestSkip:
+    def test_skip_class(self, testdir):
+        testdir.makepyfile("""
+            import pytest
+            @pytest.mark.skip
+            class TestSomething(object):
+                def test_foo(self):
+                    pass
+                def test_bar(self):
+                    pass
+
+            def test_baz():
+                pass
+        """)
+        rec = testdir.inline_run()
+        rec.assertoutcome(skipped=2, passed=1)
+
+    def test_skips_on_false_string(self, testdir):
+        testdir.makepyfile("""
+            import pytest
+            @pytest.mark.skip('False')
+            def test_foo():
+                pass
+        """)
+        rec = testdir.inline_run()
+        rec.assertoutcome(skipped=1)
+
+    def test_arg_as_reason(self, testdir):
+        testdir.makepyfile("""
+            import pytest
+            @pytest.mark.skip('testing stuff')
+            def test_bar():
+                pass
+        """)
+        result = testdir.runpytest('-rs')
+        result.stdout.fnmatch_lines([
+            "*testing stuff*",
+            "*1 skipped*",
+        ])
+
+    def test_skip_no_reason(self, testdir):
+        testdir.makepyfile("""
+            import pytest
+            @pytest.mark.skip
+            def test_foo():
+                pass
+        """)
+        result = testdir.runpytest('-rs')
+        result.stdout.fnmatch_lines([
+            "*unconditional skip*",
+            "*1 skipped*",
+        ])
+
+    def test_skip_with_reason(self, testdir):
+        testdir.makepyfile("""
+            import pytest
+            @pytest.mark.skip(reason="for lolz")
+            def test_bar():
+                pass
+        """)
+        result = testdir.runpytest('-rs')
+        result.stdout.fnmatch_lines([
+            "*for lolz*",
+            "*1 skipped*",
+        ])
+
+    def test_only_skips_marked_test(self, testdir):
+        testdir.makepyfile("""
+            import pytest
+            @pytest.mark.skip
+            def test_foo():
+                pass
+            @pytest.mark.skip(reason="nothing in particular")
+            def test_bar():
+                pass
+            def test_baz():
+                assert True
+        """)
+        result = testdir.runpytest('-rs')
+        result.stdout.fnmatch_lines([
+            "*nothing in particular*",
+            "*1 passed*2 skipped*",
+        ])
+
 class TestSkipif:
     def test_skipif_conditional(self, testdir):
         item = testdir.getitem("""