From d8b9b5f1c83adf19e3dc034018fa986d4475aaac Mon Sep 17 00:00:00 2001
From: holger krekel
Date: Thu, 15 Oct 2009 20:10:06 +0200
Subject: [PATCH] - make importorskip statically available at
 py.test.importorskip because it's used for conditional plugin loading

- fix case where xfail is defined at module/class level
- fixes and improvements to docs, correct links to plugins
- use new skip facilities here and there

--HG--
branch : trunk
---
 _py/test/outcome.py                           | 19 +++++
 _py/test/plugin/pytest_figleaf.py             |  3 +-
 _py/test/plugin/pytest_skipping.py            | 77 +++++++------------
 bin-for-dist/makepluginlist.py                | 15 ++--
 doc/test/features.txt                         |  6 +-
 doc/test/plugin/index.txt                     |  2 +-
 doc/test/plugin/links.txt                     | 32 ++++----
 doc/test/plugin/skipping.txt                  | 34 ++++----
 py/__init__.py                                |  1 +
 testing/code/test_assertion.py                |  6 +-
 testing/code/test_source.py                   |  4 +-
 testing/io_/test_terminalwriter.py            | 17 ++--
 testing/process/test_forkedfunc.py            |  7 +-
 testing/process/test_killproc.py              |  1 -
 testing/pytest/plugin/test_pytest_runner.py   |  7 +-
 testing/pytest/plugin/test_pytest_skipping.py | 38 ++++-----
 testing/pytest/test_outcome.py                | 27 +++++++
 17 files changed, 148 insertions(+), 148 deletions(-)

diff --git a/_py/test/outcome.py b/_py/test/outcome.py
index 6c1bbce00..ecb537855 100644
--- a/_py/test/outcome.py
+++ b/_py/test/outcome.py
@@ -94,6 +94,25 @@ def raises(ExpectedException, *args, **kwargs):
     raise ExceptionFailure(msg="DID NOT RAISE",
                            expr=args, expected=ExpectedException)
 
+def importorskip(modname, minversion=None):
+    """ return imported module or perform a dynamic skip() """
+    compile(modname, '', 'eval') # to catch syntaxerrors
+    try:
+        mod = __import__(modname, None, None, ['__doc__'])
+    except ImportError:
+        py.test.skip("could not import %r" %(modname,))
+    if minversion is None:
+        return mod
+    verattr = getattr(mod, '__version__', None)
+    if isinstance(minversion, str):
+        minver = minversion.split(".")
+    else:
+        minver = list(minversion)
+    if verattr is None or verattr.split(".") < minver:
+        py.test.skip("module %r has __version__ %r, required is: %r" %(
+            modname, verattr, minversion))
+    return mod
+
 
 # exitcodes for the command line
 EXIT_OK = 0
diff --git a/_py/test/plugin/pytest_figleaf.py b/_py/test/plugin/pytest_figleaf.py
index 173ea6faf..ed4d7bb7c 100644
--- a/_py/test/plugin/pytest_figleaf.py
+++ b/_py/test/plugin/pytest_figleaf.py
@@ -4,7 +4,8 @@ write and report coverage data with 'figleaf'.
 
 """
 import py
-figleaf = py.test.importorskip("figleaf.annotate_html")
+py.test.importorskip("figleaf.annotate_html")
+import figleaf
 
 def pytest_addoption(parser):
     group = parser.addgroup('figleaf options')
diff --git a/_py/test/plugin/pytest_skipping.py b/_py/test/plugin/pytest_skipping.py
index 78f5fdefc..27b7d4454 100644
--- a/_py/test/plugin/pytest_skipping.py
+++ b/_py/test/plugin/pytest_skipping.py
@@ -1,13 +1,12 @@
 """
-mark python test functions, classes or modules for conditional
-skipping (skipif) or as expected-to-fail (xfail). Both declarations
-lead to special reporting and both can be systematically associated
-with functions, whole classes or modules. The difference between
-the two is that 'xfail' will still execute test functions
-but it will revert the outcome. A passing test is now
-a failure and failing test is expected. All skip conditions
-are reported at the end of test run through the terminal
-reporter.
+advanced conditional skipping for python test functions, classes or modules.
+
+You can mark functions, classes or modules for conditional
+skipping (skipif) or as expected-to-fail (xfail). The difference
+between the two is that 'xfail' will still execute test functions
+but it will invert the outcome: a passing test becomes a failure and
+a failing test becomes a semi-passing one. All skip conditions are
+reported at the end of the test run through the terminal reporter.
 
 .. _skipif:
 
 skipping a single function
 -------------------------------------------
@@ -20,15 +19,18 @@ Here is an example for skipping a test function on Python3::
 
     def test_function():
         ...
 
-Conditions are specified as python expressions
-and can access the ``sys`` module. They can also
-access the config object and thus depend on command
-line or conftest options::
+The 'skipif' marker accepts an **arbitrary python expression**
+as a condition. When setting up the test function the condition
+is evaluated by calling ``eval(expr, namespace)``. The namespace
+contains the ``sys`` and ``os`` modules as well as the
+test ``config`` object. The latter allows you to skip based
+on a test configuration value, e.g. like this::
 
-    @py.test.mark.skipif("config.getvalue('db') is None")
+    @py.test.mark.skipif("not config.getvalue('db')")
     def test_function(...):
         ...
 
+
 conditionally mark a function as "expected to fail"
 -------------------------------------------------------
 
@@ -53,7 +55,7 @@ skip/xfail a whole test class or module
 -------------------------------------------
 
 Instead of marking single functions you can skip
-a whole class of tests when runnign on a specific
+a whole class of tests when running on a specific
 platform::
 
     class TestSomething:
@@ -75,13 +77,12 @@ You can use a helper to skip on a failing import::
 
 You can use this helper at module level or within
 a test or setup function.
 
-You can aslo skip if a library does not have the right version::
+You can also skip if a library does not come with a high enough version::
 
     docutils = py.test.importorskip("docutils", minversion="0.3")
 
 The version will be read from the specified module's ``__version__`` attribute.
-
 dynamically skip from within a test or setup
 ------------------------------------------------
 
@@ -96,16 +97,11 @@ If you want to skip the execution of a test you can call
 
 .. _`funcarg factory`: ../funcargs.html#factory
 
 """
-# XXX not all skip-related code is contained in
-# this plugin yet, some remains in outcome.py and
-# the Skipped Exception is imported here and there.
-
+# XXX py.test.skip, .importorskip and the Skipped class
+# should also be defined in this plugin, requires thought/changes
 import py
 
-def pytest_namespace():
-    return {'importorskip': importorskip}
-
 def pytest_runtest_setup(item):
     expr, result = evalexpression(item, 'skipif')
     if result:
@@ -117,14 +113,15 @@ def pytest_runtest_makereport(__multicall__, item, call):
     if hasattr(item, 'obj'):
         expr, result = evalexpression(item, 'xfail')
         if result:
-            res = __multicall__.execute()
+            rep = __multicall__.execute()
             if call.excinfo:
-                res.skipped = True
-                res.failed = res.passed = False
+                rep.skipped = True
+                rep.failed = rep.passed = False
             else:
-                res.skipped = res.passed = False
-                res.failed = True
-            return res
+                rep.skipped = rep.passed = False
+                rep.failed = True
+            rep.keywords['xfail'] = True # expr
+            return rep
 
 def pytest_report_teststatus(report):
     if 'xfail' in report.keywords:
@@ -157,24 +154,6 @@ def pytest_terminal_summary(terminalreporter):
             pos = "%s %s:%d: unexpectedly passing" %(modpath, fspath, lineno)
             tr._tw.line(pos)
 
-def importorskip(modname, minversion=None):
-    """ return imported module or perform a dynamic skip() """
-    compile(modname, '', 'eval') # to catch syntaxerrors
-    try:
-        mod = __import__(modname)
-    except ImportError:
-        py.test.skip("could not import %r" %(modname,))
-    if minversion is None:
-        return mod
-    verattr = getattr(mod, '__version__', None)
-    if isinstance(minversion, str):
-        minver = minversion.split(".")
-    else:
-        minver = list(minversion)
-    if verattr is None or verattr.split(".") < minver:
-        py.test.skip("module %r has __version__ %r, required is: %r" %(
-            modname, verattr, minversion))
-    return mod
 
 def getexpression(item, keyword):
     if isinstance(item, py.test.collect.Function):
@@ -193,7 +172,7 @@ def evalexpression(item, keyword):
     result = None
     if expr:
         if isinstance(expr, str):
-            d = {'sys': py.std.sys, 'config': item.config}
+            d = {'os': py.std.os, 'sys': py.std.sys, 'config': item.config}
             result = eval(expr, d)
         else:
             result = expr
diff --git a/bin-for-dist/makepluginlist.py b/bin-for-dist/makepluginlist.py
index 597524316..a7de85a8c 100644
--- a/bin-for-dist/makepluginlist.py
+++ b/bin-for-dist/makepluginlist.py
@@ -21,11 +21,13 @@ plugins = [
 ]
 
 externals = {
-    'oejskit': 'run javascript tests in real life browsers',
-    'django': 'support for testing django applications',
-
+    'oejskit': "run javascript tests in real life browsers",
+    'django': "support for testing django applications",
+#    'coverage': "support for using Ned's coverage module",
+#    'xmlresult': "support for generating xml reports "
+#                 "and CruiseControl integration",
 }
-
+
 def warn(*args):
     msg = " ".join(map(str, args))
     print >>sys.stderr, "WARN:", msg
@@ -123,7 +125,7 @@ class RestWriter:
         self.out.close()
         print "wrote", self.target
         del self.out
-    
+
 class PluginOverview(RestWriter):
     def makerest(self, config):
         plugindir = py.path.local(py.__file__).dirpath("test", "plugin")
@@ -145,7 +147,6 @@ class PluginOverview(RestWriter):
             self.Print()
 
 class HookSpec(RestWriter):
-
     def makerest(self, config):
         module = config.pluginmanager.hook._hookspecs
         source = py.code.Source(module)
@@ -212,7 +213,7 @@ class PluginDoc(RestWriter):
         #             "py/test/plugin/%s" %(hg_changeset, basename)))
         self.links.append((basename,
             "http://bitbucket.org/hpk42/py-trunk/raw/%s/"
-            "py/test/plugin/%s" %(pyversion, basename)))
+            "_py/test/plugin/%s" %(pyversion, basename)))
         self.links.append(('customize', '../customize.html'))
         self.links.append(('plugins', 'index.html'))
         self.links.append(('get in contact', '../../contact.html'))
diff --git a/doc/test/features.txt b/doc/test/features.txt
index d23b1b800..d78bffe80 100644
--- a/doc/test/features.txt
+++ b/doc/test/features.txt
@@ -123,14 +123,14 @@ command line.
 Using the ``--pdb`` option you can automatically activate
 a PDB `Python debugger`_ when a test fails.
 
 advanced skipping of tests
--------------------------------
+======================================
 
-py.test has builtin support for skipping tests or expecting
+py.test has `advanced support for skipping tests`_ and for expecting
 failures on tests on certain platforms.  Apart from the
 minimal py.test style also unittest- and nose-style tests
 can make use of this feature.
 
-
+.. _`advanced support for skipping tests`: plugin/skipping.html
 .. _`funcargs mechanism`: funcargs.html
 .. _`unittest.py`: http://docs.python.org/library/unittest.html
 .. _`doctest.py`: http://docs.python.org/library/doctest.html
diff --git a/doc/test/plugin/index.txt b/doc/test/plugin/index.txt
index 25dd1c8a4..04a083623 100644
--- a/doc/test/plugin/index.txt
+++ b/doc/test/plugin/index.txt
@@ -2,7 +2,7 @@ plugins for Python test functions
 =================================
 
-skipping_ mark python test functions, classes or modules for conditional
+skipping_ advanced conditional skipping for python test functions, classes or modules.
 
 figleaf_ write and report coverage data with 'figleaf'.
diff --git a/doc/test/plugin/links.txt b/doc/test/plugin/links.txt
index 2df8df3c4..14ebef39c 100644
--- a/doc/test/plugin/links.txt
+++ b/doc/test/plugin/links.txt
@@ -1,38 +1,38 @@
 .. _`helpconfig`: helpconfig.html
 .. _`terminal`: terminal.html
-.. _`pytest_recwarn.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_recwarn.py
+.. _`pytest_recwarn.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_recwarn.py
 .. _`unittest`: unittest.html
-.. _`pytest_monkeypatch.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_monkeypatch.py
-.. _`pytest_keyword.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_keyword.py
+.. _`pytest_monkeypatch.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_monkeypatch.py
+.. _`pytest_keyword.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_keyword.py
 .. _`pastebin`: pastebin.html
 .. _`skipping`: skipping.html
 .. _`plugins`: index.html
-.. _`pytest_doctest.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_doctest.py
+.. _`pytest_doctest.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_doctest.py
 .. _`capture`: capture.html
-.. _`pytest_nose.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_nose.py
-.. _`pytest_restdoc.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_restdoc.py
+.. _`pytest_nose.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_nose.py
+.. _`pytest_restdoc.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_restdoc.py
 .. _`restdoc`: restdoc.html
-.. _`pytest_pastebin.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_pastebin.py
-.. _`pytest_figleaf.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_figleaf.py
-.. _`pytest_hooklog.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_hooklog.py
-.. _`pytest_skipping.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_skipping.py
+.. _`pytest_pastebin.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_pastebin.py
+.. _`pytest_figleaf.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_figleaf.py
+.. _`pytest_hooklog.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_hooklog.py
+.. _`pytest_skipping.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_skipping.py
 .. _`checkout the py.test development version`: ../../download.html#checkout
-.. _`pytest_helpconfig.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_helpconfig.py
+.. _`pytest_helpconfig.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_helpconfig.py
 .. _`oejskit`: oejskit.html
 .. _`doctest`: doctest.html
 .. _`get in contact`: ../../contact.html
-.. _`pytest_capture.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_capture.py
+.. _`pytest_capture.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_capture.py
 .. _`figleaf`: figleaf.html
 .. _`customize`: ../customize.html
 .. _`hooklog`: hooklog.html
-.. _`pytest_terminal.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_terminal.py
+.. _`pytest_terminal.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_terminal.py
 .. _`recwarn`: recwarn.html
-.. _`pytest_pdb.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_pdb.py
+.. _`pytest_pdb.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_pdb.py
 .. _`monkeypatch`: monkeypatch.html
 .. _`resultlog`: resultlog.html
 .. _`keyword`: keyword.html
 .. _`django`: django.html
-.. _`pytest_unittest.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_unittest.py
+.. _`pytest_unittest.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_unittest.py
 .. _`nose`: nose.html
-.. _`pytest_resultlog.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_resultlog.py
+.. _`pytest_resultlog.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_resultlog.py
 .. _`pdb`: pdb.html
diff --git a/doc/test/plugin/skipping.txt b/doc/test/plugin/skipping.txt
index ef1c5421c..4cef33e6a 100644
--- a/doc/test/plugin/skipping.txt
+++ b/doc/test/plugin/skipping.txt
@@ -2,19 +2,17 @@ pytest_skipping plugin
 ======================
 
-mark python test functions, classes or modules for conditional
+advanced conditional skipping for python test functions, classes or modules.
 
 .. contents::
   :local:
 
-skipping (skipif) or as expected-to-fail (xfail). Both declarations
-lead to special reporting and both can be systematically associated
-with functions, whole classes or modules. The difference between
-the two is that 'xfail' will still execute test functions
-but it will revert the outcome. A passing test is now
-a failure and failing test is expected. All skip conditions
-are reported at the end of test run through the terminal
-reporter.
+You can mark functions, classes or modules for conditional
+skipping (skipif) or as expected-to-fail (xfail). The difference
+between the two is that 'xfail' will still execute test functions
+but it will invert the outcome: a passing test becomes a failure and
+a failing test becomes a semi-passing one. All skip conditions are
+reported at the end of the test run through the terminal reporter.
 
 .. _skipif:
 
 skipping a single function
 -------------------------------------------
@@ -27,15 +25,18 @@ Here is an example for skipping a test function on Python3::
 
     def test_function():
         ...
 
-Conditions are specified as python expressions
-and can access the ``sys`` module. They can also
-access the config object and thus depend on command
-line or conftest options::
+The 'skipif' marker accepts an **arbitrary python expression**
+as a condition. When setting up the test function the condition
+is evaluated by calling ``eval(expr, namespace)``. The namespace
+contains the ``sys`` and ``os`` modules as well as the
+test ``config`` object. The latter allows you to skip based
+on a test configuration value, e.g. like this::
 
-    @py.test.mark.skipif("config.getvalue('db') is None")
+    @py.test.mark.skipif("not config.getvalue('db')")
     def test_function(...):
         ...
 
+
 conditionally mark a function as "expected to fail"
 -------------------------------------------------------
 
@@ -60,7 +61,7 @@ skip/xfail a whole test class or module
 -------------------------------------------
 
 Instead of marking single functions you can skip
-a whole class of tests when runnign on a specific
+a whole class of tests when running on a specific
 platform::
 
     class TestSomething:
@@ -82,13 +83,12 @@ You can use a helper to skip on a failing import::
 
 You can use this helper at module level or within
 a test or setup function.
 
-You can aslo skip if a library does not have the right version::
+You can also skip if a library does not come with a high enough version::
 
     docutils = py.test.importorskip("docutils", minversion="0.3")
 
 The version will be read from the specified module's ``__version__`` attribute.
-
 dynamically skip from within a test or setup
 ------------------------------------------------
diff --git a/py/__init__.py b/py/__init__.py
index ebf6b6e84..56e798ed9 100644
--- a/py/__init__.py
+++ b/py/__init__.py
@@ -53,6 +53,7 @@
         '_PluginManager'    : '_py.test.pluginmanager:PluginManager',
         'raises'            : '_py.test.outcome:raises',
         'skip'              : '_py.test.outcome:skip',
+        'importorskip'      : '_py.test.outcome:importorskip',
         'fail'              : '_py.test.outcome:fail',
         'exit'              : '_py.test.outcome:exit',
         # configuration/initialization related test api
diff --git a/testing/code/test_assertion.py b/testing/code/test_assertion.py
index 2f131eb06..25b65d0a9 100644
--- a/testing/code/test_assertion.py
+++ b/testing/code/test_assertion.py
@@ -135,11 +135,7 @@ def test_assert_with_brokenrepr_arg():
 
 class TestView:
     def setup_class(cls):
-        try:
-            from _py.code._assertionold import View
-        except ImportError:
-            py.test.skip("requires the compile package")
-        cls.View = View
+        cls.View = py.test.importorskip("_py.code._assertionold").View
 
     def test_class_dispatch(self):
         ### Use a custom class hierarchy with existing instances
diff --git a/testing/code/test_source.py b/testing/code/test_source.py
index bef0bdf7e..c3d1fe1ec 100644
--- a/testing/code/test_source.py
+++ b/testing/code/test_source.py
@@ -191,9 +191,8 @@ class TestSourceParsingAndCompiling:
         assert len(source) == 9
         assert source.getstatementrange(5) == (0, 9)
 
+    @py.test.mark.skipif("sys.version_info < (2,6)")
     def test_compile_to_ast(self):
-        if sys.version_info < (2, 6):
-            py.test.skip("requires Python 2.6")
         import ast
         source = Source("x = 4")
         mod = source.compile(flag=ast.PyCF_ONLY_AST)
@@ -257,7 +256,6 @@ def test_getstartingblock_multiline():
         assert len(l) == 4
 
 def test_getline_finally():
-    #py.test.skip("inner statements cannot be located yet.")
     def c(): pass
     excinfo = py.test.raises(TypeError, """
            teardown = None
diff --git a/testing/io_/test_terminalwriter.py b/testing/io_/test_terminalwriter.py
index 982a6cd4b..91d1cedaf 100644
--- a/testing/io_/test_terminalwriter.py
+++ b/testing/io_/test_terminalwriter.py
@@ -2,13 +2,6 @@ import py
 import os, sys
 from _py.io import terminalwriter
 
-def skip_win32():
-    if sys.platform == 'win32':
-        py.test.skip('Not relevant on win32')
-
-import os
-import py
-
 def test_terminal_width_COLUMNS(monkeypatch):
     """ Dummy test for get_terminal_width
     """
@@ -82,14 +75,14 @@ class BaseTests:
         assert len(l) == 1
         assert l[0] == "-" * 26 + " hello " + "-" * 27 + "\n"
 
+    @py.test.mark.skipif("sys.platform == 'win32'")
     def test__escaped(self):
-        skip_win32()
         tw = self.getwriter()
         text2 = tw._escaped("hello", (31))
         assert text2.find("hello") != -1
 
+    @py.test.mark.skipif("sys.platform == 'win32'")
     def test_markup(self):
-        skip_win32()
         tw = self.getwriter()
         for bold in (True, False):
             for color in ("red", "green"):
@@ -104,9 +97,9 @@ class BaseTests:
         tw.line("x", bold=True)
         tw.write("x\n", red=True)
         l = self.getlines()
-        skip_win32()
-        assert len(l[0]) > 2, l
-        assert len(l[1]) > 2, l
+        if sys.platform != "win32":
+            assert len(l[0]) > 2, l
+            assert len(l[1]) > 2, l
 
     def test_attr_fullwidth(self):
         tw = self.getwriter()
diff --git a/testing/process/test_forkedfunc.py b/testing/process/test_forkedfunc.py
index 7cded26ef..c6071387a 100644
--- a/testing/process/test_forkedfunc.py
+++ b/testing/process/test_forkedfunc.py
@@ -1,9 +1,6 @@
 import py, sys, os
 
-def setup_module(mod):
-    if not hasattr(os, 'fork'):
-        py.test.skip("forkedfunc requires os.fork")
-    mod.tmpdir = py.test.ensuretemp(mod.__file__)
+skipif = "not hasattr(os, 'fork')"
 
 def test_waitfinish_removes_tempdir():
     ff = py.process.ForkedFunc(boxf1)
@@ -56,7 +53,7 @@ def test_forkedfunc_on_fds():
 def test_forkedfunc_signal():
     result = py.process.ForkedFunc(boxseg).waitfinish()
     assert result.retval is None
-    if py.std.sys.version_info < (2,4):
+    if sys.version_info < (2,4):
         py.test.skip("signal detection does not work with python prior 2.4")
     assert result.signal == 11
 
diff --git a/testing/process/test_killproc.py b/testing/process/test_killproc.py
index 14d7cac94..88fe4dd6d 100644
--- a/testing/process/test_killproc.py
+++ b/testing/process/test_killproc.py
@@ -13,5 +13,4 @@ def test_kill():
     if sys.platform == "win32" and ret == 0:
         py.test.skip("XXX on win32, subprocess.Popen().wait() on a killed "
                      "process does not yield return value != 0")
-
     assert ret != 0
diff --git a/testing/pytest/plugin/test_pytest_runner.py b/testing/pytest/plugin/test_pytest_runner.py
index a9c90d4c6..08a6e8eec 100644
--- a/testing/pytest/plugin/test_pytest_runner.py
+++ b/testing/pytest/plugin/test_pytest_runner.py
@@ -218,9 +218,8 @@ class TestExecutionNonForked(BaseFunctionalTests):
             py.test.fail("did not raise")
 
 class TestExecutionForked(BaseFunctionalTests):
+    skipif = "not hasattr(os, 'fork')"
     def getrunner(self):
-        if not hasattr(py.std.os, 'fork'):
-            py.test.skip("no os.fork available")
         return runner.forked_run_report
 
     def test_suicide(self, testdir):
@@ -262,10 +261,8 @@ class TestCollectionReports:
         assert not rep.passed
         assert rep.skipped
 
-
+@py.test.mark.skipif("not hasattr(os, 'fork')")
 def test_functional_boxed(testdir):
-    if not hasattr(py.std.os, 'fork'):
-        py.test.skip("needs os.fork")
     p1 = testdir.makepyfile("""
         import os
         def test_function():
diff --git a/testing/pytest/plugin/test_pytest_skipping.py b/testing/pytest/plugin/test_pytest_skipping.py
index ded9e350f..d2d4f5ff6 100644
--- a/testing/pytest/plugin/test_pytest_skipping.py
+++ b/testing/pytest/plugin/test_pytest_skipping.py
@@ -21,6 +21,21 @@ def test_xfail_decorator(testdir):
     ])
     assert result.ret == 1
+def test_xfail_at_module(testdir):
+    p = testdir.makepyfile("""
+        xfail = 'True'
+
+        def test_intentional_xfail():
+            assert 0
+    """)
+    result = testdir.runpytest(p)
+    extra = result.stdout.fnmatch_lines([
+        "*expected failures*",
+        "*test_intentional_xfail*:4*",
+        "*1 xfailed*"
+    ])
+    assert result.ret == 0
+
 def test_skipif_decorator(testdir):
     p = testdir.makepyfile("""
         import py
@@ -84,26 +99,3 @@ def test_evalexpression_cls_config_example(testdir):
     x, y = evalexpression(item, 'skipif')
     assert x == 'config._hackxyz'
     assert y == 3
-
-def test_importorskip():
-    from _py.test.outcome import Skipped
-    from _py.test.plugin.pytest_skipping import importorskip
-    assert importorskip == py.test.importorskip
-    try:
-        sys = importorskip("sys")
-        assert sys == py.std.sys
-        #path = py.test.importorskip("os.path")
-        #assert path == py.std.os.path
-        py.test.raises(Skipped, "py.test.importorskip('alskdj')")
-        py.test.raises(SyntaxError, "py.test.importorskip('x y z')")
-        py.test.raises(SyntaxError, "py.test.importorskip('x=y')")
-        path = importorskip("py", minversion=".".join(py.__version__))
-        mod = py.std.types.ModuleType("hello123")
-        mod.__version__ = "1.3"
-        py.test.raises(Skipped, """
-            py.test.importorskip("hello123", minversion="5.0")
-        """)
-    except Skipped:
-        print(py.code.ExceptionInfo())
-        py.test.fail("spurious skip")
-
diff --git a/testing/pytest/test_outcome.py b/testing/pytest/test_outcome.py
index e20532d8f..dec5ced67 100644
--- a/testing/pytest/test_outcome.py
+++ b/testing/pytest/test_outcome.py
@@ -29,3 +29,30 @@ def test_exception_printing_skip():
     excinfo = py.code.ExceptionInfo()
     s = excinfo.exconly(tryshort=True)
     assert s.startswith("Skipped")
+
+def test_importorskip():
+    from _py.test.outcome import Skipped, importorskip
+    assert importorskip == py.test.importorskip
+    try:
+        sys = importorskip("sys")
+        assert sys == py.std.sys
+        #path = py.test.importorskip("os.path")
+        #assert path == py.std.os.path
+        py.test.raises(Skipped, "py.test.importorskip('alskdj')")
+        py.test.raises(SyntaxError, "py.test.importorskip('x y z')")
+        py.test.raises(SyntaxError, "py.test.importorskip('x=y')")
+        path = importorskip("py", minversion=".".join(py.__version__))
+        mod = py.std.types.ModuleType("hello123")
+        mod.__version__ = "1.3"
+        py.test.raises(Skipped, """
+            py.test.importorskip("hello123", minversion="5.0")
+        """)
+    except Skipped:
+        print(py.code.ExceptionInfo())
+        py.test.fail("spurious skip")
+
+def test_importorskip_imports_last_module_part():
+    import os
+    ospath = py.test.importorskip("os.path")
+    assert os.path == ospath
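
In practice the facilities this patch touches combine in a test module as in
the following minimal sketch. It only uses the forms documented in
skipping.txt above; the ``docutils`` dependency, the ``db`` option and the
test names are illustrative placeholders, not anything the patch ships::

    import py

    # skip this whole module if the (placeholder) dependency is missing or
    # its __version__ is below the required minimum
    docutils = py.test.importorskip("docutils", minversion="0.3")

    # module-level skip condition: the skipping plugin evaluates the string
    # with 'os', 'sys' and the test 'config' object in its namespace
    skipif = "sys.version_info < (2, 4)"

    @py.test.mark.skipif("not config.getvalue('db')")
    def test_needs_db_option():
        # runs only when a 'db' configuration value is set
        assert docutils.__version__

    @py.test.mark.xfail("sys.platform == 'win32'")
    def test_expected_to_fail_on_windows():
        # a failure here is reported as "xfailed" on win32, a pass as
        # "unexpectedly passing"
        assert hasattr(py.std.os, 'fork')

    def test_dynamic_skip():
        # imperative skip from within a test or setup function
        if not hasattr(py.std.os, 'fork'):
            py.test.skip("needs os.fork")

Because importorskip now lives statically at ``py.test.importorskip`` (backed
by ``_py.test.outcome``), a plugin such as pytest_figleaf can call it at
import time for conditional plugin loading, without relying on the skipping
plugin's former ``pytest_namespace`` hook.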