generalize skipping
- rename pytest_xfail to pytest_skipping
- dynamic "skipif" and "xfail" decorators
- move most skipping code to the plugin

also coming with this commit:

- extend mark keyword to accept positional args + docs
- fix a few documentation related issues
- leave version as "trunk" for now

--HG--
branch : trunk
This commit is contained in:
parent 5e21e39125
commit 3ca770b420
@@ -10,5 +10,5 @@ Generator = py.test.collect.Generator
 Function = py.test.collect.Function
 Instance = py.test.collect.Instance
 
-pytest_plugins = "default runner capture terminal keyword xfail tmpdir monkeypatch recwarn pdb pastebin unittest helpconfig nose assertion".split()
+pytest_plugins = "default runner capture terminal keyword skipping tmpdir monkeypatch recwarn pdb pastebin unittest helpconfig nose assertion".split()
 
@@ -56,25 +56,6 @@ def skip(msg=""):
     __tracebackhide__ = True
     raise Skipped(msg=msg)
 
-def importorskip(modname, minversion=None):
-    """ return imported module or skip() """
-    compile(modname, '', 'eval') # to catch syntaxerrors
-    try:
-        mod = __import__(modname)
-    except ImportError:
-        py.test.skip("could not import %r" %(modname,))
-    if minversion is None:
-        return mod
-    verattr = getattr(mod, '__version__', None)
-    if isinstance(minversion, str):
-        minver = minversion.split(".")
-    else:
-        minver = list(minversion)
-    if verattr is None or verattr.split(".") < minver:
-        py.test.skip("module %r has __version__ %r, required is: %r" %(
-            modname, verattr, minversion))
-    return mod
-
 def fail(msg="unknown failure"):
     """ fail with the given Message. """
     __tracebackhide__ = True
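The hunk above removes ``importorskip`` from ``outcome.py``; the new skipping plugin added later in this diff re-exposes it as ``py.test.importorskip`` via ``pytest_namespace``. For orientation, a minimal usage sketch based on the docs in this commit (``docutils`` is only the documentation's example module)::

    # at module level: skips dependent tests when docutils is missing
    docutils = py.test.importorskip("docutils")

    # or additionally require a minimum version, compared against
    # the module's __version__ attribute
    docutils = py.test.importorskip("docutils", minversion="0.3")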
@@ -8,22 +8,29 @@ By default, all filename parts and class/function names of a test
 function are put into the set of keywords for a given test. You can
 specify additional keywords like this::
 
     @py.test.mark.webtest
     def test_send_http():
         ...
 
-This will set an attribute 'webtest' on the given test function
-and by default all such attributes signal keywords. You can
-also set values in this attribute which you could read from
-a hook in order to do something special with respect to
-the test function::
+This will set an attribute 'webtest' to True on the given test function.
+You can read the value 'webtest' from the function's __dict__ later.
 
-    @py.test.mark.timeout(seconds=5)
+You can also set values for an attribute which are put on an empty
+dummy object::
 
-This will set the "timeout" attribute with a Marker object
-that has a 'seconds' attribute.
+    @py.test.mark.webtest(firefox=30)
+    def test_receive():
+        ...
+
+after which ``test_receive.webtest.firefox == 30`` holds true.
+
+In addition to keyword arguments you can also use positional arguments::
+
+    @py.test.mark.webtest("triangular")
+    def test_receive():
+        ...
+
+after which ``test_receive.webtest._0 == 'triangular'`` holds true.
 
 """
 import py
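To make the documented behavior above concrete, here is a small sketch of reading the stored marker back; the ``webtest`` names are taken from the docstring's own examples::

    import py

    @py.test.mark.webtest(firefox=30)
    def test_receive():
        pass

    assert test_receive.webtest.firefox == 30     # keyword args become attributes

    @py.test.mark.webtest("triangular")
    def test_other():
        pass

    assert test_other.webtest._0 == "triangular"  # positional args become _0, _1, ...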
@@ -49,20 +56,20 @@ class MarkerDecorator:
         return "<MarkerDecorator %r %r>" %(name, d)
 
     def __call__(self, *args, **kwargs):
-        if not args:
-            if hasattr(self, 'kwargs'):
-                raise TypeError("double mark-keywords?")
-            self.kwargs = kwargs.copy()
-            return self
-        else:
-            if not len(args) == 1 or not hasattr(args[0], '__dict__'):
-                raise TypeError("need exactly one function to decorate, "
-                                "got %r" %(args,))
-            func = args[0]
-            mh = MarkHolder(getattr(self, 'kwargs', {}))
-            setattr(func, self.markname, mh)
-            return func
+        if args:
+            if hasattr(args[0], '__call__'):
+                func = args[0]
+                mh = MarkHolder(getattr(self, 'kwargs', {}))
+                setattr(func, self.markname, mh)
+                return func
+            # not a function so we memorize all args/kwargs settings
+            for i, arg in enumerate(args):
+                kwargs["_" + str(i)] = arg
+        if hasattr(self, 'kwargs'):
+            raise TypeError("double mark-keywords?")
+        self.kwargs = kwargs.copy()
+        return self
 
 class MarkHolder:
     def __init__(self, kwargs):
         self.__dict__.update(kwargs)
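The rewritten ``__call__`` above implements a two-phase protocol: if the first positional argument is callable it is decorated immediately, otherwise all arguments are memorized for a later decorating call. A rough sketch of the control flow (``mark.webtest`` stands for any ``py.test.mark`` accessor)::

    mark.webtest                      # attribute access yields a MarkerDecorator
    mark.webtest(firefox=30)          # no callable: stores kwargs, returns self
    mark.webtest("triangular")        # positional args are stored as _0, _1, ...

    @mark.webtest                     # callable: attaches a MarkHolder and
    def test_send():                  # returns the function unchanged
        pass

Calling with arguments twice raises the ``"double mark-keywords?"`` TypeError, as the test suite below exercises with ``mark.some(x=3)(f=5)``.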
@@ -175,7 +175,7 @@ class ReSTSyntaxTest(py.test.collect.Item):
                 'to the py package') % (text,)
         relpath = '/'.join(text.split('/')[1:])
         if check:
-            pkgroot = py.__pkg__.getpath()
+            pkgroot = py.path.local(py._py.__file__).dirpath()
             abspath = pkgroot.join(relpath)
             assert pkgroot.join(relpath).check(), (
                 'problem with linkrole :source:`%s`: '
@@ -276,7 +276,7 @@ class SetupState(object):
         assert not self._finalizers
 
     def teardown_exact(self, item):
-        if item == self.stack[-1]:
+        if self.stack and item == self.stack[-1]:
             self._pop_and_teardown()
         else:
             self._callfinalizers(item)
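The added ``self.stack and`` guard matters because ``self.stack[-1]`` raises ``IndexError`` on an empty list. A minimal sketch mirroring the regression test added later in this commit (``item`` is any collected test item)::

    ss = runner.SetupState()
    ss.teardown_exact(item)   # old code: IndexError from self.stack[-1]
    ss.teardown_exact(item)   # new code: falls through to _callfinalizers(item)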
@@ -0,0 +1,201 @@
+"""
+mark python test functions, classes or modules for conditional
+skipping (skipif) or as expected-to-fail (xfail). Both declarations
+lead to special reporting and both can be systematically associated
+with functions, whole classes or modules. The difference between
+the two is that 'xfail' will still execute test functions
+but it will revert the outcome. A passing test is now
+a failure and failing test is expected. All skip conditions
+are reported at the end of test run through the terminal
+reporter.
+
+.. _skipif:
+
+skip a test function conditionally
+-------------------------------------------
+
+Here is an example for skipping a test function on Python3::
+
+    @py.test.mark.skipif("sys.version_info >= (3,0)")
+    def test_function():
+        ...
+
+Conditions are specified as python expressions
+and can access the ``sys`` module. They can also
+access the config object and thus depend on command
+line or conftest options::
+
+    @py.test.mark.skipif("config.getvalue('db') is None")
+    def test_function(...):
+        ...
+
+conditionally mark a function as "expected to fail"
+-------------------------------------------------------
+
+You can use the ``xfail`` keyword to mark your test functions as
+'expected to fail'::
+
+    @py.test.mark.xfail
+    def test_hello():
+        ...
+
+This test will be executed but no traceback will be reported
+when it fails. Instead terminal reporting will list it in the
+"expected to fail" or "unexpectedly passing" sections.
+As with skipif_ you may selectively expect a failure
+depending on platform::
+
+    @py.test.mark.xfail("sys.version_info >= (3,0)")
+    def test_function():
+        ...
+
+skip/xfail a whole test class or module
+-------------------------------------------
+
+Instead of marking single functions you can skip
+a whole class of tests when running on a specific
+platform::
+
+    class TestSomething:
+        skipif = "sys.platform == 'win32'"
+
+Or you can mark all test functions as expected
+to fail for a specific test configuration::
+
+    xfail = "config.getvalue('db') == 'mysql'"
+
+
+skip if a dependency cannot be imported
+---------------------------------------------
+
+You can use a helper to skip on a failing import::
+
+    docutils = py.test.importorskip("docutils")
+
+You can use this helper at module level or within
+a test or setup function.
+
+You can also skip if a library does not have the right version::
+
+    docutils = py.test.importorskip("docutils", minversion="0.3")
+
+The version will be read from the specified module's ``__version__`` attribute.
+
+
+dynamically skip from within a test or setup
+-------------------------------------------------
+
+If you want to skip the execution of a test you can call
+``py.test.skip()`` within a test, a setup or from a
+`funcarg factory`_ function. Example::
+
+    def test_function():
+        if not valid_config():
+            py.test.skip("unsupported configuration")
+
+.. _`funcarg factory`: ../funcargs.html#factory
+
+"""
+# XXX not all skip-related code is contained in
+# this plugin yet, some remains in outcome.py and
+# the Skipped Exception is imported here and there.
+
+import py
+
+def pytest_namespace():
+    return {'importorskip': importorskip}
+
+def pytest_runtest_setup(item):
+    expr, result = evalexpression(item, 'skipif')
+    if result:
+        py.test.skip(expr)
+
+def pytest_runtest_makereport(__multicall__, item, call):
+    if call.when != "call":
+        return
+    if hasattr(item, 'obj'):
+        expr, result = evalexpression(item, 'xfail')
+        if result:
+            res = __multicall__.execute()
+            if call.excinfo:
+                res.skipped = True
+                res.failed = res.passed = False
+            else:
+                res.skipped = res.passed = False
+                res.failed = True
+            return res
+
+def pytest_report_teststatus(report):
+    if 'xfail' in report.keywords:
+        if report.skipped:
+            return "xfailed", "x", "xfail"
+        elif report.failed:
+            return "xpassed", "P", "xpass"
+
+# called by the terminalreporter instance/plugin
+def pytest_terminal_summary(terminalreporter):
+    tr = terminalreporter
+    xfailed = tr.stats.get("xfailed")
+    if xfailed:
+        tr.write_sep("_", "expected failures")
+        for rep in xfailed:
+            entry = rep.longrepr.reprcrash
+            modpath = rep.item.getmodpath(includemodule=True)
+            pos = "%s %s:%d: " %(modpath, entry.path, entry.lineno)
+            reason = rep.longrepr.reprcrash.message
+            i = reason.find("\n")
+            if i != -1:
+                reason = reason[:i]
+            tr._tw.line("%s %s" %(pos, reason))
+
+    xpassed = terminalreporter.stats.get("xpassed")
+    if xpassed:
+        tr.write_sep("_", "UNEXPECTEDLY PASSING TESTS")
+        for rep in xpassed:
+            fspath, lineno, modpath = rep.item.reportinfo()
+            pos = "%s %s:%d: unexpectedly passing" %(modpath, fspath, lineno)
+            tr._tw.line(pos)
+
+def importorskip(modname, minversion=None):
+    """ return imported module or perform a dynamic skip() """
+    compile(modname, '', 'eval') # to catch syntaxerrors
+    try:
+        mod = __import__(modname)
+    except ImportError:
+        py.test.skip("could not import %r" %(modname,))
+    if minversion is None:
+        return mod
+    verattr = getattr(mod, '__version__', None)
+    if isinstance(minversion, str):
+        minver = minversion.split(".")
+    else:
+        minver = list(minversion)
+    if verattr is None or verattr.split(".") < minver:
+        py.test.skip("module %r has __version__ %r, required is: %r" %(
+            modname, verattr, minversion))
+    return mod
+
+def getexpression(item, keyword):
+    if isinstance(item, py.test.collect.Function):
+        val = getattr(item.obj, keyword, None)
+        val = getattr(val, '_0', val)
+        if val is not None:
+            return val
+    cls = item.getparent(py.test.collect.Class)
+    if cls and hasattr(cls.obj, keyword):
+        return getattr(cls.obj, keyword)
+    mod = item.getparent(py.test.collect.Module)
+    return getattr(mod.obj, keyword, None)
+
+def evalexpression(item, keyword):
+    expr = getexpression(item, keyword)
+    result = None
+    if expr:
+        if isinstance(expr, str):
+            d = {'sys': py.std.sys, 'config': item.config}
+            result = eval(expr, d)
+        else:
+            result = expr
+    return expr, result
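To pin down the evaluation semantics of ``evalexpression`` above: a string condition is evaluated with only ``sys`` and ``config`` bound in its namespace; any non-string value is used as the result directly. A self-contained sketch (``FakeConfig`` is a stand-in invented here for illustration)::

    import sys

    class FakeConfig:                  # stand-in for the real config object
        def getvalue(self, name):
            return None

    d = {'sys': sys, 'config': FakeConfig()}
    print(eval("sys.version_info >= (3,0)", d))      # a skipif/xfail condition
    print(eval("config.getvalue('db') is None", d))  # True with this stand-in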
@@ -1,65 +0,0 @@
-"""
-mark python test functions as expected-to-fail and report them separately.
-
-usage
-------------
-
-Use the generic mark decorator to mark your test functions as
-'expected to fail'::
-
-    @py.test.mark.xfail
-    def test_hello():
-        ...
-
-This test will be executed but no traceback will be reported
-when it fails. Instead terminal reporting will list it in the
-"expected to fail" section or "unexpectedly passing" section.
-
-"""
-
-import py
-
-def pytest_runtest_makereport(__multicall__, item, call):
-    if call.when != "call":
-        return
-    if hasattr(item, 'obj') and py.builtin._getfuncdict(item.obj):
-        if 'xfail' in py.builtin._getfuncdict(item.obj):
-            res = __multicall__.execute()
-            if call.excinfo:
-                res.skipped = True
-                res.failed = res.passed = False
-            else:
-                res.skipped = res.passed = False
-                res.failed = True
-            return res
-
-def pytest_report_teststatus(report):
-    if 'xfail' in report.keywords:
-        if report.skipped:
-            return "xfailed", "x", "xfail"
-        elif report.failed:
-            return "xpassed", "P", "xpass"
-
-# called by the terminalreporter instance/plugin
-def pytest_terminal_summary(terminalreporter):
-    tr = terminalreporter
-    xfailed = tr.stats.get("xfailed")
-    if xfailed:
-        tr.write_sep("_", "expected failures")
-        for rep in xfailed:
-            entry = rep.longrepr.reprcrash
-            modpath = rep.item.getmodpath(includemodule=True)
-            pos = "%s %s:%d: " %(modpath, entry.path, entry.lineno)
-            reason = rep.longrepr.reprcrash.message
-            i = reason.find("\n")
-            if i != -1:
-                reason = reason[:i]
-            tr._tw.line("%s %s" %(pos, reason))
-
-    xpassed = terminalreporter.stats.get("xpassed")
-    if xpassed:
-        tr.write_sep("_", "UNEXPECTEDLY PASSING TESTS")
-        for rep in xpassed:
-            fspath, lineno, modpath = rep.item.reportinfo()
-            pos = "%s %s:%d: unexpectedly passing" %(modpath, fspath, lineno)
-            tr._tw.line(pos)
@@ -5,7 +5,7 @@ WIDTH = 75
 
 plugins = [
     ('plugins for Python test functions',
-        'xfail figleaf monkeypatch capture recwarn',),
+        'skipping figleaf monkeypatch capture recwarn',),
     ('plugins for other testing styles and languages',
        'oejskit unittest nose django doctest restdoc'),
     ('plugins for generic reporting and failure logging',
@@ -252,7 +252,7 @@ class PluginDoc(RestWriter):
             warn("missing docstring", func)
 
     def emit_options(self, plugin):
-        from py.__.test.parseopt import Parser
+        from _py.test.parseopt import Parser
         options = []
         parser = Parser(processopt=options.append)
         if hasattr(plugin, 'pytest_addoption'):
@@ -1,6 +1,11 @@
 Changes between 1.0.2 and '1.1.0b1'
 =====================================
 
+* generalized skipping: a new way to mark python functions with skipif or xfail
+  at function, class and modules level based on platform or sys-module attributes.
+
+* extend py.test.mark decorator to allow for positional args
+
 * introduce and test "py.cleanup -d" to remove empty directories
 
 * fix issue #59 - robustify unittest test collection
@ -1,5 +1,5 @@
|
|||
import py
|
||||
from py.__.rest.resthtml import convert_rest_html, strip_html_header
|
||||
from _py.rest.resthtml import convert_rest_html, strip_html_header
|
||||
|
||||
html = py.xml.html
|
||||
|
||||
|
|
|
@@ -125,22 +125,11 @@ a PDB `Python debugger`_ when a test fails.
 advanced skipping of tests
 -------------------------------
 
-If you want to skip tests you can use ``py.test.skip`` within
-test or setup functions. Example::
-
-    def test_hello():
-        if sys.platform != "win32":
-            py.test.skip("only win32 supported")
-
-You can also use a helper to skip on a failing import::
-
-    docutils = py.test.importorskip("docutils")
-
-or to skip if a library does not have the right version::
-
-    docutils = py.test.importorskip("docutils", minversion="0.3")
-
-The version will be read from the specified module's ``__version__`` attribute.
+py.test has builtin support for skipping tests or expecting
+failures on tests on certain platforms. Apart from the
+minimal py.test style also unittest- and nose-style tests
+can make use of this feature.
 
 .. _`funcargs mechanism`: funcargs.html
 .. _`unittest.py`: http://docs.python.org/library/unittest.html
@@ -276,6 +276,7 @@ methods in a convenient way.
 .. _`conftest plugin`: customize.html#conftestplugin
 
+.. _`funcarg factory`:
 .. _factory:
 
 funcarg factories: setting up test function arguments
 ==============================================================
 
@@ -139,6 +139,15 @@ hook specification sourcecode
 # distributed testing
 # -------------------------------------------------------------------------
 
+def pytest_gwmanage_newgateway(gateway, platinfo):
+    """ called on new raw gateway creation. """
+
+def pytest_gwmanage_rsyncstart(source, gateways):
+    """ called before rsyncing a directory to remote gateways takes place. """
+
+def pytest_gwmanage_rsyncfinish(source, gateways):
+    """ called after rsyncing a directory to remote gateways takes place. """
+
 def pytest_testnodeready(node):
     """ Test Node is ready to operate. """
 
@@ -2,7 +2,7 @@
 plugins for Python test functions
 =================================
 
-xfail_ mark python test functions as expected-to-fail and report them separately.
+skipping_ mark python test functions, classes or modules for conditional
 
 figleaf_ write and report coverage data with 'figleaf'.
 
@@ -14,22 +14,29 @@ By default, all filename parts and class/function names of a test
 function are put into the set of keywords for a given test. You can
 specify additional keywords like this::
 
     @py.test.mark.webtest
     def test_send_http():
         ...
 
-This will set an attribute 'webtest' on the given test function
-and by default all such attributes signal keywords. You can
-also set values in this attribute which you could read from
-a hook in order to do something special with respect to
-the test function::
+This will set an attribute 'webtest' to True on the given test function.
+You can read the value 'webtest' from the function's __dict__ later.
 
-    @py.test.mark.timeout(seconds=5)
+You can also set values for an attribute which are put on an empty
+dummy object::
 
-This will set the "timeout" attribute with a Marker object
-that has a 'seconds' attribute.
+    @py.test.mark.webtest(firefox=30)
+    def test_receive():
+        ...
+
+after which ``test_receive.webtest.firefox == 30`` holds true.
+
+In addition to keyword arguments you can also use positional arguments::
+
+    @py.test.mark.webtest("triangular")
+    def test_receive():
+        ...
+
+after which ``test_receive.webtest._0 == 'triangular'`` holds true.
 
 Start improving this plugin in 30 seconds
 =========================================
 
@@ -5,22 +5,23 @@
 .. _`pytest_monkeypatch.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_monkeypatch.py
 .. _`pytest_keyword.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_keyword.py
 .. _`pastebin`: pastebin.html
+.. _`skipping`: skipping.html
 .. _`plugins`: index.html
 .. _`pytest_capture.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_capture.py
 .. _`pytest_doctest.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_doctest.py
 .. _`capture`: capture.html
 .. _`pytest_nose.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_nose.py
 .. _`pytest_restdoc.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_restdoc.py
-.. _`xfail`: xfail.html
+.. _`restdoc`: restdoc.html
 .. _`pytest_pastebin.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_pastebin.py
 .. _`pytest_figleaf.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_figleaf.py
 .. _`pytest_hooklog.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_hooklog.py
+.. _`pytest_skipping.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_skipping.py
 .. _`checkout the py.test development version`: ../../download.html#checkout
 .. _`pytest_helpconfig.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_helpconfig.py
 .. _`oejskit`: oejskit.html
 .. _`doctest`: doctest.html
 .. _`get in contact`: ../../contact.html
-.. _`pytest_xfail.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_xfail.py
 .. _`pytest_capture.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_capture.py
 .. _`figleaf`: figleaf.html
 .. _`customize`: ../customize.html
 .. _`hooklog`: hooklog.html
@@ -30,7 +31,6 @@
 .. _`monkeypatch`: monkeypatch.html
 .. _`resultlog`: resultlog.html
 .. _`keyword`: keyword.html
-.. _`restdoc`: restdoc.html
 .. _`django`: django.html
 .. _`pytest_unittest.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/py/test/plugin/pytest_unittest.py
 .. _`nose`: nose.html
@@ -0,0 +1,115 @@
+
+pytest_skipping plugin
+======================
+
+mark python test functions, classes or modules for conditional
+
+.. contents::
+  :local:
+
+skipping (skipif) or as expected-to-fail (xfail). Both declarations
+lead to special reporting and both can be systematically associated
+with functions, whole classes or modules. The difference between
+the two is that 'xfail' will still execute test functions
+but it will revert the outcome. A passing test is now
+a failure and failing test is expected. All skip conditions
+are reported at the end of test run through the terminal
+reporter.
+
+.. _skipif:
+
+skip a test function conditionally
+-------------------------------------------
+
+Here is an example for skipping a test function on Python3::
+
+    @py.test.mark.skipif("sys.version_info >= (3,0)")
+    def test_function():
+        ...
+
+Conditions are specified as python expressions
+and can access the ``sys`` module. They can also
+access the config object and thus depend on command
+line or conftest options::
+
+    @py.test.mark.skipif("config.getvalue('db') is None")
+    def test_function(...):
+        ...
+
+conditionally mark a function as "expected to fail"
+-------------------------------------------------------
+
+You can use the ``xfail`` keyword to mark your test functions as
+'expected to fail'::
+
+    @py.test.mark.xfail
+    def test_hello():
+        ...
+
+This test will be executed but no traceback will be reported
+when it fails. Instead terminal reporting will list it in the
+"expected to fail" or "unexpectedly passing" sections.
+As with skipif_ you may selectively expect a failure
+depending on platform::
+
+    @py.test.mark.xfail("sys.version_info >= (3,0)")
+    def test_function():
+        ...
+
+skip/xfail a whole test class or module
+-------------------------------------------
+
+Instead of marking single functions you can skip
+a whole class of tests when running on a specific
+platform::
+
+    class TestSomething:
+        skipif = "sys.platform == 'win32'"
+
+Or you can mark all test functions as expected
+to fail for a specific test configuration::
+
+    xfail = "config.getvalue('db') == 'mysql'"
+
+
+skip if a dependency cannot be imported
+---------------------------------------------
+
+You can use a helper to skip on a failing import::
+
+    docutils = py.test.importorskip("docutils")
+
+You can use this helper at module level or within
+a test or setup function.
+
+You can also skip if a library does not have the right version::
+
+    docutils = py.test.importorskip("docutils", minversion="0.3")
+
+The version will be read from the specified module's ``__version__`` attribute.
+
+
+dynamically skip from within a test or setup
+-------------------------------------------------
+
+If you want to skip the execution of a test you can call
+``py.test.skip()`` within a test, a setup or from a
+`funcarg factory`_ function. Example::
+
+    def test_function():
+        if not valid_config():
+            py.test.skip("unsupported configuration")
+
+.. _`funcarg factory`: ../funcargs.html#factory
+
+Start improving this plugin in 30 seconds
+=========================================
+
+
+1. Download `pytest_skipping.py`_ plugin source code
+2. put it somewhere as ``pytest_skipping.py`` into your import path
+3. a subsequent ``py.test`` run will use your local version
+
+Checkout customize_, other plugins_ or `get in contact`_.
+
+.. include:: links.txt
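Note that the ``config.getvalue('db')`` conditions in the page above presuppose that a ``db`` option exists; the docs do not define one. One plausible way to provide it from a ``conftest.py``, using the ``pytest_addoption`` hook that appears elsewhere in this diff (the option name is hypothetical)::

    def pytest_addoption(parser):
        parser.addoption("--db", action="store", dest="db", default=None,
                         help="database backend to test against")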
@@ -1,34 +0,0 @@
-
-pytest_xfail plugin
-===================
-
-mark python test functions as expected-to-fail and report them separately.
-
-.. contents::
-  :local:
-
-usage
-------------
-
-Use the generic mark decorator to mark your test functions as
-'expected to fail'::
-
-    @py.test.mark.xfail
-    def test_hello():
-        ...
-
-This test will be executed but no traceback will be reported
-when it fails. Instead terminal reporting will list it in the
-"expected to fail" section or "unexpectedly passing" section.
-
-Start improving this plugin in 30 seconds
-=========================================
-
-
-1. Download `pytest_xfail.py`_ plugin source code
-2. put it somewhere as ``pytest_xfail.py`` into your import path
-3. a subsequent ``py.test`` run will use your local version
-
-Checkout customize_, other plugins_ or `get in contact`_.
-
-.. include:: links.txt
@@ -15,7 +15,7 @@ For questions please check out http://pylib.org/contact.html
 
 (c) Holger Krekel and others, 2009
 """
-version = "1.1.0b1"
+version = "trunk"
 
 __version__ = version = version or "1.1.x"
 import _py.apipkg
@@ -53,7 +53,6 @@ _py.apipkg.initpkg(__name__, dict(
     '_PluginManager' : '_py.test.pluginmanager:PluginManager',
     'raises' : '_py.test.outcome:raises',
     'skip' : '_py.test.outcome:skip',
-    'importorskip' : '_py.test.outcome:importorskip',
     'fail' : '_py.test.outcome:fail',
     'exit' : '_py.test.outcome:exit',
     # configuration/initialization related test api
@@ -208,7 +208,7 @@ class TestLocalPath(common.CommonFSTests):
         assert l[2] == p3
 
 class TestExecutionOnWindows:
-    disabled = py.std.sys.platform != 'win32'
+    skipif = "sys.platform != 'win32'"
 
     def test_sysfind(self):
         x = py.path.local.sysfind('cmd')
@@ -216,7 +216,7 @@ class TestExecutionOnWindows:
     assert py.path.local.sysfind('jaksdkasldqwe') is None
 
 class TestExecution:
-    disabled = py.std.sys.platform == 'win32'
+    skipif = "sys.platform == 'win32'"
 
     def test_sysfind(self):
         x = py.path.local.sysfind('test')
@@ -346,8 +346,7 @@ def test_homedir():
     assert homedir.check(dir=1)
 
 class TestWINLocalPath:
-    #root = local(TestLocalPath.root)
-    disabled = py.std.sys.platform != 'win32'
+    skipif = "sys.platform != 'win32'"
 
     def test_owner_group_not_implemented(self):
         py.test.raises(NotImplementedError, "path1.stat().owner")
@@ -396,7 +395,7 @@ class TestWINLocalPath:
         old.chdir()
 
 class TestPOSIXLocalPath:
-    disabled = py.std.sys.platform == 'win32'
+    skipif = "sys.platform == 'win32'"
 
     def test_samefile(self, tmpdir):
         assert tmpdir.samefile(tmpdir)
@@ -50,12 +50,11 @@ class TestSvnURLCommandPath(CommonSvnTests):
     def test_svnurl_characters_tilde_end(self, path1):
         py.path.svnurl("http://host.com/some/file~")
 
+    @py.test.mark.xfail("sys.platform == 'win32'")
     def test_svnurl_characters_colon_path(self, path1):
-        if py.std.sys.platform == 'win32':
-            # colons are allowed on win32, because they're part of the drive
-            # part of an absolute path... however, they shouldn't be allowed in
-            # other parts, I think
-            py.test.skip('XXX fixme win32')
+        # colons are allowed on win32, because they're part of the drive
+        # part of an absolute path... however, they shouldn't be allowed in
+        # other parts, I think
         py.test.raises(ValueError, 'py.path.svnurl("http://host.com/foo:bar")')
 
     def test_export(self, path1, tmpdir):
@@ -1,3 +1,3 @@
 
-pytest_plugins = "pytest_xfail", "pytest_pytester", "pytest_tmpdir"
+pytest_plugins = "skipping", "pytester", "tmpdir"
 
@@ -14,12 +14,14 @@ def test_pytest_mark_api():
     assert f.world.x == 3
     assert f.world.y == 4
 
+    mark.world("hello")(f)
+    assert f.world._0 == "hello"
+
     py.test.raises(TypeError, "mark.some(x=3)(f=5)")
 
 def test_mark_plugin(testdir):
     p = testdir.makepyfile("""
         import py
         pytest_plugins = "keyword"
         @py.test.mark.hello
         def test_hello():
             assert hasattr(test_hello, 'hello')
@@ -27,6 +27,12 @@ class TestSetupState:
         ss.teardown_all()
         assert not l
 
+    def test_teardown_exact_stack_empty(self, testdir):
+        item = testdir.getitem("def test_func(): pass")
+        ss = runner.SetupState()
+        ss.teardown_exact(item)
+        ss.teardown_exact(item)
+        ss.teardown_exact(item)
+
 class BaseFunctionalTests:
     def test_passfunction(self, testdir):
@@ -0,0 +1,109 @@
+import py
+
+def test_xfail_decorator(testdir):
+    p = testdir.makepyfile(test_one="""
+        import py
+        @py.test.mark.xfail
+        def test_this():
+            assert 0
+
+        @py.test.mark.xfail
+        def test_that():
+            assert 1
+    """)
+    result = testdir.runpytest(p)
+    extra = result.stdout.fnmatch_lines([
+        "*expected failures*",
+        "*test_one.test_this*test_one.py:4*",
+        "*UNEXPECTEDLY PASSING*",
+        "*test_that*",
+        "*1 xfailed*"
+    ])
+    assert result.ret == 1
+
+def test_skipif_decorator(testdir):
+    p = testdir.makepyfile("""
+        import py
+        @py.test.mark.skipif("hasattr(sys, 'platform')")
+        def test_that():
+            assert 0
+    """)
+    result = testdir.runpytest(p)
+    extra = result.stdout.fnmatch_lines([
+        "*Skipped*platform*",
+        "*1 skipped*"
+    ])
+    assert result.ret == 0
+
+def test_skipif_class(testdir):
+    p = testdir.makepyfile("""
+        import py
+        class TestClass:
+            skipif = "True"
+            def test_that(self):
+                assert 0
+            def test_though(self):
+                assert 0
+    """)
+    result = testdir.runpytest(p)
+    extra = result.stdout.fnmatch_lines([
+        "*2 skipped*"
+    ])
+
+def test_getexpression(testdir):
+    from _py.test.plugin.pytest_skipping import getexpression
+    l = testdir.getitems("""
+        import py
+        mod = 5
+        class TestClass:
+            cls = 4
+            @py.test.mark.func(3)
+            def test_func(self):
+                pass
+            @py.test.mark.just
+            def test_other(self):
+                pass
+    """)
+    item, item2 = l
+    assert getexpression(item, 'xyz') is None
+    assert getexpression(item, 'func') == 3
+    assert getexpression(item, 'cls') == 4
+    assert getexpression(item, 'mod') == 5
+
+    assert getexpression(item2, 'just')
+
+def test_evalexpression_cls_config_example(testdir):
+    from _py.test.plugin.pytest_skipping import evalexpression
+    item, = testdir.getitems("""
+        class TestClass:
+            skipif = "config._hackxyz"
+            def test_func(self):
+                pass
+    """)
+    item.config._hackxyz = 3
+    x, y = evalexpression(item, 'skipif')
+    assert x == 'config._hackxyz'
+    assert y == 3
+
+def test_importorskip():
+    from _py.test.outcome import Skipped
+    from _py.test.plugin.pytest_skipping import importorskip
+    assert importorskip == py.test.importorskip
+    try:
+        sys = importorskip("sys")
+        assert sys == py.std.sys
+        #path = py.test.importorskip("os.path")
+        #assert path == py.std.os.path
+        py.test.raises(Skipped, "py.test.importorskip('alskdj')")
+        py.test.raises(SyntaxError, "py.test.importorskip('x y z')")
+        py.test.raises(SyntaxError, "py.test.importorskip('x=y')")
+        path = importorskip("py", minversion=".".join(py.__version__))
+        mod = py.std.types.ModuleType("hello123")
+        mod.__version__ = "1.3"
+        py.test.raises(Skipped, """
+            py.test.importorskip("hello123", minversion="5.0")
+        """)
+    except Skipped:
+        print(py.code.ExceptionInfo())
+        py.test.fail("spurious skip")
@@ -1,21 +0,0 @@
-
-def test_xfail(testdir):
-    p = testdir.makepyfile(test_one="""
-        import py
-        @py.test.mark.xfail
-        def test_this():
-            assert 0
-
-        @py.test.mark.xfail
-        def test_that():
-            assert 1
-    """)
-    result = testdir.runpytest(p)
-    extra = result.stdout.fnmatch_lines([
-        "*expected failures*",
-        "*test_one.test_this*test_one.py:4*",
-        "*UNEXPECTEDLY PASSING*",
-        "*test_that*",
-    ])
-    assert result.ret == 1
@@ -15,26 +15,6 @@ class TestRaises:
     def test_raises_function(self):
         py.test.raises(ValueError, int, 'hello')
 
-def test_importorskip():
-    from _py.test.outcome import Skipped
-    try:
-        sys = py.test.importorskip("sys")
-        assert sys == py.std.sys
-        #path = py.test.importorskip("os.path")
-        #assert path == py.std.os.path
-        py.test.raises(Skipped, "py.test.importorskip('alskdj')")
-        py.test.raises(SyntaxError, "py.test.importorskip('x y z')")
-        py.test.raises(SyntaxError, "py.test.importorskip('x=y')")
-        path = py.test.importorskip("py", minversion=".".join(py.__version__))
-        mod = py.std.types.ModuleType("hello123")
-        mod.__version__ = "1.3"
-        py.test.raises(Skipped, """
-            py.test.importorskip("hello123", minversion="5.0")
-        """)
-    except Skipped:
-        print(py.code.ExceptionInfo())
-        py.test.fail("spurious skip")
-
 def test_pytest_exit():
     try:
         py.test.exit("hello")
@@ -10,7 +10,7 @@ class TestParser:
 
     def test_epilog(self):
         parser = parseopt.Parser()
-        assert not parser.epilog
+        assert not parser.epilog
         parser.epilog += "hello"
         assert parser.epilog == "hello"
@@ -76,15 +76,6 @@ class TestParser:
         args = parser.parse_setoption([], option)
         assert option.hello == "x"
 
-    def test_parser_epilog(self, testdir):
-        testdir.makeconftest("""
-            def pytest_addoption(parser):
-                parser.epilog = "hello world"
-        """)
-        result = testdir.runpytest('--help')
-        #assert result.ret != 0
-        assert result.stdout.fnmatch_lines(["*hello world*"])
-
     def test_parse_setoption(self):
         parser = parseopt.Parser()
         parser.addoption("--hello", dest="hello", action="store")
@@ -109,3 +100,14 @@ class TestParser:
         option, args = parser.parse([])
         assert option.hello == "world"
         assert option.this == 42
+
+@py.test.mark.skipif("sys.version_info < (2,5)")
+def test_addoption_parser_epilog(testdir):
+    testdir.makeconftest("""
+        def pytest_addoption(parser):
+            parser.epilog = "hello world"
+    """)
+    result = testdir.runpytest('--help')
+    #assert result.ret != 0
+    assert result.stdout.fnmatch_lines(["*hello world*"])