fix issue30 (the second time)
put module globals into namespace for xfail and skipif expressions
parent 682773e0cb
commit 070c73ff2f

CHANGELOG | 30

@@ -1,15 +1,37 @@
 Changes between 2.0.1 and 2.0.2
 ----------------------------------------------
 
-- fix issue30 - better handling and error reporting for errors in xfail
-  expressions
+- fix issue30 - extended xfail/skipif handling and better reporting.
+  If you have a syntax error in your skip/xfail
+  expressions you now get nice error reports.
+
+  Also you can now access module globals from xfail/skipif
+  expressions so that this now works::
+
+    import mymodule
+    @pytest.mark.skipif("mymodule.__version__[0] != '1'")
+    def test_function():
+        pass
+
+  This will not run the test function if the module's version string
+  does not start with a "1". Note that specifying a string instead
+  of a boolean expression allows py.test to report meaningful information
+  when summarizing a test run as to what conditions lead to skipping
+  (or xfail-ing) tests.
 
 - fix issue28 - setup_method and pytest_generate_tests work together
   The setup_method fixture method now gets called also for
   test function invocations generated from the pytest_generate_tests
   hook.
 
 - fix issue23 - tmpdir argument now works on Python3.2 and WindowsXP
   (which apparently starts to offer os.symlink now)
   Starting with Python3.2 os.symlink may be supported. By requiring
   a newer py lib version the py.path.local() implementation acknowledges
   this.
 
-- fixed some typos in the docs (thanks Victor)
+- fixed typos in the docs (thanks Victor, Brianna) and particular
+  thanks to Laura, who also reviewed the documentation, which
+  led to some improvements.
 
 Changes between 2.0.0 and 2.0.1
 ----------------------------------------------

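The reporting benefit mentioned in the changelog entry above can be seen with a tiny self-contained example; the file and test names below are illustrative and not part of this commit::

    # test_skip_report.py -- illustrative sketch only
    import sys
    import pytest

    @pytest.mark.skipif("sys.version_info >= (3,0)")
    def test_runs_only_on_python2():
        assert True

When the condition holds (here: on Python 3), ``py.test -rs test_skip_report.py`` lists the skipped test in the short summary together with its condition string; passing a non-string condition is, after this commit, reported as an error instead.
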
@@ -54,9 +54,18 @@ class MarkEvaluator:
                         %(self.name, self.expr, "\n".join(msg)),
                         pytrace=False)
 
+    def _getglobals(self):
+        d = {'os': py.std.os, 'sys': py.std.sys, 'config': self.item.config}
+        func = self.item.obj
+        try:
+            d.update(func.__globals__)
+        except AttributeError:
+            d.update(func.func_globals)
+        return d
+
     def _istrue(self):
         if self.holder:
-            d = {'os': py.std.os, 'sys': py.std.sys, 'config': self.item.config}
+            d = self._getglobals()
             if self.holder.args:
                 self.result = False
                 for expr in self.holder.args:

@@ -64,7 +73,7 @@ class MarkEvaluator:
                     if isinstance(expr, str):
                         result = cached_eval(self.item.config, expr, d)
                     else:
-                        result = expr
+                        pytest.fail("expression is not a string")
                     if result:
                         self.result = True
                         self.expr = expr

@@ -82,7 +91,7 @@ class MarkEvaluator:
             if not hasattr(self, 'expr'):
                 return ""
             else:
-                return "condition: " + self.expr
+                return "condition: " + str(self.expr)
         return expl

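Conceptually, the new ``_getglobals`` helper above layers the test module's globals over a few fixed names before the condition string is evaluated. A rough standalone sketch of that idea, using plain Python and hypothetical helper names rather than the pytest internals themselves::

    import os
    import sys

    def build_eval_namespace(func, config):
        # fixed names that skip/xfail condition strings can always use
        namespace = {'os': os, 'sys': sys, 'config': config}
        # plus everything defined or imported in the test function's module
        module_globals = getattr(func, '__globals__', None)
        if module_globals is None:          # Python 2 spelling
            module_globals = func.func_globals
        namespace.update(module_globals)
        return namespace

    def evaluate_condition(expr, func, config):
        if not isinstance(expr, str):
            raise TypeError("expression is not a string")
        return bool(eval(expr, build_eval_namespace(func, config)))

Because the module globals are merged in last, names like ``mymodule`` from the changelog example become available to the condition string in exactly the same way as ``os``, ``sys`` and ``config``.
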
@@ -17,5 +17,9 @@ def test_hello3():
 def test_hello4():
     assert 0
 
+@xfail('pytest.__version__[0] != "17"')
 def test_hello5():
+    assert 0
+
+def test_hello6():
     pytest.xfail("reason")

@@ -5,16 +5,17 @@ skip and xfail mechanisms
 =====================================================================
 
 You can skip or "xfail" test functions, either by marking functions
-through a decorator or by calling the ``pytest.skip|xfail`` helpers.
+through a decorator or by calling the ``pytest.skip|xfail`` functions.
 
 A *skip* means that you expect your test to pass unless a certain
 configuration or condition (e.g. wrong Python interpreter, missing
 dependency) prevents it from running.  An *xfail* means that you expect
 your test to fail because of an implementation problem. py.test counts
 and lists *xfailing* tests separately, and you can provide information
 such as a bug number or a URL to give human-readable problem context.
 
 Usually detailed information about skipped/xfailed tests is not shown
-to avoid cluttering the output. You can use the ``-r`` option to
-see details corresponding to the "short" letters shown in the
-test progress::
+at the end of a test run to avoid cluttering the output. You can use
+the ``-r`` option to see details corresponding to the "short" letters
+shown in the test progress::
 
     py.test -rxs  # show extra info on skips and xfail tests

@@ -28,15 +29,32 @@ Skipping a single function
 Here is an example of marking a test function to be skipped
 when run on a Python3 interpreter::
 
     import sys
     @pytest.mark.skipif("sys.version_info >= (3,0)")
     def test_function():
         ...
 
 During test function setup the skipif condition is
 evaluated by calling ``eval(expr, namespace)``. The namespace
-contains the ``sys`` and ``os`` modules and the test
-``config`` object. The latter allows you to skip based
-on a test configuration value e.g. like this::
+contains all the module globals of the test function so that
+you can, for example, check for versions::
+
+    import mymodule
+
+    @pytest.mark.skipif("mymodule.__version__ < '1.2'")
+    def test_function():
+        ...
+
+The test function will be skipped and not run if
+mymodule is below the specified version. The reason
+for specifying the condition as a string is mainly that
+you get more detailed reporting of xfail/skip reasons.
+
+Actually, the namespace is first initialized by
+putting the ``sys`` and ``os`` modules and the test
+``config`` object into it and is then updated with
+the module globals. The latter allows you to skip based
+on a test configuration value::
 
     @pytest.mark.skipif("not config.getvalue('db')")
     def test_function(...):

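For the ``config.getvalue('db')`` condition just above to evaluate to something useful, a command line option with that destination has to be registered; a minimal ``conftest.py`` sketch, where the option name and default are assumptions for illustration::

    # conftest.py -- illustrative only
    def pytest_addoption(parser):
        parser.addoption("--db", action="store", dest="db", default=None,
                         help="database the db-dependent tests run against")

With such a hook in place, running ``py.test`` without ``--db`` skips the test, while ``py.test --db=sqlite`` runs it.
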
@@ -52,7 +70,7 @@ at module level like this::
     ...
 
 
-skip test functions of a class
+skip all test functions of a class
 --------------------------------------
 
 As with all function :ref:`marking` you can do it at

@@ -128,7 +146,7 @@ Running it with the report-on-xfail option gives this output::
     ========================= short test summary info ==========================
     XFAIL xfail_demo.py::test_hello
     XFAIL xfail_demo.py::test_hello2
-      reason: [NOTRUN]
+      reason: [NOTRUN]
     XFAIL xfail_demo.py::test_hello3
       condition: hasattr(os, 'sep')
     XFAIL xfail_demo.py::test_hello4

@@ -1,7 +1,7 @@
 """
 unit and functional testing with Python.
 """
-__version__ = '2.0.2.dev1'
+__version__ = '2.0.2.dev2'
 __all__ = ['main']
 
 from _pytest.core import main, UsageError, _preloadplugins

setup.py | 4

@@ -22,7 +22,7 @@ def main():
         name='pytest',
         description='py.test: simple powerful testing with Python',
         long_description = long_description,
-        version='2.0.2.dev1',
+        version='2.0.2.dev2',
         url='http://pytest.org',
         license='MIT license',
         platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],

@@ -67,4 +67,4 @@ def make_entry_points():
     return {'console_scripts': l}
 
 if __name__ == '__main__':
-    main()
+    main()

@@ -497,4 +497,34 @@ def test_errors_in_xfail_skip_expressions(testdir):
         "*1 pass*2 error*",
     ])
 
+def test_xfail_skipif_with_globals(testdir):
+    testdir.makepyfile("""
+        import pytest
+        x = 3
+        @pytest.mark.skipif("x == 3")
+        def test_skip1():
+            pass
+        @pytest.mark.xfail("x == 3")
+        def test_boolean():
+            assert 0
+    """)
+    result = testdir.runpytest("-rsx")
+    result.stdout.fnmatch_lines([
+        "*SKIP*x == 3*",
+        "*XFAIL*test_boolean*",
+        "*x == 3*",
+    ])
+
+def test_direct_gives_error(testdir):
+    testdir.makepyfile("""
+        import pytest
+        @pytest.mark.skipif(True)
+        def test_skip1():
+            pass
+    """)
+    result = testdir.runpytest()
+    result.stdout.fnmatch_lines([
+        "*1 error*",
+    ])