refine naming, API and docs for py.test.mark mechanism - now contained in pytest_mark plugin

--HG--
branch : trunk
This commit is contained in:
holger krekel 2009-10-22 20:57:21 +02:00
parent 861f34fe90
commit 6c2b1c4363
15 changed files with 281 additions and 201 deletions

View File

@ -10,5 +10,5 @@ Generator = py.test.collect.Generator
Function = py.test.collect.Function
Instance = py.test.collect.Instance
pytest_plugins = "default runner capture terminal keyword skipping tmpdir monkeypatch recwarn pdb pastebin unittest helpconfig nose assertion".split()
pytest_plugins = "default runner capture terminal mark skipping tmpdir monkeypatch recwarn pdb pastebin unittest helpconfig nose assertion".split()

View File

@ -1,60 +1,70 @@
"""
mark test functions with keywords that may hold values.
generic mechanism for marking python functions.
Marking functions by a decorator
By using the ``py.test.mark`` helper you can instantiate
decorators that will set named meta data on test functions.
Marking a single function
----------------------------------------------------
By default, all filename parts and class/function names of a test
function are put into the set of keywords for a given test. You can
specify additional keywords like this::
You can "mark" a test function with meta data like this::
@py.test.mark.webtest
def test_send_http():
...
This will set an attribute 'webtest' to True on the given test function.
You can read the value 'webtest' from the functions __dict__ later.
You can also set values for an attribute which are put on an empty
dummy object::
This will set a "Marker" instance as a function attribute named "webtest".
You can also specify parametrized meta data like this::
@py.test.mark.webtest(firefox=30)
def test_receive():
...
after which ``test_receive.webtest.firefox == 30`` holds true.
The named marker can be accessed like this later::
In addition to keyword arguments you can also use positional arguments::
test_receive.webtest.kwargs['firefox'] == 30
In addition to setting key-value pairs you can also use positional arguments::
@py.test.mark.webtest("triangular")
def test_receive():
...
after which ``test_receive.webtest._args[0] == 'triangular'`` holds true.
and later access it with ``test_receive.webtest.args[0] == 'triangular'``.
.. _`scoped-marking`:
Marking classes or modules
----------------------------------------------------
To mark all methods of a class you can set a class-level attribute::
To mark all methods of a class set a ``pytestmark`` attribute like this::
import py
class TestClass:
pytestmark = py.test.mark.webtest
the marker function will be applied to all test methods.
You can re-use the same markers that you would use for decorating
a function - in fact this marker decorator will be applied
to all test methods of the class.
If you set a marker inside a test module like this::
You can also set a module level marker::
import py
pytestmark = py.test.mark.webtest
the marker will be applied to all functions and methods of
that module. The module marker is applied last.
in which case the marker decorator will be applied to all functions and
methods defined in the module.
Outer ``pytestmark`` keywords will overwrite inner keyword
values. Positional arguments are all appended to the
same '_args' list.
The order in which marker functions are called is this::
per-function (upon import of module already)
per-class
per-module
Later called markers may overwrite previous key-value settings.
Positional arguments are all appended to the same 'args' list
of the Marker object.
"""
import py
@ -86,27 +96,32 @@ class MarkerDecorator:
func = args[0]
holder = getattr(func, self.markname, None)
if holder is None:
holder = MarkHolder(self.markname, self.args, self.kwargs)
holder = Marker(self.markname, self.args, self.kwargs)
setattr(func, self.markname, holder)
else:
holder.__dict__.update(self.kwargs)
holder._args.extend(self.args)
holder.kwargs.update(self.kwargs)
holder.args.extend(self.args)
return func
else:
self.args.extend(args)
self.kwargs.update(kwargs)
return self
class MarkHolder:
class Marker:
def __init__(self, name, args, kwargs):
self._name = name
self._args = args
self._kwargs = kwargs
self.__dict__.update(kwargs)
self.args = args
self.kwargs = kwargs
def __getattr__(self, name):
if name[0] != '_' and name in self.kwargs:
py.log._apiwarn("1.1", "use .kwargs attribute to access key-values")
return self.kwargs[name]
raise AttributeError(name)
def __repr__(self):
return "<Marker %r args=%r kwargs=%r>" % (
self._name, self._args, self._kwargs)
self._name, self.args, self.kwargs)
def pytest_pycollect_makeitem(__multicall__, collector, name, obj):

View File

@ -1,16 +1,22 @@
"""
advanced conditional skipping for python test functions, classes or modules.
advanced skipping for python test functions, classes or modules.
You can mark functions, classes or modules for conditional
skipping (skipif) or as expected-to-fail (xfail). The difference
between the two is that 'xfail' will still execute test functions
but it will invert the outcome: a passing test becomes a failure and
a failing test is a semi-passing one. All skip conditions are
reported at the end of test run through the terminal reporter.
With this plugin you can mark test functions for conditional skipping
or as "xfail", expected-to-fail. Skipping a test will avoid running it
while xfail-marked tests will run and result in an inverted outcome:
a pass becomes a failure and a fail becomes a semi-passing one.
The need for skipping a test is usually connected to a condition.
If a test fails under all conditions then it's probably better
to mark your test as 'xfail'.
By passing ``--report=xfailed,skipped`` to the terminal reporter
you will see summary information on skips and xfail-run tests
at the end of a test run.
.. _skipif:
skip a test function conditionally
mark a test function to be skipped
-------------------------------------------
Here is an example for skipping a test function when
@ -20,6 +26,7 @@ running on Python3::
def test_function():
...
During test function setup the skipif condition is
evaluated by calling ``eval(expr, namespace)``. The namespace
contains the ``sys`` and ``os`` modules as well as the
@ -30,76 +37,75 @@ on a test configuration value e.g. like this::
def test_function(...):
...
Note that `test marking can be declared at whole class- or module level`_.
.. _`test marking can be declared at whole class- or module level`: keyword.html#scoped-marking
mark many test functions at once
--------------------------------------
As with all metadata function marking you can do it at
`whole class- or module level`_. Here is an example
for skipping all methods of a test class based on platform::
class TestPosixCalls:
pytestmark = py.test.mark.skipif("sys.platform == 'win32'")
def test_function(self):
# will not be setup or run under 'win32' platform
#
conditionally mark a function as "expected to fail"
.. _`whole class- or module level`: mark.html#scoped-marking
mark a test function as expected to fail
-------------------------------------------------------
You can use the ``xfail`` keyword to mark your test functions as
'expected to fail'::
You can use the ``xfail`` marker to indicate that you
expect the test to fail::
@py.test.mark.xfail
def test_hello():
...
This test will be executed but no traceback will be reported
when it fails. Instead terminal reporting will list it in the
"expected to fail" or "unexpectedly passing" sections.
As with skipif_ you may selectively expect a failure
depending on platform::
@py.test.mark.xfail("sys.version_info >= (3,0)")
def test_function():
...
skip/xfail a whole test class or module
-------------------------------------------
This test will be run but no traceback will be reported
when it fails. Instead terminal reporting will list it in the
"expected to fail" or "unexpectedly passing" sections.
Instead of marking single functions you can skip
a whole class of tests when running on a specific
platform::
Same as with skipif_ you can also selectively expect a failure
depending on platform::
class TestSomething:
skipif = "sys.platform == 'win32'"
@py.test.mark.xfail("sys.version_info >= (3,0)")
Or you can mark all test functions as expected
to fail for a specific test configuration::
xfail = "config.getvalue('db') == 'mysql'"
def test_function():
...
skip if a dependency cannot be imported
---------------------------------------------
skipping on a missing import dependency
--------------------------------------------------
You can use a helper to skip on a failing import::
You can use the following import helper at module level
or within a test or setup function.
docutils = py.test.importorskip("docutils")
You can use this helper at module level or within
a test or setup function.
You can also skip if a library does not come with a high enough version::
If ``docutils`` cannot be imported here, this will lead to a
skip outcome of the test. You can also skip
if a library does not come with a high enough version::
docutils = py.test.importorskip("docutils", minversion="0.3")
The version will be read from the specified module's ``__version__`` attribute.
dynamically skip from within a test or setup
-------------------------------------------------
imperative skip from within a test or setup function
------------------------------------------------------
If you want to skip the execution of a test you can call
``py.test.skip()`` within a test, a setup or from a
`funcarg factory`_ function. Example::
If for some reason you cannot declare skip-conditions
you can also imperatively produce a Skip-outcome from
within test or setup code. Example::
def test_function():
if not valid_config():
py.test.skip("unsuppored configuration")
.. _`funcarg factory`: ../funcargs.html#factory
"""
# XXX py.test.skip, .importorskip and the Skipped class
# should also be defined in this plugin, requires thought/changes
@ -177,7 +183,7 @@ def evalexpression(item, keyword):
if markholder:
d = {'os': py.std.os, 'sys': py.std.sys, 'config': item.config}
expr, result = None, True
for expr in markholder._args:
for expr in markholder.args:
if isinstance(expr, str):
result = eval(expr, d)
else:

View File

@ -13,7 +13,7 @@ plugins = [
('plugins for generic reporting and failure logging',
'pastebin resultlog terminal',),
('misc plugins / core functionality',
'helpconfig pdb keyword hooklog')
'helpconfig pdb mark hooklog')
#('internal plugins / core functionality',
# #'pdb keyword hooklog runner execnetcleanup # pytester',
# 'pdb keyword hooklog runner execnetcleanup' # pytester',

View File

@ -52,7 +52,7 @@ def pytest_generate_tests(metafunc):
multi = getattr(metafunc.function, 'multi', None)
if multi is None:
return
assert len(multi._kwargs) == 1
for name, l in multi._kwargs.items():
assert len(multi.kwargs) == 1
for name, l in multi.kwargs.items():
for val in l:
metafunc.addcall(funcargs={name: val})
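For context, a hypothetical test module that this hook would parametrize, assuming the hook above lives in a ``conftest.py`` (the ``multi`` marker name simply has to match the ``getattr`` call)::

    import py

    @py.test.mark.multi(color=["red", "green", "blue"])
    def test_color(color):
        # the hook adds one call per listed value
        assert color in ("red", "green", "blue")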

View File

@ -1,6 +1,8 @@
Changes between 1.0.2 and '1.1.0b1'
=====================================
* introduce generalized py.test.mark function marking
* reshuffle / refine command line grouping
* deprecate parser.addgroup in favour of getgroup which creates option group

View File

@ -200,7 +200,7 @@ keywords like this::
and then use those keywords to select tests. See the `pytest_keyword`_
plugin for more information.
.. _`pytest_keyword`: plugin/keyword.html
.. _`pytest_keyword`: plugin/mark.html
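For example, given a module with one marked test function, running ``py.test -k webtest`` selects only that test (a minimal sketch)::

    import py

    @py.test.mark.webtest
    def test_send_http():
        pass

    def test_something_else():
        pass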
easy to extend
=========================================

View File

@ -113,10 +113,10 @@ command line options
--------------------
``-s``
shortcut for --capture=no.
``--capture=method``
set capturing method during tests: fd (default)|sys|no.
Start improving this plugin in 30 seconds
=========================================

View File

@ -2,7 +2,7 @@
plugins for Python test functions
=================================
skipping_ advanced conditional skipping for python test functions, classes or modules.
skipping_ advanced skipping for python test functions, classes or modules.
figleaf_ write and report coverage data with 'figleaf'.
@ -56,7 +56,7 @@ helpconfig_ provide version info, conftest/environment config names.
pdb_ interactive debugging with the Python Debugger.
keyword_ mark test functions with keywords that may hold values.
mark_ generic mechanism for marking python functions.
hooklog_ log invocations of extension hooks to a file.

View File

@ -1,51 +0,0 @@
pytest_keyword plugin
=====================
mark test functions with keywords that may hold values.
.. contents::
:local:
Marking functions and setting rich attributes
----------------------------------------------------
By default, all filename parts and class/function names of a test
function are put into the set of keywords for a given test. You can
specify additional keywords like this::
@py.test.mark.webtest
def test_send_http():
...
This will set an attribute 'webtest' to True on the given test function.
You can read the value 'webtest' from the functions __dict__ later.
You can also set values for an attribute which are put on an empty
dummy object::
@py.test.mark.webtest(firefox=30)
def test_receive():
...
after which ``test_receive.webtest.firefox == 30`` holds true.
In addition to keyword arguments you can also use positional arguments::
@py.test.mark.webtest("triangular")
def test_receive():
...
after which ``test_receive.webtest._1 == 'triangular'`` holds true.
Start improving this plugin in 30 seconds
=========================================
1. Download `pytest_keyword.py`_ plugin source code
2. put it somewhere as ``pytest_keyword.py`` into your import path
3. a subsequent ``py.test`` run will use your local version
Check out customize_, other plugins_ or `get in contact`_.
.. include:: links.txt

View File

@ -3,7 +3,6 @@
.. _`pytest_recwarn.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_recwarn.py
.. _`unittest`: unittest.html
.. _`pytest_monkeypatch.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_monkeypatch.py
.. _`pytest_keyword.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_keyword.py
.. _`pastebin`: pastebin.html
.. _`skipping`: skipping.html
.. _`plugins`: index.html
@ -13,6 +12,7 @@
.. _`pytest_restdoc.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_restdoc.py
.. _`restdoc`: restdoc.html
.. _`pytest_pastebin.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_pastebin.py
.. _`mark`: mark.html
.. _`pytest_figleaf.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_figleaf.py
.. _`pytest_hooklog.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_hooklog.py
.. _`pytest_skipping.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_skipping.py
@ -20,6 +20,7 @@
.. _`pytest_helpconfig.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_helpconfig.py
.. _`oejskit`: oejskit.html
.. _`doctest`: doctest.html
.. _`pytest_mark.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_mark.py
.. _`get in contact`: ../../contact.html
.. _`pytest_capture.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_capture.py
.. _`figleaf`: figleaf.html
@ -30,7 +31,6 @@
.. _`pytest_pdb.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_pdb.py
.. _`monkeypatch`: monkeypatch.html
.. _`resultlog`: resultlog.html
.. _`keyword`: keyword.html
.. _`django`: django.html
.. _`pytest_unittest.py`: http://bitbucket.org/hpk42/py-trunk/raw/trunk/_py/test/plugin/pytest_unittest.py
.. _`nose`: nose.html

doc/test/plugin/mark.txt (new file, 85 lines)
View File

@ -0,0 +1,85 @@
pytest_mark plugin
==================
generic mechanism for marking python functions.
.. contents::
:local:
By using the ``py.test.mark`` helper you can instantiate
decorators that will set named meta data on test functions.
Marking a single function
----------------------------------------------------
You can "mark" a test function with meta data like this::
@py.test.mark.webtest
def test_send_http():
...
This will set a "Marker" instance as a function attribute named "webtest".
You can also specify parametrized meta data like this::
@py.test.mark.webtest(firefox=30)
def test_receive():
...
The named marker can be accessed like this later::
test_receive.webtest.kwargs['firefox'] == 30
In addition to setting key-value pairs you can also use positional arguments::
@py.test.mark.webtest("triangular")
def test_receive():
...
and later access it with ``test_receive.webtest.args[0] == 'triangular'``.
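Putting both forms together in a minimal sketch (the module-level asserts only illustrate the stored values)::

    import py

    @py.test.mark.webtest(firefox=30)
    def test_receive():
        pass

    assert test_receive.webtest.kwargs['firefox'] == 30

    @py.test.mark.webtest("triangular")
    def test_other():
        pass

    assert test_other.webtest.args[0] == "triangular"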
.. _`scoped-marking`:
Marking classes or modules
----------------------------------------------------
To mark all methods of a class set a ``pytestmark`` attribute like this::
import py
class TestClass:
pytestmark = py.test.mark.webtest
You can re-use the same markers that you would use for decorating
a function - in fact this marker decorator will be applied
to all test methods of the class.
You can also set a module level marker::
import py
pytestmark = py.test.mark.webtest
in which case the marker decorator will be applied to all functions and
methods defined in the module.
The order in which marker functions are called is this::
per-function (upon import of module already)
per-class
per-module
Later called markers may overwrite previous key-value settings.
Positional arguments are all appended to the same 'args' list
of the Marker object.
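A sketch combining the three levels (all names are illustrative)::

    import py

    pytestmark = py.test.mark.webtest(browser="module")     # applied last

    class TestClass:
        pytestmark = py.test.mark.webtest(browser="class", flag=1)

        @py.test.mark.webtest("func-arg", browser="function")
        def test_method(self):
            pass

Per the ordering above, the ``webtest`` marker on ``test_method`` would end up
with ``kwargs == {'browser': 'module', 'flag': 1}`` and ``args == ['func-arg']``.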
Start improving this plugin in 30 seconds
=========================================
1. Download `pytest_mark.py`_ plugin source code
2. put it somewhere as ``pytest_mark.py`` into your import path
3. a subsequent ``py.test`` run will use your local version
Check out customize_, other plugins_ or `get in contact`_.
.. include:: links.txt

View File

@ -2,32 +2,39 @@
pytest_skipping plugin
======================
advanced conditional skipping for python test functions, classes or modules.
advanced skipping for python test functions, classes or modules.
.. contents::
:local:
You can mark functions, classes or modules for conditional
skipping (skipif) or as expected-to-fail (xfail). The difference
between the two is that 'xfail' will still execute test functions
but it will invert the outcome: a passing test becomes a failure and
a failing test is a semi-passing one. All skip conditions are
reported at the end of test run through the terminal reporter.
With this plugin you can mark test functions for conditional skipping
or as "xfail", expected-to-fail. Skipping a test will avoid running it
while xfail-marked tests will run and result in an inverted outcome:
a pass becomes a failure and a fail becomes a semi-passing one.
The need for skipping a test is usually connected to a condition.
If a test fails under all conditions then it's probably better
to mark your test as 'xfail'.
By passing ``--report=xfailed,skipped`` to the terminal reporter
you will see summary information on skips and xfail-run tests
at the end of a test run.
.. _skipif:
skip a test function conditionally
mark a test function to be skipped
-------------------------------------------
Here is an example for skipping a test function on Python3::
Here is an example for skipping a test function when
running on Python3::
@py.test.mark.skipif("sys.version_info >= (3,0)")
def test_function():
...
The 'skipif' marker accepts an **arbitrary python expression**
as a condition. When setting up the test function the condition
is evaluated by calling ``eval(expr, namespace)``. The namespace
During test function setup the skipif condition is
evaluated by calling ``eval(expr, namespace)``. The namespace
contains the ``sys`` and ``os`` modules as well as the
test ``config`` object. The latter allows you to skip based
on a test configuration value e.g. like this::
@ -37,71 +44,74 @@ on a test configuration value e.g. like this::
...
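A fuller sketch (the ``--db`` option and its conftest hook are illustrative assumptions, not part of this plugin)::

    # conftest.py
    def pytest_addoption(parser):
        parser.addoption("--db", action="store", default="sqlite",
                         help="database backend the tests run against")

    # test_db.py
    import py

    @py.test.mark.skipif("config.getvalue('db') == 'mysql'")
    def test_sqlite_only():
        pass   # skipped when invoked with --db=mysql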
conditionally mark a function as "expected to fail"
mark many test functions at once
--------------------------------------
As with all metadata function marking you can do it at
`whole class- or module level`_. Here is an example
for skipping all methods of a test class based on platform::
class TestPosixCalls:
pytestmark = py.test.mark.skipif("sys.platform == 'win32'")
def test_function(self):
# will not be setup or run under 'win32' platform
#
.. _`whole class- or module level`: mark.html#scoped-marking
mark a test function as expected to fail
-------------------------------------------------------
You can use the ``xfail`` keyword to mark your test functions as
'expected to fail'::
You can use the ``xfail`` marker to indicate that you
expect the test to fail::
@py.test.mark.xfail
def test_hello():
...
This test will be executed but no traceback will be reported
when it fails. Instead terminal reporting will list it in the
"expected to fail" or "unexpectedly passing" sections.
As with skipif_ you may selectively expect a failure
depending on platform::
@py.test.mark.xfail("sys.version_info >= (3,0)")
def test_function():
...
skip/xfail a whole test class or module
-------------------------------------------
This test will be run but no traceback will be reported
when it fails. Instead terminal reporting will list it in the
"expected to fail" or "unexpectedly passing" sections.
Instead of marking single functions you can skip
a whole class of tests when running on a specific
platform::
Same as with skipif_ you can also selectively expect a failure
depending on platform::
class TestSomething:
skipif = "sys.platform == 'win32'"
@py.test.mark.xfail("sys.version_info >= (3,0)")
Or you can mark all test functions as expected
to fail for a specific test configuration::
xfail = "config.getvalue('db') == 'mysql'"
def test_function():
...
skip if a dependency cannot be imported
---------------------------------------------
skipping on a missing import dependency
--------------------------------------------------
You can use a helper to skip on a failing import::
You can use the following import helper at module level
or within a test or setup function.
docutils = py.test.importorskip("docutils")
You can use this helper at module level or within
a test or setup function.
You can also skip if a library does not come with a high enough version::
If ``docutils`` cannot be imported here, this will lead to a
skip outcome of the test. You can also skip
if a library does not come with a high enough version::
docutils = py.test.importorskip("docutils", minversion="0.3")
The version will be read from the specified module's ``__version__`` attribute.
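A minimal module-level sketch (``docutils`` is only an example dependency)::

    import py
    # skip every test in this module if docutils is missing or too old
    docutils = py.test.importorskip("docutils", minversion="0.3")

    def test_has_version():
        # only reached when the import above succeeded
        assert hasattr(docutils, "__version__")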
dynamically skip from within a test or setup
-------------------------------------------------
imperative skip from within a test or setup function
------------------------------------------------------
If you want to skip the execution of a test you can call
``py.test.skip()`` within a test, a setup or from a
`funcarg factory`_ function. Example::
If for some reason you cannot declare skip-conditions
you can also imperatively produce a Skip-outcome from
within test or setup code. Example::
def test_function():
if not valid_config():
py.test.skip("unsuppored configuration")
.. _`funcarg factory`: ../funcargs.html#factory
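A sketch of the same idea in a setup function and a `funcarg factory`_ (the ``valid_config`` and ``connect_test_db`` helpers are hypothetical)::

    import py

    def setup_module(module):
        # imperatively skip every test in this module
        if not valid_config():
            py.test.skip("unsupported configuration")

    def pytest_funcarg__db(request):
        # a funcarg factory may skip the tests requesting the "db" funcarg
        if not valid_config("db"):
            py.test.skip("no test database configured")
        return connect_test_db()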
Start improving this plugin in 30 seconds
=========================================

View File

@ -13,16 +13,24 @@ command line options
--------------------
``-v, --verbose``
increase verbosity.
``-l, --showlocals``
show locals in tracebacks (disabled by default).
``--report=opts``
comma separated reporting options
``--tb=style``
traceback verboseness (long/short/no).
``--fulltrace``
don't cut any tracebacks (default is to cut).
``--collectonly``
only collect tests, don't execute them.
``--traceconfig``
trace considerations of conftest.py files.
``--nomagic``
don't reinterpret asserts, no traceback cutting.
``--debug``
generate and show debugging information.
generate and show internal debugging information.
Start improving this plugin in 30 seconds
=========================================

View File

@ -1,5 +1,5 @@
import py
from _py.test.plugin.pytest_keyword import Mark
from _py.test.plugin.pytest_mark import Mark
class TestMark:
def test_pytest_mark_notcallable(self):
@ -31,15 +31,22 @@ class TestMark:
assert f.world.y == 4
mark.world(y=1)(f)
assert f.world.y == 1
assert len(f.world._args) == 0
assert len(f.world.args) == 0
def test_pytest_mark_positional(self):
mark = Mark()
def f(): pass
mark.world("hello")(f)
assert f.world._args[0] == "hello"
assert f.world.args[0] == "hello"
mark.world("world")(f)
def test_oldstyle_marker_access(self, recwarn):
mark = Mark()
def f(): pass
mark.world(x=1)(f)
assert f.world.x == 1
assert recwarn.pop()
class TestFunctional:
def test_mark_per_function(self, testdir):
p = testdir.makepyfile("""
@ -89,10 +96,8 @@ class TestFunctional:
item, = items
keywords = item.readkeywords()
marker = keywords['hello']
assert marker._args == ["pos0", "pos1"]
assert marker.x == 3
assert marker.y == 2
assert marker.z == 4
assert marker.args == ["pos0", "pos1"]
assert marker.kwargs == {'x': 3, 'y': 2, 'z': 4}
def test_mark_other(self, testdir):
item = testdir.getitem("""