merging and refining examples, also refining skipping documentation.

This commit is contained in:
holger krekel 2010-11-20 21:35:55 +01:00
parent bd5a9ba392
commit 158e160823
45 changed files with 371 additions and 452 deletions

View File

@ -6,6 +6,10 @@ Release announcements
   :maxdepth: 2
   release-2.0.0
+  .. toctree::
+     :hidden:
   release-1.3.4
   release-1.3.3
   release-1.3.2

View File

@ -1,6 +1,7 @@
import py
failure_demo = py.path.local(__file__).dirpath('failure_demo.py')
+ pytest_plugins = 'pytester',

def test_failure_demo_fails_properly(testdir):
    target = testdir.tmpdir.join(failure_demo.basename)

View File

@ -1,36 +0,0 @@
writing well integrated assertion helpers
========================================================
If you have a test helper function called from a test you can
use the ``pytest.fail`` helper to fail a test with a certain message.
The helper function will not show up in the traceback if you
set the ``__tracebackhide__`` option somewhere in it.
Example::

    # content of test_checkconfig.py
    import pytest

    def checkconfig(x):
        __tracebackhide__ = True
        if not hasattr(x, "config"):
            pytest.fail("not configured: %s" % (x,))

    def test_something():
        checkconfig(42)

The ``__tracebackhide__`` setting influences how py.test shows
tracebacks: the ``checkconfig`` function will not be shown
unless the ``--fulltrace`` command line option is specified.
Let's run our little function::
$ py.test -q
F
================================= FAILURES =================================
______________________________ test_something ______________________________
def test_something():
> checkconfig(42)
E Failed: not configured: 42
test_checkconfig.py:8: Failed
1 failed in 0.02 seconds

View File

@ -1,57 +0,0 @@
.. _`retrieved by hooks as item keywords`:
control skipping of tests according to command line option
--------------------------------------------------------------
Here is a ``conftest.py`` file adding a ``--runslow`` command
line option to control skipping of ``slow`` marked tests::
    # content of conftest.py

    import pytest

    def pytest_addoption(parser):
        parser.addoption("--runslow", action="store_true",
            help="run slow tests")

    def pytest_runtest_setup(item):
        if 'slow' in item.keywords and not item.config.getvalue("runslow"):
            pytest.skip("need --runslow option to run")

We can now write a test module like this::

    # content of test_module.py
    import pytest

    slow = pytest.mark.slow

    def test_func_fast():
        pass

    @slow
    def test_func_slow():
        pass

and when running it we will see a skipped "slow" test::
$ py.test test_module.py -rs # "-rs" means report details on the little 's'
=========================== test session starts ============================
platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev30
test path 1: test_module.py
test_module.py .s
========================= short test summary info ==========================
SKIP [1] /tmp/doc-exec-104/conftest.py:9: need --runslow option to run
=================== 1 passed, 1 skipped in 0.02 seconds ====================
Or run it including the ``slow`` marked test::
$ py.test test_module.py --runslow
=========================== test session starts ============================
platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev30
test path 1: test_module.py
test_module.py ..
========================= 2 passed in 0.01 seconds =========================

View File

@ -9,6 +9,7 @@ def pytest_funcarg__setup(request):
class CostlySetup:
    def __init__(self):
        import time
+       print ("performing costly setup")
        time.sleep(5)
        self.timecostly = 1

View File

@ -0,0 +1,3 @@
def test_quick(setup):
    pass
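For context, the ``setup`` funcarg used above comes from the ``pytest_funcarg__setup`` factory referenced in the hunk header; a rough sketch of how such a ``conftest.py`` might cache the costly object per session (the ``cached_setup`` call and the ``session`` scope are assumptions, not shown in this diff)::

    # content of conftest.py -- hypothetical sketch
    def pytest_funcarg__setup(request):
        # create CostlySetup only once per test session and reuse it
        return request.cached_setup(setup=CostlySetup, scope="session")

    class CostlySetup:
        def __init__(self):
            import time
            print ("performing costly setup")
            time.sleep(5)
            self.timecostly = 1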

View File

@ -1,29 +0,0 @@
Detect if running from within a py.test run
--------------------------------------------------------------
Usually it is a bad idea to make application code
behave differently if called from a test. But if you
absolutely must find out if your application code is
running from a test you can do something like this::
    # content of conftest.py in your testing directory
    import sys

    def pytest_configure(config):
        sys._called_from_test = True

    def pytest_unconfigure(config):
        del sys._called_from_test

and then check for the ``sys._called_from_test`` flag::

    if hasattr(sys, '_called_from_test'):
        # called from within a test run
        pass
    else:
        # called "normally"
        pass

and act accordingly in your application code. It's also a good idea
to use your own application module rather than ``sys``
for handling the flag.

View File

@ -10,11 +10,8 @@ need more examples or have questions.
.. toctree::
   :maxdepth: 2

-  builtin.txt
-  pythoncollection.txt
-  controlskip.txt
-  mysetup.txt
-  detectpytest.txt
-  nonpython.txt
   simple.txt
-  xunit_setup.txt
+  pythoncollection.txt
+  mysetup.txt
+  parametrize.txt
+  nonpython.txt

View File

@ -1,12 +1,10 @@
""" """
module containing a parametrized tests testing cross-python module containing a parametrized tests testing cross-python
serialization via the pickle module. serialization via the pickle module.
""" """
import py import py
pythonlist = ['python2.3', 'python2.4', 'python2.5', 'python2.6'] pythonlist = ['python2.4', 'python2.5', 'python2.6', 'python2.7', 'python2.8']
# 'jython' 'python3.1']
def pytest_generate_tests(metafunc): def pytest_generate_tests(metafunc):
if 'python1' in metafunc.funcargnames: if 'python1' in metafunc.funcargnames:

doc/example/parametrize.txt (new file, +142 lines)
View File

@ -0,0 +1,142 @@
parametrizing tests
=================================================
py.test allows you to easily implement your own custom
parametrization scheme for tests. Here we provide
some examples for inspiration and re-use.
Parametrizing test methods through per-class configuration
--------------------------------------------------------------
.. _`unittest parameterizer`: http://code.google.com/p/unittest-ext/source/browse/trunk/params.py
Here is an example ``pytest_generate_tests`` function implementing a
parametrization scheme similar to Michael Foord's `unittest
parameterizer`_ in a lot less code::

    # content of ./test_parametrize.py
    import pytest

    def pytest_generate_tests(metafunc):
        # called once per each test function
        for funcargs in metafunc.cls.params[metafunc.function.__name__]:
            # schedule a new test function run with applied **funcargs
            metafunc.addcall(funcargs=funcargs)

    class TestClass:
        # a map specifying multiple argument sets for a test method
        params = {
            'test_equals': [dict(a=1, b=2), dict(a=3, b=3), ],
            'test_zerodivision': [dict(a=1, b=0), dict(a=3, b=2)],
        }

        def test_equals(self, a, b):
            assert a == b

        def test_zerodivision(self, a, b):
            pytest.raises(ZeroDivisionError, "a/b")

Running it means we run two tests for each test function, using
the respective settings::
$ py.test -q
F..F
================================= FAILURES =================================
_________________________ TestClass.test_equals[0] _________________________
self = <test_parametrize.TestClass instance at 0x128a638>, a = 1, b = 2
def test_equals(self, a, b):
> assert a == b
E assert 1 == 2
test_parametrize.py:17: AssertionError
______________________ TestClass.test_zerodivision[1] ______________________
self = <test_parametrize.TestClass instance at 0x1296440>, a = 3, b = 2
def test_zerodivision(self, a, b):
> pytest.raises(ZeroDivisionError, "a/b")
E Failed: DID NOT RAISE
test_parametrize.py:20: Failed
2 failed, 2 passed in 0.03 seconds
Parametrizing test methods through a decorator
--------------------------------------------------------------
Modifying the previous example we can also allow decorators
for parametrizing test methods::
    # content of test_parametrize2.py
    import pytest

    # test support code
    def params(funcarglist):
        def wrapper(function):
            function.funcarglist = funcarglist
            return function
        return wrapper

    def pytest_generate_tests(metafunc):
        for funcargs in getattr(metafunc.function, 'funcarglist', ()):
            metafunc.addcall(funcargs=funcargs)

    # actual test code
    class TestClass:
        @params([dict(a=1, b=2), dict(a=3, b=3), ])
        def test_equals(self, a, b):
            assert a == b

        @params([dict(a=1, b=0), dict(a=3, b=2)])
        def test_zerodivision(self, a, b):
            pytest.raises(ZeroDivisionError, "a/b")
Running it gives similar results as before::
$ py.test -q test_parametrize2.py
F..F
================================= FAILURES =================================
_________________________ TestClass.test_equals[0] _________________________
self = <test_parametrize2.TestClass instance at 0x1dbcc68>, a = 1, b = 2
@params([dict(a=1, b=2), dict(a=3, b=3), ])
def test_equals(self, a, b):
> assert a == b
E assert 1 == 2
test_parametrize2.py:19: AssertionError
______________________ TestClass.test_zerodivision[1] ______________________
self = <test_parametrize2.TestClass instance at 0x1dd0488>, a = 3, b = 2
@params([dict(a=1, b=0), dict(a=3, b=2)])
def test_zerodivision(self, a, b):
> pytest.raises(ZeroDivisionError, "a/b")
E Failed: DID NOT RAISE
test_parametrize2.py:23: Failed
2 failed, 2 passed in 0.03 seconds
checking serialization between Python interpreters
--------------------------------------------------------------
Here is a stripped down real-life example of using parametrized
testing for testing serialization between different interpreters.
We define a ``test_basic_objects`` function which is to be run
with different sets of arguments for its three arguments:

* ``python1``: first python interpreter
* ``python2``: second python interpreter
* ``obj``: object to be dumped from the first interpreter and loaded into the second interpreter

.. literalinclude:: multipython.py

Running it (with Python 2.4 through Python 2.7 installed)::
. $ py.test -q multipython.py
....s....s....s....ssssss....s....s....s....ssssss....s....s....s....ssssss
48 passed, 27 skipped in 2.55 seconds
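The diff above shows only a fragment of ``multipython.py``. As a self-contained, simplified sketch of the parametrization idea (the interpreter names, sample objects and trivial test body below are illustrative placeholders, not the file's actual contents)::

    # hypothetical sketch: combine interpreter pairs with sample objects
    def pytest_generate_tests(metafunc):
        if 'python1' in metafunc.funcargnames:
            for obj in (42, {}, {1: 3}):
                for py1 in ('python2.4', 'python2.5', 'python2.6'):
                    for py2 in ('python2.4', 'python2.5', 'python2.6'):
                        metafunc.addcall(
                            id='%s-%s-%r' % (py1, py2, obj),
                            funcargs=dict(python1=py1, python2=py2, obj=obj))

    def test_basic_objects(python1, python2, obj):
        # placeholder body -- the real test dumps ``obj`` with ``python1``
        # and loads it with ``python2`` to verify the pickle round trip
        assert python1.startswith('python') and python2.startswith('python')

The real example replaces the placeholder body with helpers that invoke each interpreter as a subprocess and skip when an interpreter is not installed.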

View File

@ -21,7 +21,7 @@ their file system path and then running the test. Through
an ini-file and the :confval:`addopts` option you can make
this change more permanently::

-   # content of setup.cfg or tox.ini
+   # content of pytest.ini
    [pytest]
    addopts = --pyargs

@ -30,8 +30,8 @@ finding out what is collected
You can always peek at the collection tree without running tests like this::

-   . $ py.test --collectonly collectonly.py
-   <Module 'collectonly.py'>
+   . $ py.test --collectonly pythoncollection.py
+   <Module 'pythoncollection.py'>
    <Function 'test_function'>
    <Class 'TestClass'>
    <Instance '()'>

View File

@ -1,9 +1,26 @@
.. highlightlang:: python

-  simple patterns using hooks
+  simple hook using patterns
==========================================================

adding custom options
----------------------

py.test supports adding of standard optparse_ options.
A plugin may implement the ``pytest_addoption`` hook for registering
custom options::

    def pytest_addoption(parser):
        parser.addoption("-M", "--myopt", action="store",
            help="specify string to set myopt")

    def pytest_configure(config):
        if config.option.myopt:
            # do action based on option value
            pass

.. _optparse: http://docs.python.org/library/optparse.html
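As a rough sketch of how such an option might then be consumed by tests (the ``myopt`` funcarg and file names below are illustrative, not part of this commit)::

    # content of conftest.py -- hypothetical sketch
    def pytest_addoption(parser):
        parser.addoption("-M", "--myopt", action="store",
            help="specify string to set myopt")

    def pytest_funcarg__myopt(request):
        # expose the command line value to tests requesting "myopt"
        return request.config.option.myopt

    # content of test_myopt.py -- hypothetical sketch
    def test_myopt(myopt):
        print ("myopt is %r" % myopt)

Running ``py.test -M hello -s`` with this layout would print the passed-in value.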
pass different values to a test function, depending on command line options
----------------------------------------------------------------------------
@ -134,3 +151,128 @@ let's run the full monty::
As expected when running the full range of ``param1`` values
we'll get an error on the last one.
.. _`retrieved by hooks as item keywords`:
control skipping of tests according to command line option
--------------------------------------------------------------
Here is a ``conftest.py`` file adding a ``--runslow`` command
line option to control skipping of ``slow`` marked tests::
    # content of conftest.py

    import pytest

    def pytest_addoption(parser):
        parser.addoption("--runslow", action="store_true",
            help="run slow tests")

    def pytest_runtest_setup(item):
        if 'slow' in item.keywords and not item.config.getvalue("runslow"):
            pytest.skip("need --runslow option to run")

We can now write a test module like this::

    # content of test_module.py
    import pytest

    slow = pytest.mark.slow

    def test_func_fast():
        pass

    @slow
    def test_func_slow():
        pass

and when running it we will see a skipped "slow" test::
$ py.test test_module.py -rs # "-rs" means report details on the little 's'
=========================== test session starts ============================
platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev30
test path 1: test_module.py
test_module.py .s
========================= short test summary info ==========================
SKIP [1] /tmp/doc-exec-104/conftest.py:9: need --runslow option to run
=================== 1 passed, 1 skipped in 0.02 seconds ====================
Or run it including the ``slow`` marked test::
$ py.test test_module.py --runslow
=========================== test session starts ============================
platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev30
test path 1: test_module.py
test_module.py ..
========================= 2 passed in 0.01 seconds =========================
writing well integrated assertion helpers
--------------------------------------------------
If you have a test helper function called from a test you can
use the ``pytest.fail`` helper to fail a test with a certain message.
The helper function will not show up in the traceback if you
set the ``__tracebackhide__`` option somewhere in it.
Example::

    # content of test_checkconfig.py
    import pytest

    def checkconfig(x):
        __tracebackhide__ = True
        if not hasattr(x, "config"):
            pytest.fail("not configured: %s" % (x,))

    def test_something():
        checkconfig(42)

The ``__tracebackhide__`` setting influences how py.test shows
tracebacks: the ``checkconfig`` function will not be shown
unless the ``--fulltrace`` command line option is specified.
Let's run our little function::
$ py.test -q
F
================================= FAILURES =================================
______________________________ test_something ______________________________
def test_something():
> checkconfig(42)
E Failed: not configured: 42
test_checkconfig.py:8: Failed
1 failed in 0.02 seconds
Detect if running from within a py.test run
--------------------------------------------------------------
Usually it is a bad idea to make application code
behave differently if called from a test. But if you
absolutely must find out if your application code is
running from a test you can do something like this::
    # content of conftest.py in your testing directory
    import sys

    def pytest_configure(config):
        sys._called_from_test = True

    def pytest_unconfigure(config):
        del sys._called_from_test

and then check for the ``sys._called_from_test`` flag::

    if hasattr(sys, '_called_from_test'):
        # called from within a test run
        pass
    else:
        # called "normally"
        pass

and act accordingly in your application code. It's also a good idea
to use your own application module rather than ``sys``
for handling the flag.
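A minimal sketch of that suggestion, using an assumed application module instead of ``sys`` (the module and attribute names are hypothetical)::

    # content of conftest.py -- hypothetical sketch
    import myapp.config   # your own application module

    def pytest_configure(config):
        myapp.config.called_from_test = True

    def pytest_unconfigure(config):
        myapp.config.called_from_test = False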

doc/example/xfail_demo.py (new file, +21 lines)
View File

@ -0,0 +1,21 @@
import pytest
xfail = pytest.mark.xfail

@xfail
def test_hello():
    assert 0

@xfail(run=False)
def test_hello2():
    assert 0

@xfail("hasattr(os, 'sep')")
def test_hello3():
    assert 0

@xfail(reason="bug 110")
def test_hello4():
    assert 0

def test_hello5():
    pytest.xfail("reason")

View File

@ -1,74 +0,0 @@
Learning by examples
=====================
adding custom options
----------------------
py.test supports adding of standard optparse_ options.
A plugin may implement the ``pytest_addoption`` hook for registering
custom options::

    def pytest_addoption(parser):
        parser.addoption("-M", "--myopt", action="store",
            help="specify string to set myopt")

    def pytest_configure(config):
        if config.option.myopt:
            # do action based on option value
            pass

.. _optparse: http://docs.python.org/library/optparse.html
order of setup/teardown module/class/item methods
====================================================
managing state at module, class and method level
------------------------------------------------------------
Here is a working example for what goes on when you set up modules,
classes and methods::

    # [[from py/documentation/example/pytest/test_setup_flow_example.py]]
    def setup_module(module):
        module.TestStateFullThing.classcount = 0

    class TestStateFullThing:
        def setup_class(cls):
            cls.classcount += 1

        def teardown_class(cls):
            cls.classcount -= 1

        def setup_method(self, method):
            self.id = eval(method.func_name[5:])

        def test_42(self):
            assert self.classcount == 1
            assert self.id == 42

        def test_23(self):
            assert self.classcount == 1
            assert self.id == 23

    def teardown_module(module):
        assert module.TestStateFullThing.classcount == 0
For this example the control flow happens as follows::
import test_setup_flow_example
setup_module(test_setup_flow_example)
setup_class(TestStateFullThing)
instance = TestStateFullThing()
setup_method(instance, instance.test_42)
instance.test_42()
setup_method(instance, instance.test_23)
instance.test_23()
teardown_class(TestStateFullThing)
teardown_module(test_setup_flow_example)
Note that ``setup_class(TestStateFullThing)`` is called and not
``TestStateFullThing.setup_class()`` which would require you
to insert ``setup_class = classmethod(setup_class)`` to make
your setup function callable. Did we mention that laziness
is a virtue?
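For clarity, the explicit variant alluded to above would look like this (sketch)::

    class TestStateFullThing:
        def setup_class(cls):
            cls.classcount += 1
        setup_class = classmethod(setup_class)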

View File

@ -65,7 +65,7 @@ tool, for example::
If a plugin is installed, py.test automatically finds and integrates it,
there is no need to activate it. If you don't need a plugin anymore simply
-  de-install it. You can find a list of valid plugins through a
+  de-install it. You can find a list of available plugins through a
`pytest- pypi.python.org search`_.

.. _`available installable plugins`:

doc/pytest.ini (new file, +2 lines)
View File

@ -0,0 +1,2 @@
[pytest]
# just defined to prevent the root level tox.ini from kicking in

View File

@ -2,18 +2,19 @@
skip and xfail mechanisms
=====================================================================

-  You can mark test functions for a conditional *skip* or as *xfail*,
-  expected-to-fail. Skipping a test avoids running a test.
-  Whereas an xfail-marked test usually is run but if it fails it is
-  not reported in detail and counted separately. The latter allows
-  to keep track of real implementation problems whereas test skips
-  are normally tied to a condition, such as a platform or dependency
-  requirement without which considering or running the test does
-  not make sense. If a test fails under all conditions then it's
-  probably best to mark your test as 'xfail'.
+  You can skip or "xfail" test functions, either by marking functions
+  through a decorator or by calling the ``pytest.skip|xfail`` helpers.
+  A *skip* means that you expect your test to pass unless a certain
+  configuration or condition (e.g. wrong Python interpreter, missing
+  dependency) prevents it from running. An *xfail* means that you expect
+  your test to fail because there is an implementation problem.
+  Counting and listing *xfailing* tests separately helps to maintain a
+  list of implementation problems, and you can provide info such as a
+  bug number or a URL to give a human readable problem context.

-  By running ``py.test -rxs`` you will see extra reporting
-  information on skips and xfail-run tests at the end of a test run.
+  Usually detailed information about skipped/xfailed tests is not shown
+  to avoid cluttering the output. You can use the ``-r`` option to
+  see details corresponding to the "short" letters shown in the
+  test progress::
+
+      py.test -rxs  # show extra info on skips and xfail tests

.. _skipif:
@ -47,7 +48,7 @@ at module level like this::
    ...

-  skip groups of test functions
+  skip test functions of a class
--------------------------------------

As with all function :ref:`marking` you can do it at
@ -58,8 +59,7 @@ for skipping all methods of a test class based on platform::
    pytestmark = pytest.mark.skipif("sys.platform == 'win32'")

    def test_function(self):
-       # will not be setup or run under 'win32' platform
-       #
+       "will not be setup or run under 'win32' platform"

The ``pytestmark`` attribute will be applied to each test function.
If your code targets python2.6 or above you can equivalently use
@ -69,8 +69,7 @@ the skipif decorator on classes::
    class TestPosixCalls:

        def test_function(self):
-           # will not be setup or run under 'win32' platform
-           #
+           "will not be setup or run under 'win32' platform"

It is fine in general to apply multiple "skipif" decorators
on a single function - this means that if any of the conditions
@ -94,6 +93,13 @@ This test will be run but no traceback will be reported
when it fails. Instead terminal reporting will list it in the
"expected to fail" or "unexpectedly passing" sections.

+  By specifying on the commandline::
+
+      pytest --runxfail
+
+  you can force the running and reporting of an ``xfail`` marked test
+  as if it weren't marked at all.

Same as with skipif_ you can also selectively expect a failure
depending on platform::
@ -101,19 +107,32 @@ depending on platform::
    def test_function():
        ...

-  To not run a test and still regard it as "xfailed"::
-
-      @pytest.mark.xfail(..., run=False)
-
-  To specify an explicit reason to be shown with xfailure detail::
-
-      @pytest.mark.xfail(..., reason="my reason")
-
-  By specifying on the commandline::
-
-      pytest --runxfail
-
-  you can force the running and reporting of a runnable ``xfail`` marked test.
+  You can also avoid running an "xfail" test at all or
+  specify a reason such as a bug ID or similar. Here is
+  a simple test file with usages:
+
+  .. literalinclude:: example/xfail_demo.py
+
+  Running it with the report-on-xfail option gives this output::
+
+      example $ py.test -rx xfail_demo.py
+      =========================== test session starts ============================
+      platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev31
+      test path 1: xfail_demo.py
+
+      xfail_demo.py xxxxx
+      ========================= short test summary info ==========================
+      XFAIL xfail_demo.py::test_hello
+      XFAIL xfail_demo.py::test_hello2
+        reason: [NOTRUN]
+      XFAIL xfail_demo.py::test_hello3
+        condition: hasattr(os, 'sep')
+      XFAIL xfail_demo.py::test_hello4
+        bug 110
+      XFAIL xfail_demo.py::test_hello5
+        reason: reason
+      ======================== 5 xfailed in 0.04 seconds =========================

imperative xfail from within a test or setup function
------------------------------------------------------

View File

@ -1,3 +0,0 @@
import py
collect_ignore = 'mysetup', 'mysetup2', 'test_simpleprovider.py', 'parametrize'

View File

@ -1,3 +0,0 @@
def test_quick():
    pass

View File

@ -1 +0,0 @@
# XXX this file should not need to be here but is here for proper sys.path mangling

View File

@ -1,9 +0,0 @@
from mysetup.myapp import MyApp

def pytest_funcarg__mysetup(request):
    return MySetup()

class MySetup:
    def myapp(self):
        return MyApp()

View File

@ -1,5 +0,0 @@
class MyApp:
    def question(self):
        return 6 * 9

View File

@ -1,5 +0,0 @@
def test_answer(mysetup):
    app = mysetup.myapp()
    answer = app.question()
    assert answer == 42

View File

@ -1 +0,0 @@
# XXX this file should not need to be here but is here for proper sys.path mangling

View File

@ -1,24 +0,0 @@
import py
import execnet   # needed by getsshconnection() below
from mysetup2.myapp import MyApp

def pytest_funcarg__mysetup(request):
    return MySetup(request)

def pytest_addoption(parser):
    parser.addoption("--ssh", action="store", default=None,
        help="specify ssh host to run tests with")

class MySetup:
    def __init__(self, request):
        self.config = request.config

    def myapp(self):
        return MyApp()

    def getsshconnection(self):
        host = self.config.option.ssh
        if host is None:
            py.test.skip("specify ssh host with --ssh")
        return execnet.SshGateway(host)

View File

@ -1,5 +0,0 @@
class MyApp:
    def question(self):
        return 6 * 9

View File

@ -1,6 +0,0 @@
def test_answer(mysetup):
    app = mysetup.myapp()
    answer = app.question()
    assert answer == 42

View File

@ -1,5 +0,0 @@
class TestClass:
    def test_function(self, mysetup):
        conn = mysetup.getsshconnection()
        # work with conn

View File

@ -1,17 +0,0 @@
import py

def pytest_generate_tests(metafunc):
    for funcargs in metafunc.cls.params[metafunc.function.__name__]:
        metafunc.addcall(funcargs=funcargs)

class TestClass:
    params = {
        'test_equals': [dict(a=1, b=2), dict(a=3, b=3), dict(a=5, b=4)],
        'test_zerodivision': [dict(a=1, b=0), dict(a=3, b=2)],
    }

    def test_equals(self, a, b):
        assert a == b

    def test_zerodivision(self, a, b):
        py.test.raises(ZeroDivisionError, "a/b")

View File

@ -1,25 +0,0 @@
import py

# test support code
def params(funcarglist):
    def wrapper(function):
        function.funcarglist = funcarglist
        return function
    return wrapper

def pytest_generate_tests(metafunc):
    for funcargs in getattr(metafunc.function, 'funcarglist', ()):
        metafunc.addcall(funcargs=funcargs)

# actual test code
class TestClass:
    @params([dict(a=1, b=2), dict(a=3, b=3), dict(a=5, b=4)], )
    def test_equals(self, a, b):
        assert a == b

    @params([dict(a=1, b=0), dict(a=3, b=2)])
    def test_zerodivision(self, a, b):
        py.test.raises(ZeroDivisionError, "a/b")

View File

@ -1,15 +0,0 @@
# following hook can be put unchanged into a local or global plugin
def pytest_generate_tests(metafunc):
    for scenario in metafunc.cls.scenarios:
        metafunc.addcall(id=scenario[0], funcargs=scenario[1])

scenario1 = ('basic', {'attribute': 'value'})
scenario2 = ('advanced', {'attribute': 'value2'})

class TestSampleWithScenarios:
    scenarios = [scenario1, scenario2]

    def test_demo(self, attribute):
        assert isinstance(attribute, str)

View File

@ -1,7 +0,0 @@
# ./test_simpleprovider.py
def pytest_funcarg__myfuncarg(request):
    return 42

def test_function(myfuncarg):
    assert myfuncarg == 17

View File

@ -1,15 +0,0 @@
# conftest.py
import py

def pytest_addoption(parser):
    grp = parser.getgroup("testserver options")
    grp.addoption("--url", action="store", default=None,
        help="url for testserver")

def pytest_funcarg__url(request):
    url = request.config.getvalue("url")
    if url is None:
        py.test.skip("need --url")
    return url

View File

@ -1,13 +0,0 @@
from py.xml import html

paras = "First Para", "Second para"

doc = html.html(
    html.head(
        html.meta(name="Content-Type", value="text/html; charset=latin1")),
    html.body(
        [html.p(p) for p in paras]))

print unicode(doc).encode('latin1')

View File

@ -1,23 +0,0 @@
import py
html = py.xml.html

class my(html):
    "a custom style"
    class body(html.body):
        style = html.Style(font_size = "120%")

    class h2(html.h2):
        style = html.Style(background = "grey")

    class p(html.p):
        style = html.Style(font_weight="bold")

doc = my.html(
    my.head(),
    my.body(
        my.h2("hello world"),
        my.p("bold as bold can")
    )
)

print doc.unicode(indent=2)

View File

@ -1,17 +0,0 @@
import py

class ns(py.xml.Namespace):
    pass

doc = ns.books(
    ns.book(
        ns.author("May Day"),
        ns.title("python for java programmers"),),
    ns.book(
        ns.author("why", class_="somecssclass"),
        ns.title("Java for Python programmers"),),
    publisher="N.N",
)
print doc.unicode(indent=2).encode('utf8')

View File

@ -1,16 +0,0 @@
import py

@py.test.mark.xfail
def test_hello():
    assert 0

@py.test.mark.xfail(run=False)
def test_hello2():
    assert 0

@py.test.mark.xfail("hasattr(os, 'sep')")
def test_hello3():
    assert 0

def test_hello5():
    py.test.xfail("reason")