Merge remote-tracking branch 'upstream/master' into features
commit c67bf9d82a

AUTHORS
@@ -144,6 +144,7 @@ Ross Lawley
 Russel Winder
 Ryan Wooden
 Samuele Pedroni
+Segev Finer
 Simon Gomizelj
 Skylar Downes
 Stefan Farmbauer

@@ -11,7 +11,6 @@ import re
 import struct
 import sys
 import types
-from fnmatch import fnmatch
 
 import py
 from _pytest.assertion import util

@@ -167,7 +166,7 @@ class AssertionRewritingHook(object):
             # latter might trigger an import to fnmatch.fnmatch
             # internally, which would cause this method to be
             # called recursively
-            if fnmatch(fn_pypath.basename, pat):
+            if fn_pypath.fnmatch(pat):
                 state.trace("matched test file %r" % (fn,))
                 return True

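The fix above switches matching from the basename alone to the full path, which is what lets directory-qualified ``python_files`` patterns (such as the ``tests/**.py`` exercised by the new test at the bottom of this commit) take effect. A minimal sketch of the difference, using an illustrative path and pattern rather than code from this commit:

    from fnmatch import fnmatch
    import py

    pat = "tests/**.py"
    fn_pypath = py.path.local("/repo/tests/file.py")

    fnmatch(fn_pypath.basename, pat)  # False: "file.py" alone never matches "tests/**.py"
    fn_pypath.fnmatch(pat)            # True: py.path matches the pattern against the full path
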
@@ -7,6 +7,7 @@ from __future__ import absolute_import, division, print_function
 import contextlib
 import sys
 import os
+import io
 from io import UnsupportedOperation
 from tempfile import TemporaryFile
 
@@ -33,8 +34,10 @@ def pytest_addoption(parser):
 
 @pytest.hookimpl(hookwrapper=True)
 def pytest_load_initial_conftests(early_config, parser, args):
-    _readline_workaround()
     ns = early_config.known_args_namespace
+    if ns.capture == "fd":
+        _py36_windowsconsoleio_workaround()
+    _readline_workaround()
     pluginmanager = early_config.pluginmanager
     capman = CaptureManager(ns.capture)
     pluginmanager.register(capman, "capturemanager")

@@ -491,3 +494,49 @@ def _readline_workaround():
         import readline  # noqa
     except ImportError:
         pass
+
+
+def _py36_windowsconsoleio_workaround():
+    """
+    Python 3.6 implemented unicode console handling for Windows. This works
+    by reading/writing to the raw console handle using
+    ``{Read,Write}ConsoleW``.
+
+    The problem is that we are going to ``dup2`` over the stdio file
+    descriptors when doing ``FDCapture`` and this will ``CloseHandle`` the
+    handles used by Python to write to the console. Though there is still some
+    weirdness and the console handle seems to only be closed randomly and not
+    on the first call to ``CloseHandle``, or maybe it gets reopened with the
+    same handle value when we suspend capturing.
+
+    The workaround in this case will reopen stdio with a different fd which
+    also means a different handle by replicating the logic in
+    "Py_lifecycle.c:initstdio/create_stdio".
+
+    See https://github.com/pytest-dev/py/issues/103
+    """
+    if not sys.platform.startswith('win32') or sys.version_info[:2] < (3, 6):
+        return
+
+    buffered = hasattr(sys.stdout.buffer, 'raw')
+    raw_stdout = sys.stdout.buffer.raw if buffered else sys.stdout.buffer
+
+    if not isinstance(raw_stdout, io._WindowsConsoleIO):
+        return
+
+    def _reopen_stdio(f, mode):
+        if not buffered and mode[0] == 'w':
+            buffering = 0
+        else:
+            buffering = -1
+
+        return io.TextIOWrapper(
+            open(os.dup(f.fileno()), mode, buffering),
+            f.encoding,
+            f.errors,
+            f.newlines,
+            f.line_buffering)
+
+    sys.__stdin__ = sys.stdin = _reopen_stdio(sys.stdin, 'rb')
+    sys.__stdout__ = sys.stdout = _reopen_stdio(sys.stdout, 'wb')
+    sys.__stderr__ = sys.stderr = _reopen_stdio(sys.stderr, 'wb')

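For context, the capturing that invalidates the console handles boils down to a ``dup2`` over the standard descriptors. Roughly, as a simplified sketch (not pytest's actual ``FDCapture`` code):

    import os
    import tempfile

    def redirect_fd(target_fd):
        # Keep a duplicate so the original stream can be restored later.
        saved = os.dup(target_fd)
        tmp = tempfile.TemporaryFile()
        # dup2 implicitly closes whatever target_fd pointed at; on
        # Windows/Python 3.6 that invalidates the console handle held by
        # io._WindowsConsoleIO, hence the workaround above.
        os.dup2(tmp.fileno(), target_fd)
        return saved, tmp
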
@@ -71,6 +71,12 @@ class UsageError(Exception):
     """ error in pytest usage or invocation"""
 
 
+class PrintHelp(Exception):
+    """Raised when pytest should print its help to skip the rest of the
+    argument parsing and validation."""
+    pass
+
+
 def filename_arg(path, optname):
     """ Argparse type validator for filename arguments.
 
@@ -163,7 +169,7 @@ def _prepareconfig(args=None, plugins=None):
 
 class PytestPluginManager(PluginManager):
     """
-    Overwrites :py:class:`pluggy.PluginManager` to add pytest-specific
+    Overwrites :py:class:`pluggy.PluginManager <_pytest.vendored_packages.pluggy.PluginManager>` to add pytest-specific
     functionality:
 
     * loading plugins from the command line, ``PYTEST_PLUGIN`` env variable and

@@ -200,7 +206,7 @@ class PytestPluginManager(PluginManager):
         """
         .. deprecated:: 2.8
 
-        Use :py:meth:`pluggy.PluginManager.add_hookspecs` instead.
+        Use :py:meth:`pluggy.PluginManager.add_hookspecs <_pytest.vendored_packages.pluggy.PluginManager.add_hookspecs>` instead.
         """
         warning = dict(code="I2",
                        fslocation=_pytest._code.getfslineno(sys._getframe(1)),

@@ -1100,14 +1106,18 @@ class Config(object):
         self._preparse(args, addopts=addopts)
         # XXX deprecated hook:
         self.hook.pytest_cmdline_preparse(config=self, args=args)
-        args = self._parser.parse_setoption(args, self.option, namespace=self.option)
-        if not args:
-            cwd = os.getcwd()
-            if cwd == self.rootdir:
-                args = self.getini('testpaths')
-            if not args:
-                args = [cwd]
-        self.args = args
+        self._parser.after_preparse = True
+        try:
+            args = self._parser.parse_setoption(args, self.option, namespace=self.option)
+            if not args:
+                cwd = os.getcwd()
+                if cwd == self.rootdir:
+                    args = self.getini('testpaths')
+                if not args:
+                    args = [cwd]
+            self.args = args
+        except PrintHelp:
+            pass
 
     def addinivalue_line(self, name, line):
         """ add a line to an ini-file option. The option must have been

@@ -1120,7 +1130,7 @@ class Config(object):
     def getini(self, name):
         """ return configuration value from an :ref:`ini file <inifiles>`. If the
         specified name hasn't been registered through a prior
-        :py:func:`parser.addini <pytest.config.Parser.addini>`
+        :py:func:`parser.addini <_pytest.config.Parser.addini>`
         call (usually from a plugin), a ValueError is raised. """
         try:
             return self._inicache[name]

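For reference, the ``addini``/``getini`` pairing the corrected link documents looks like this in plugin code (a sketch with made-up option names, not part of this commit):

    # conftest.py
    def pytest_addoption(parser):
        parser.addini("api_timeout", help="timeout for slow API tests", default="10")

    def pytest_configure(config):
        # getini raises ValueError if the name was never registered via addini
        timeout = int(config.getini("api_timeout"))
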
@@ -3,13 +3,46 @@ from __future__ import absolute_import, division, print_function
 
 import py
 import pytest
+from _pytest.config import PrintHelp
 import os, sys
+from argparse import Action
+
+
+class HelpAction(Action):
+    """This is an argparse Action that will raise an exception in
+    order to skip the rest of the argument parsing when --help is passed.
+    This prevents argparse from quitting due to missing required arguments
+    when any are defined, for example by ``pytest_addoption``.
+    This is similar to the way that the builtin argparse --help option is
+    implemented by raising SystemExit.
+    """
+
+    def __init__(self,
+                 option_strings,
+                 dest=None,
+                 default=False,
+                 help=None):
+        super(HelpAction, self).__init__(
+            option_strings=option_strings,
+            dest=dest,
+            const=True,
+            default=default,
+            nargs=0,
+            help=help)
+
+    def __call__(self, parser, namespace, values, option_string=None):
+        setattr(namespace, self.dest, self.const)
+
+        # We should only skip the rest of the parsing after preparse is done
+        if getattr(parser._parser, 'after_preparse', False):
+            raise PrintHelp
 
 
 def pytest_addoption(parser):
     group = parser.getgroup('debugconfig')
     group.addoption('--version', action="store_true",
                     help="display pytest lib version and import information.")
-    group._addoption("-h", "--help", action="store_true", dest="help",
+    group._addoption("-h", "--help", action=HelpAction, dest="help",
                      help="show help message and configuration info")
     group._addoption('-p', action="append", dest="plugins", default=[],
                      metavar="name",

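The same raise-to-short-circuit trick works with plain argparse, independent of pytest. A self-contained sketch with hypothetical names:

    import argparse

    class StopParsing(Exception):
        pass

    class EarlyHelp(argparse.Action):
        def __init__(self, option_strings, dest, **kwargs):
            super(EarlyHelp, self).__init__(option_strings, dest, nargs=0, **kwargs)

        def __call__(self, parser, namespace, values, option_string=None):
            setattr(namespace, self.dest, True)
            raise StopParsing  # bail out before required options are validated

    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument("--help", action=EarlyHelp, dest="help", default=False)
    parser.add_argument("--xyz", required=True)

    try:
        parser.parse_args(["--help"])
    except StopParsing:
        print(parser.format_help())  # help prints although --xyz is missing

Actions run while arguments are being consumed, but the required-options check only happens at the end of ``parse_args``, so raising inside ``__call__`` skips it.
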
@@ -213,7 +213,7 @@ def pytest_runtest_teardown(item, nextitem):
 @hookspec(firstresult=True)
 def pytest_runtest_makereport(item, call):
     """ return a :py:class:`_pytest.runner.TestReport` object
-    for the given :py:class:`pytest.Item` and
+    for the given :py:class:`pytest.Item <_pytest.main.Item>` and
     :py:class:`_pytest.runner.CallInfo`.
     """

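A typical consumer of this hookspec is a ``hookwrapper`` that inspects the generated report; a brief sketch of conftest.py code, not part of this commit:

    import pytest

    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtest_makereport(item, call):
        outcome = yield                # the other implementations run here
        report = outcome.get_result()  # the _pytest.runner.TestReport
        if report.when == "call" and report.failed:
            print("failed: %s" % item.nodeid)
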
@@ -540,7 +540,7 @@ class PluginManager(object):
         of HookImpl instances and the keyword arguments for the hook call.
 
         ``after(outcome, hook_name, hook_impls, kwargs)`` receives the
-        same arguments as ``before`` but also a :py:class:`_CallOutcome`` object
+        same arguments as ``before`` but also a :py:class:`_CallOutcome <_pytest.vendored_packages.pluggy._CallOutcome>` object
         which represents the result of the overall hook call.
         """
         return _TracedHookExecution(self, before, after).undo

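Usage of the monitoring API this docstring describes, sketched under the assumption that a ``pluginmanager`` instance is in scope:

    def before(hook_name, hook_impls, kwargs):
        print("-> %s" % hook_name)

    def after(outcome, hook_name, hook_impls, kwargs):
        print("<- %s = %r" % (hook_name, outcome.get_result()))

    undo = pluginmanager.add_hookcall_monitoring(before, after)
    # ... hook calls are traced here ...
    undo()  # detach the tracing again
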
@@ -0,0 +1,2 @@
+Required options added via ``pytest_addoption`` will no longer prevent
+using --help without passing them.

@@ -0,0 +1 @@
+Respect ``python_files`` in assertion rewriting.

@@ -0,0 +1 @@
+Fix internal API links to ``pluggy`` objects.

@@ -0,0 +1,3 @@
+Added a workaround for Python 3.6 WindowsConsoleIO breaking due to pytest's
+FDCapture. Other code using console handles might still be affected by the
+very same issue and might require further workarounds/fixes, e.g. colorama.

@@ -0,0 +1 @@
+Make it clear that ``pytest.xfail`` stops test execution at the calling point and improve overall flow of the ``skipping`` docs.

@@ -5,14 +5,17 @@
 Skip and xfail: dealing with tests that cannot succeed
 =====================================================================
 
-If you have test functions that cannot be run on certain platforms
-or that you expect to fail you can mark them accordingly or you
-may call helper functions during execution of setup or test functions.
+You can mark test functions that cannot be run on certain platforms
+or that you expect to fail so pytest can deal with them accordingly and
+present a summary of the test session, while keeping the test suite *green*.
 
-A *skip* means that you expect your test to pass unless the environment
-(e.g. wrong Python interpreter, missing dependency) prevents it to run.
-And *xfail* means that your test can run but you expect it to fail
-because there is an implementation problem.
+A **skip** means that you expect your test to pass only if some conditions are met,
+otherwise pytest should skip running the test altogether. Common examples are skipping
+windows-only tests on non-windows platforms, or skipping tests that depend on an external
+resource which is not available at the moment (for example a database).
+
+An **xfail** means that you expect a test to fail for some reason.
+A common example is a test for a feature not yet implemented, or a bug not yet fixed.
 
 ``pytest`` counts and lists *skip* and *xfail* tests separately. Detailed
 information about skipped/xfailed tests is not shown by default to avoid

@@ -26,8 +29,8 @@ corresponding to the "short" letters shown in the test progress::
 .. _skipif:
 .. _`condition booleans`:
 
-Marking a test function to be skipped
--------------------------------------------
+Skipping test functions
+-----------------------
 
 .. versionadded:: 2.9
 
@@ -40,10 +43,23 @@ which may be passed an optional ``reason``:
     def test_the_unknown():
         ...
 
+
+Alternatively, it is also possible to skip imperatively during test execution or setup
+by calling the ``pytest.skip(reason)`` function:
+
+.. code-block:: python
+
+    def test_function():
+        if not valid_config():
+            pytest.skip("unsupported configuration")
+
+The imperative method is useful when it is not possible to evaluate the skip condition
+at import time.
+
 ``skipif``
 ~~~~~~~~~~
 
-.. versionadded:: 2.0, 2.4
+.. versionadded:: 2.0
 
 If you wish to skip something conditionally then you can use ``skipif`` instead.
 Here is an example of marking a test function to be skipped

@@ -55,16 +71,12 @@ when run on a Python3.3 interpreter::
     def test_function():
         ...
 
-During test function setup the condition ("sys.version_info >= (3,3)") is
-checked. If it evaluates to True, the test function will be skipped
-with the specified reason. Note that pytest enforces specifying a reason
-in order to report meaningful "skip reasons" (e.g. when using ``-rs``).
-If the condition is a string, it will be evaluated as python expression.
+If the condition evaluates to ``True`` during collection, the test function will be skipped,
+with the specified reason appearing in the summary when using ``-rs``.
 
-You can share skipif markers between modules. Consider this test module::
+You can share ``skipif`` markers between modules. Consider this test module::
 
     # content of test_mymodule.py
 
     import mymodule
     minversion = pytest.mark.skipif(mymodule.__versioninfo__ < (1,1),
                                     reason="at least mymodule-1.1 required")

@@ -72,7 +84,7 @@ You can share skipif markers between modules. Consider this test module::
     def test_function():
         ...
 
-You can import it from another test module::
+You can import the marker and reuse it in another test module::
 
     # test_myothermodule.py
     from test_mymodule import minversion

@@ -85,16 +97,15 @@ For larger test suites it's usually a good idea to have one file
 where you define the markers which you then consistently apply
 throughout your test suite.
 
-Alternatively, the pre pytest-2.4 way to specify :ref:`condition strings
-<string conditions>` instead of booleans will remain fully supported in future
-versions of pytest. It couldn't be easily used for importing markers
-between test modules so it's no longer advertised as the primary method.
+Alternatively, you can use :ref:`condition strings
+<string conditions>` instead of booleans, but they can't be shared between modules easily
+so they are supported mainly for backward compatibility reasons.
 
 
 Skip all test functions of a class or module
----------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-You can use the ``skipif`` decorator (and any other marker) on classes::
+You can use the ``skipif`` marker (as any other marker) on classes::
 
     @pytest.mark.skipif(sys.platform == 'win32',
                         reason="does not run on windows")

@@ -103,10 +114,10 @@ You can use the ``skipif`` decorator (and any other marker) on classes::
     def test_function(self):
         "will not be setup or run under 'win32' platform"
 
-If the condition is true, this marker will produce a skip result for
-each of the test methods.
+If the condition is ``True``, this marker will produce a skip result for
+each of the test methods of that class.
 
-If you want to skip all test functions of a module, you must use
+If you want to skip all test functions of a module, you may use
 the ``pytestmark`` name on the global level:
 
 .. code-block:: python
 
@@ -114,15 +125,57 @@ the ``pytestmark`` name on the global level:
     # test_module.py
     pytestmark = pytest.mark.skipif(...)
 
-If multiple "skipif" decorators are applied to a test function, it
+If multiple ``skipif`` decorators are applied to a test function, it
 will be skipped if any of the skip conditions is true.
 
 .. _`whole class- or module level`: mark.html#scoped-marking
 
 
+Skipping on a missing import dependency
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+You can use the following helper at module level
+or within a test or test setup function::
+
+    docutils = pytest.importorskip("docutils")
+
+If ``docutils`` cannot be imported here, this will lead to a
+skip outcome of the test. You can also skip based on the
+version number of a library::
+
+    docutils = pytest.importorskip("docutils", minversion="0.3")
+
+The version will be read from the specified
+module's ``__version__`` attribute.
+
+Summary
+~~~~~~~
+
+Here's a quick guide on how to skip tests in a module in different situations:
+
+1. Skip all tests in a module unconditionally:
+
+  .. code-block:: python
+
+    pytestmark = pytest.mark.skip('all tests still WIP')
+
+2. Skip all tests in a module based on some condition:
+
+  .. code-block:: python
+
+    pytestmark = pytest.mark.skipif(sys.platform == 'win32', reason='tests for linux only')
+
+3. Skip all tests in a module if some import is missing:
+
+  .. code-block:: python
+
+    pexpect = pytest.importorskip('pexpect')
+
+
 .. _xfail:
 
-Mark a test function as expected to fail
--------------------------------------------------------
+XFail: mark test functions as expected to fail
+----------------------------------------------
 
 You can use the ``xfail`` marker to indicate that you
 expect a test to fail::

@@ -135,6 +188,29 @@ This test will be run but no traceback will be reported
 when it fails. Instead terminal reporting will list it in the
 "expected to fail" (``XFAIL``) or "unexpectedly passing" (``XPASS``) sections.
 
+Alternatively, you can also mark a test as ``XFAIL`` from within a test or setup function
+imperatively:
+
+.. code-block:: python
+
+    def test_function():
+        if not valid_config():
+            pytest.xfail("failing configuration (but should work)")
+
+This will unconditionally make ``test_function`` ``XFAIL``. Note that no other code is executed
+after the ``pytest.xfail`` call, differently from the marker. That's because it is implemented
+internally by raising a known exception.
+
+Here's the signature of the ``xfail`` **marker** (not the function), using Python 3 keyword-only
+arguments syntax:
+
+.. code-block:: python
+
+    def xfail(condition=None, *, reason=None, raises=None, run=True, strict=False):
+
+
 ``strict`` parameter
 ~~~~~~~~~~~~~~~~~~~~
 
@@ -200,18 +276,19 @@ even executed, use the ``run`` parameter as ``False``:
     def test_function():
         ...
 
-This is specially useful for marking crashing tests for later inspection.
+This is especially useful for xfailing tests that are crashing the interpreter and should be
+investigated later.
 
 
-Ignoring xfail marks
-~~~~~~~~~~~~~~~~~~~~
+Ignoring xfail
+~~~~~~~~~~~~~~
 
 By specifying on the commandline::
 
     pytest --runxfail
 
 you can force the running and reporting of an ``xfail`` marked test
-as if it weren't marked at all.
+as if it weren't marked at all. This also causes ``pytest.xfail`` to produce no effect.
 
 Examples
 ~~~~~~~~

@@ -245,16 +322,6 @@ Running it with the report-on-xfail option gives this output::
 
     ======= 7 xfailed in 0.12 seconds ========
 
-xfail signature summary
-~~~~~~~~~~~~~~~~~~~~~~~
-
-Here's the signature of the ``xfail`` marker, using Python 3 keyword-only
-arguments syntax:
-
-.. code-block:: python
-
-    def xfail(condition=None, *, reason=None, raises=None, run=True, strict=False):
-
-
 .. _`skip/xfail with parametrize`:
 
|
|||
---------------------------
|
||||
|
||||
It is possible to apply markers like skip and xfail to individual
|
||||
test instances when using parametrize::
|
||||
test instances when using parametrize:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import pytest
|
||||
|
||||
@pytest.mark.parametrize(("n", "expected"), [
|
||||
(1, 2),
|
||||
pytest.mark.xfail((1, 0)),
|
||||
pytest.mark.xfail(reason="some bug")((1, 3)),
|
||||
pytest.param(1, 0, marks=pytest.mark.xfail),
|
||||
pytest.param(1, 3, marks=pytest.mark.xfail(reason="some bug")),
|
||||
(2, 3),
|
||||
(3, 4),
|
||||
(4, 5),
|
||||
pytest.mark.skipif("sys.version_info >= (3,0)")((10, 11)),
|
||||
pytest.param(10, 11, marks=pytest.mark.skipif(sys.version_info >= (3, 0), reason="py2k")),
|
||||
])
|
||||
def test_increment(n, expected):
|
||||
assert n + 1 == expected
|
||||
|
||||
|
||||
Imperative xfail from within a test or setup function
|
||||
------------------------------------------------------
|
||||
|
||||
If you cannot declare xfail- of skipif conditions at import
|
||||
time you can also imperatively produce an according outcome
|
||||
imperatively, in test or setup code::
|
||||
|
||||
def test_function():
|
||||
if not valid_config():
|
||||
pytest.xfail("failing configuration (but should work)")
|
||||
# or
|
||||
pytest.skip("unsupported configuration")
|
||||
|
||||
Note that calling ``pytest.skip`` at the module level
|
||||
is not allowed since pytest 3.0. If you are upgrading
|
||||
and ``pytest.skip`` was being used at the module level, you can set a
|
||||
``pytestmark`` variable:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# before pytest 3.0
|
||||
pytest.skip('skipping all tests because of reasons')
|
||||
# after pytest 3.0
|
||||
pytestmark = pytest.mark.skip('skipping all tests because of reasons')
|
||||
|
||||
``pytestmark`` applies a mark or list of marks to all tests in a module.
|
||||
|
||||
|
||||
Skipping on a missing import dependency
|
||||
--------------------------------------------------
|
||||
|
||||
You can use the following import helper at module level
|
||||
or within a test or test setup function::
|
||||
|
||||
docutils = pytest.importorskip("docutils")
|
||||
|
||||
If ``docutils`` cannot be imported here, this will lead to a
|
||||
skip outcome of the test. You can also skip based on the
|
||||
version number of a library::
|
||||
|
||||
docutils = pytest.importorskip("docutils", minversion="0.3")
|
||||
|
||||
The version will be read from the specified
|
||||
module's ``__version__`` attribute.
|
||||
|
||||
|
||||
.. _string conditions:
|
||||
|
||||
specifying conditions as strings versus booleans
|
||||
----------------------------------------------------------
|
||||
Conditions as strings instead of booleans
|
||||
-----------------------------------------
|
||||
|
||||
Prior to pytest-2.4 the only way to specify skipif/xfail conditions was
|
||||
to use strings::
|
||||
|
@@ -346,7 +369,7 @@ all the module globals, and ``os`` and ``sys`` as a minimum.
 Since pytest-2.4 `condition booleans`_ are considered preferable
 because markers can then be freely imported between test modules.
 With strings you need to import not only the marker but all variables
-everything used by the marker, which violates encapsulation.
+used by the marker, which violates encapsulation.
 
 The reason for specifying the condition as a string was that ``pytest`` can
 report a summary of skip conditions based purely on the condition string.

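To make the trade-off concrete, both styles side by side (illustrative test code, not from this commit):

    import sys
    import pytest

    # boolean condition: evaluated right here; the resulting marker object
    # can be imported and reused by other test modules
    py2only = pytest.mark.skipif(sys.version_info >= (3, 0), reason="py2 only")

    # string condition: evaluated later by pytest with ``os``, ``sys`` and the
    # module globals in scope, so nothing extra needs importing here
    @pytest.mark.skipif("sys.version_info >= (3,0)")
    def test_py2_behaviour():
        ...
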
@@ -387,25 +410,3 @@ The equivalent with "boolean conditions" is::
     ``config.getvalue()`` will not execute correctly.
-
-
-Summary
--------
-
-Here's a quick guide on how to skip tests in a module in different situations:
-
-1. Skip all tests in a module unconditionally:
-
-  .. code-block:: python
-
-    pytestmark = pytest.mark.skip('all tests still WIP')
-
-2. Skip all tests in a module based on some condition:
-
-  .. code-block:: python
-
-    pytestmark = pytest.mark.skipif(sys.platform == 'win32', 'tests for linux only')
-
-3. Skip all tests in a module if some import is missing:
-
-  .. code-block:: python
-
-    pexpect = pytest.importorskip('pexpect')

@@ -383,7 +383,7 @@ hook wrappers and passes the same arguments as to the regular hooks.
 
 At the yield point of the hook wrapper pytest will execute the next hook
 implementations and return their result to the yield point in the form of
-a :py:class:`CallOutcome` instance which encapsulates a result or
+a :py:class:`CallOutcome <_pytest.vendored_packages.pluggy._CallOutcome>` instance which encapsulates a result or
 exception info. The yield point itself will thus typically not raise
 exceptions (unless there are bugs).
 
@@ -448,7 +448,7 @@ Here is the order of execution:
    Plugin1).
 
 4. Plugin3's pytest_collection_modifyitems then executing the code after the yield
-   point. The yield receives a :py:class:`CallOutcome` instance which encapsulates
+   point. The yield receives a :py:class:`CallOutcome <_pytest.vendored_packages.pluggy._CallOutcome>` instance which encapsulates
    the result from calling the non-wrappers. Wrappers shall not modify the result.
 
 It's possible to use ``tryfirst`` and ``trylast`` also in conjunction with

@@ -525,7 +525,7 @@ Initialization, command line and configuration hooks
 Generic "runtest" hooks
 -----------------------
 
-All runtest related hooks receive a :py:class:`pytest.Item` object.
+All runtest related hooks receive a :py:class:`pytest.Item <_pytest.main.Item>` object.
 
 .. autofunction:: pytest_runtest_protocol
 .. autofunction:: pytest_runtest_setup

setup.py
@@ -43,7 +43,7 @@ def has_environment_marker_support():
 
 
 def main():
-    install_requires = ['py>=1.4.29', 'setuptools']  # pluggy is vendored in _pytest.vendored_packages
+    install_requires = ['py>=1.4.33', 'setuptools']  # pluggy is vendored in _pytest.vendored_packages
     extras_require = {}
     if has_environment_marker_support():
         extras_require[':python_version=="2.6"'] = ['argparse']

@@ -956,3 +956,17 @@ class TestIssue925(object):
         result = testdir.runpytest()
         result.stdout.fnmatch_lines('*E*assert True == ((False == True) == True)')
+
+
+class TestIssue2121():
+    def test_simple(self, testdir):
+        testdir.tmpdir.join("tests/file.py").ensure().write("""
+def test_simple_failure():
+    assert 1 + 1 == 3
+""")
+        testdir.tmpdir.join("pytest.ini").write(py.std.textwrap.dedent("""
+            [pytest]
+            python_files = tests/**.py
+        """))
+
+        result = testdir.runpytest()
+        result.stdout.fnmatch_lines('*E*assert (1 + 1) == 3')

@@ -449,3 +449,15 @@ def test_hook_proxy(testdir):
         '*test_foo4.py*',
         '*3 passed*',
     ])
+
+
+def test_required_option_help(testdir):
+    testdir.makeconftest("assert 0")
+    x = testdir.mkdir("x")
+    x.join("conftest.py").write(_pytest._code.Source("""
+        def pytest_addoption(parser):
+            parser.addoption("--xyz", action="store_true", required=True)
+    """))
+    result = testdir.runpytest("-h", x)
+    assert 'argument --xyz is required' not in result.stdout.str()
+    assert 'general:' in result.stdout.str()