Merge remote-tracking branch 'upstream/master' into features
This commit is contained in:
commit
f825b4979b
22
.travis.yml
22
.travis.yml
|
@ -1,9 +1,10 @@
|
|||
sudo: false
|
||||
language: python
|
||||
python:
|
||||
- '3.5'
|
||||
- '3.6'
|
||||
# command to install dependencies
|
||||
install: "pip install -U tox"
|
||||
install:
|
||||
- pip install --upgrade --pre tox
|
||||
# # command to run tests
|
||||
env:
|
||||
matrix:
|
||||
|
@ -13,18 +14,17 @@ env:
|
|||
- TOXENV=linting
|
||||
- TOXENV=py27
|
||||
- TOXENV=py34
|
||||
- TOXENV=py35
|
||||
- TOXENV=py36
|
||||
- TOXENV=py27-pexpect
|
||||
- TOXENV=py27-xdist
|
||||
- TOXENV=py27-trial
|
||||
- TOXENV=py27-numpy
|
||||
- TOXENV=py35-pexpect
|
||||
- TOXENV=py35-xdist
|
||||
- TOXENV=py35-trial
|
||||
- TOXENV=py35-numpy
|
||||
- TOXENV=py36-pexpect
|
||||
- TOXENV=py36-xdist
|
||||
- TOXENV=py36-trial
|
||||
- TOXENV=py36-numpy
|
||||
- TOXENV=py27-nobyte
|
||||
- TOXENV=doctesting
|
||||
- TOXENV=freeze
|
||||
- TOXENV=docs
|
||||
|
||||
matrix:
|
||||
|
@ -35,8 +35,10 @@ matrix:
|
|||
python: '3.3'
|
||||
- env: TOXENV=pypy
|
||||
python: 'pypy-5.4'
|
||||
- env: TOXENV=py36
|
||||
python: '3.6'
|
||||
- env: TOXENV=py35
|
||||
python: '3.5'
|
||||
- env: TOXENV=py35-freeze
|
||||
python: '3.5'
|
||||
- env: TOXENV=py37
|
||||
python: 'nightly'
|
||||
allow_failures:
|
||||
|
|
1
AUTHORS
1
AUTHORS
|
@ -156,6 +156,7 @@ Samuele Pedroni
|
|||
Segev Finer
|
||||
Simon Gomizelj
|
||||
Skylar Downes
|
||||
Srinivas Reddy Thatiparthy
|
||||
Stefan Farmbauer
|
||||
Stefan Zimmermann
|
||||
Stefano Taschini
|
||||
|
|
|
@ -8,6 +8,36 @@
|
|||
|
||||
.. towncrier release notes start
|
||||
|
||||
Pytest 3.2.1 (2017-08-08)
|
||||
=========================
|
||||
|
||||
Bug Fixes
|
||||
---------
|
||||
|
||||
- Fixed small terminal glitch when collecting a single test item. (`#2579
|
||||
<https://github.com/pytest-dev/pytest/issues/2579>`_)
|
||||
|
||||
- Correctly consider ``/`` as the file separator to automatically mark plugin
|
||||
files for rewrite on Windows. (`#2591 <https://github.com/pytest-
|
||||
dev/pytest/issues/2591>`_)
|
||||
|
||||
- Properly escape test names when setting ``PYTEST_CURRENT_TEST`` environment
|
||||
variable. (`#2644 <https://github.com/pytest-dev/pytest/issues/2644>`_)
|
||||
|
||||
- Fix error on Windows and Python 3.6+ when ``sys.stdout`` has been replaced
|
||||
with a stream-like object which does not implement the full ``io`` module
|
||||
buffer protocol. In particular this affects ``pytest-xdist`` users on the
|
||||
aforementioned platform. (`#2666 <https://github.com/pytest-
|
||||
dev/pytest/issues/2666>`_)
|
||||
|
||||
|
||||
Improved Documentation
|
||||
----------------------
|
||||
|
||||
- Explicitly document which pytest features work with ``unittest``. (`#2626
|
||||
<https://github.com/pytest-dev/pytest/issues/2626>`_)
|
||||
|
||||
|
||||
Pytest 3.2.0 (2017-07-30)
|
||||
=========================
|
||||
|
||||
|
@ -113,7 +143,7 @@ Bug Fixes
|
|||
- capture: ensure that EncodedFile.name is a string. (`#2555
|
||||
<https://github.com/pytest-dev/pytest/issues/2555>`_)
|
||||
|
||||
- The options ```--fixtures`` and ```--fixtures-per-test`` will now keep
|
||||
- The options ``--fixtures`` and ``--fixtures-per-test`` will now keep
|
||||
indentation within docstrings. (`#2574 <https://github.com/pytest-
|
||||
dev/pytest/issues/2574>`_)
|
||||
|
||||
|
|
|
@ -78,7 +78,7 @@ Features
|
|||
|
||||
- Python2.6+, Python3.3+, PyPy-2.3, Jython-2.5 (untested);
|
||||
|
||||
- Rich plugin architecture, with over 150+ `external plugins <http://docs.pytest.org/en/latest/plugins.html#installing-external-plugins-searching>`_ and thriving community;
|
||||
- Rich plugin architecture, with over 315+ `external plugins <http://plugincompat.herokuapp.com>`_ and thriving community;
|
||||
|
||||
|
||||
Documentation
|
||||
|
|
|
@ -861,7 +861,7 @@ class ReprFuncArgs(TerminalRepr):
|
|||
if self.args:
|
||||
linesofar = ""
|
||||
for name, value in self.args:
|
||||
ns = "%s = %s" % (name, value)
|
||||
ns = "%s = %s" % (safe_str(name), safe_str(value))
|
||||
if len(ns) + len(linesofar) + 2 > tw.fullwidth:
|
||||
if linesofar:
|
||||
tw.line(linesofar)
|
||||
|
|
|
@ -35,7 +35,7 @@ def pytest_addoption(parser):
|
|||
def pytest_load_initial_conftests(early_config, parser, args):
|
||||
ns = early_config.known_args_namespace
|
||||
if ns.capture == "fd":
|
||||
_py36_windowsconsoleio_workaround()
|
||||
_py36_windowsconsoleio_workaround(sys.stdout)
|
||||
_colorama_workaround()
|
||||
_readline_workaround()
|
||||
pluginmanager = early_config.pluginmanager
|
||||
|
@ -523,7 +523,7 @@ def _readline_workaround():
|
|||
pass
|
||||
|
||||
|
||||
def _py36_windowsconsoleio_workaround():
|
||||
def _py36_windowsconsoleio_workaround(stream):
|
||||
"""
|
||||
Python 3.6 implemented unicode console handling for Windows. This works
|
||||
by reading/writing to the raw console handle using
|
||||
|
@ -540,13 +540,20 @@ def _py36_windowsconsoleio_workaround():
|
|||
also means a different handle by replicating the logic in
|
||||
"Py_lifecycle.c:initstdio/create_stdio".
|
||||
|
||||
:param stream: in practice ``sys.stdout`` or ``sys.stderr``, but given
|
||||
here as parameter for unittesting purposes.
|
||||
|
||||
See https://github.com/pytest-dev/py/issues/103
|
||||
"""
|
||||
if not sys.platform.startswith('win32') or sys.version_info[:2] < (3, 6):
|
||||
return
|
||||
|
||||
buffered = hasattr(sys.stdout.buffer, 'raw')
|
||||
raw_stdout = sys.stdout.buffer.raw if buffered else sys.stdout.buffer
|
||||
# bail out if ``stream`` doesn't seem like a proper ``io`` stream (#2666)
|
||||
if not hasattr(stream, 'buffer'):
|
||||
return
|
||||
|
||||
buffered = hasattr(stream.buffer, 'raw')
|
||||
raw_stdout = stream.buffer.raw if buffered else stream.buffer
|
||||
|
||||
if not isinstance(raw_stdout, io._WindowsConsoleIO):
|
||||
return
|
||||
|
|
|
@ -11,6 +11,7 @@ import functools
|
|||
import py
|
||||
|
||||
import _pytest
|
||||
from _pytest.outcomes import TEST_OUTCOME
|
||||
|
||||
|
||||
try:
|
||||
|
@ -221,14 +222,16 @@ def getimfunc(func):
|
|||
|
||||
|
||||
def safe_getattr(object, name, default):
|
||||
""" Like getattr but return default upon any Exception.
|
||||
""" Like getattr but return default upon any Exception or any OutcomeException.
|
||||
|
||||
Attribute access can potentially fail for 'evil' Python objects.
|
||||
See issue #214.
|
||||
It catches OutcomeException because of #2490 (issue #580), new outcomes are derived from BaseException
|
||||
instead of Exception (for more details check #2707)
|
||||
"""
|
||||
try:
|
||||
return getattr(object, name, default)
|
||||
except Exception:
|
||||
except TEST_OUTCOME:
|
||||
return default
|
||||
|
||||
|
||||
|
|
|
@ -873,6 +873,18 @@ notset = Notset()
|
|||
FILE_OR_DIR = 'file_or_dir'
|
||||
|
||||
|
||||
def _iter_rewritable_modules(package_files):
|
||||
for fn in package_files:
|
||||
is_simple_module = '/' not in fn and fn.endswith('.py')
|
||||
is_package = fn.count('/') == 1 and fn.endswith('__init__.py')
|
||||
if is_simple_module:
|
||||
module_name, _ = os.path.splitext(fn)
|
||||
yield module_name
|
||||
elif is_package:
|
||||
package_name = os.path.dirname(fn)
|
||||
yield package_name
|
||||
|
||||
|
||||
class Config(object):
|
||||
""" access to configuration values, pluginmanager and plugin hooks. """
|
||||
|
||||
|
@ -1033,15 +1045,8 @@ class Config(object):
|
|||
for entry in entrypoint.dist._get_metadata(metadata)
|
||||
)
|
||||
|
||||
for fn in package_files:
|
||||
is_simple_module = os.sep not in fn and fn.endswith('.py')
|
||||
is_package = fn.count(os.sep) == 1 and fn.endswith('__init__.py')
|
||||
if is_simple_module:
|
||||
module_name, ext = os.path.splitext(fn)
|
||||
hook.mark_rewrite(module_name)
|
||||
elif is_package:
|
||||
package_name = os.path.dirname(fn)
|
||||
hook.mark_rewrite(package_name)
|
||||
for name in _iter_rewritable_modules(package_files):
|
||||
hook.mark_rewrite(name)
|
||||
|
||||
def _warn_about_missing_assertion(self, mode):
|
||||
try:
|
||||
|
@ -1343,7 +1348,7 @@ def determine_setup(inifile, args, warnfunc=None):
|
|||
rootdir, inifile, inicfg = getcfg(dirs, warnfunc=warnfunc)
|
||||
if rootdir is None:
|
||||
rootdir = get_common_ancestor([py.path.local(), ancestor])
|
||||
is_fs_root = os.path.splitdrive(str(rootdir))[1] == os.sep
|
||||
is_fs_root = os.path.splitdrive(str(rootdir))[1] == '/'
|
||||
if is_fs_root:
|
||||
rootdir = ancestor
|
||||
return rootdir, inifile, inicfg or {}
|
||||
|
|
|
@ -26,7 +26,10 @@ SETUP_CFG_PYTEST = '[pytest] section in setup.cfg files is deprecated, use [tool
|
|||
|
||||
GETFUNCARGVALUE = "use of getfuncargvalue is deprecated, use getfixturevalue"
|
||||
|
||||
RESULT_LOG = '--result-log is deprecated and scheduled for removal in pytest 4.0'
|
||||
RESULT_LOG = (
|
||||
'--result-log is deprecated and scheduled for removal in pytest 4.0.\n'
|
||||
'See https://docs.pytest.org/en/latest/usage.html#creating-resultlog-format-files for more information.'
|
||||
)
|
||||
|
||||
MARK_INFO_ATTRIBUTE = RemovedInPytest4Warning(
|
||||
"MarkInfo objects are deprecated as they contain the merged marks"
|
||||
|
|
|
@ -432,7 +432,8 @@ class FixtureRequest(FuncargnamesCompatAttr):
|
|||
from _pytest import deprecated
|
||||
warnings.warn(
|
||||
deprecated.GETFUNCARGVALUE,
|
||||
DeprecationWarning)
|
||||
DeprecationWarning,
|
||||
stacklevel=2)
|
||||
return self.getfixturevalue(argname)
|
||||
|
||||
def _get_active_fixturedef(self, argname):
|
||||
|
|
|
@ -379,13 +379,17 @@ class RunResult:
|
|||
return d
|
||||
raise ValueError("Pytest terminal report not found")
|
||||
|
||||
def assert_outcomes(self, passed=0, skipped=0, failed=0):
|
||||
def assert_outcomes(self, passed=0, skipped=0, failed=0, error=0):
|
||||
""" assert that the specified outcomes appear with the respective
|
||||
numbers (0 means it didn't occur) in the text output from a test run."""
|
||||
d = self.parseoutcomes()
|
||||
assert passed == d.get("passed", 0)
|
||||
assert skipped == d.get("skipped", 0)
|
||||
assert failed == d.get("failed", 0)
|
||||
obtained = {
|
||||
'passed': d.get('passed', 0),
|
||||
'skipped': d.get('skipped', 0),
|
||||
'failed': d.get('failed', 0),
|
||||
'error': d.get('error', 0),
|
||||
}
|
||||
assert obtained == dict(passed=passed, skipped=skipped, failed=failed, error=error)
|
||||
|
||||
|
||||
class Testdir:
|
||||
|
|
|
@ -493,7 +493,8 @@ def raises(expected_exception, *args, **kwargs):
|
|||
...
|
||||
>>> assert exc_info.type == ValueError
|
||||
|
||||
Or you can use the keyword argument ``match`` to assert that the
|
||||
|
||||
Since version ``3.1`` you can use the keyword argument ``match`` to assert that the
|
||||
exception matches a text or regex::
|
||||
|
||||
>>> with raises(ValueError, match='must be 0 or None'):
|
||||
|
@ -502,7 +503,12 @@ def raises(expected_exception, *args, **kwargs):
|
|||
>>> with raises(ValueError, match=r'must be \d+$'):
|
||||
... raise ValueError("value must be 42")
|
||||
|
||||
Or you can specify a callable by passing a to-be-called lambda::
|
||||
**Legacy forms**
|
||||
|
||||
The forms below are fully supported but are discouraged for new code because the
|
||||
context manager form is regarded as more readable and less error-prone.
|
||||
|
||||
It is possible to specify a callable by passing a to-be-called lambda::
|
||||
|
||||
>>> raises(ZeroDivisionError, lambda: 1/0)
|
||||
<ExceptionInfo ...>
|
||||
|
@ -516,11 +522,14 @@ def raises(expected_exception, *args, **kwargs):
|
|||
>>> raises(ZeroDivisionError, f, x=0)
|
||||
<ExceptionInfo ...>
|
||||
|
||||
A third possibility is to use a string to be executed::
|
||||
It is also possible to pass a string to be evaluated at runtime::
|
||||
|
||||
>>> raises(ZeroDivisionError, "f(0)")
|
||||
<ExceptionInfo ...>
|
||||
|
||||
The string will be evaluated using the same ``locals()`` and ``globals()``
|
||||
at the moment of the ``raises`` call.
|
||||
|
||||
.. autoclass:: _pytest._code.ExceptionInfo
|
||||
:members:
|
||||
|
||||
|
|
|
@ -7,6 +7,7 @@ import sys
|
|||
from time import time
|
||||
|
||||
import py
|
||||
from _pytest.compat import _PY2
|
||||
from _pytest._code.code import TerminalRepr, ExceptionInfo
|
||||
from _pytest.outcomes import skip, Skipped, TEST_OUTCOME
|
||||
|
||||
|
@ -134,7 +135,11 @@ def _update_current_test_var(item, when):
|
|||
"""
|
||||
var_name = 'PYTEST_CURRENT_TEST'
|
||||
if when:
|
||||
os.environ[var_name] = '{0} ({1})'.format(item.nodeid, when)
|
||||
value = '{0} ({1})'.format(item.nodeid, when)
|
||||
if _PY2:
|
||||
# python 2 doesn't like null bytes on environment variables (see #2644)
|
||||
value = value.replace('\x00', '(null)')
|
||||
os.environ[var_name] = value
|
||||
else:
|
||||
os.environ.pop(var_name)
|
||||
|
||||
|
|
|
@ -180,8 +180,22 @@ class TerminalReporter:
|
|||
self._tw.line(line, **markup)
|
||||
|
||||
def rewrite(self, line, **markup):
|
||||
"""
|
||||
Rewinds the terminal cursor to the beginning and writes the given line.
|
||||
|
||||
:kwarg erase: if True, will also add spaces until the full terminal width to ensure
|
||||
previous lines are properly erased.
|
||||
|
||||
The rest of the keyword arguments are markup instructions.
|
||||
"""
|
||||
erase = markup.pop('erase', False)
|
||||
if erase:
|
||||
fill_count = self._tw.fullwidth - len(line)
|
||||
fill = ' ' * fill_count
|
||||
else:
|
||||
fill = ''
|
||||
line = str(line)
|
||||
self._tw.write("\r" + line, **markup)
|
||||
self._tw.write("\r" + line + fill, **markup)
|
||||
|
||||
def write_sep(self, sep, title=None, **markup):
|
||||
self.ensure_newline()
|
||||
|
@ -292,12 +306,9 @@ class TerminalReporter:
|
|||
if skipped:
|
||||
line += " / %d skipped" % skipped
|
||||
if self.isatty:
|
||||
self.rewrite(line, bold=True, erase=True)
|
||||
if final:
|
||||
line += " \n"
|
||||
# Rewrite with empty line so we will not see the artifact of
|
||||
# previous write
|
||||
self.rewrite('')
|
||||
self.rewrite(line, bold=True)
|
||||
self.write('\n')
|
||||
else:
|
||||
self.write_line(line)
|
||||
|
||||
|
|
12
appveyor.yml
12
appveyor.yml
|
@ -21,13 +21,13 @@ environment:
|
|||
- TOXENV: "py27-xdist"
|
||||
- TOXENV: "py27-trial"
|
||||
- TOXENV: "py27-numpy"
|
||||
- TOXENV: "py35-pexpect"
|
||||
- TOXENV: "py35-xdist"
|
||||
- TOXENV: "py35-trial"
|
||||
- TOXENV: "py35-numpy"
|
||||
- TOXENV: "py36-pexpect"
|
||||
- TOXENV: "py36-xdist"
|
||||
- TOXENV: "py36-trial"
|
||||
- TOXENV: "py36-numpy"
|
||||
- TOXENV: "py27-nobyte"
|
||||
- TOXENV: "doctesting"
|
||||
- TOXENV: "freeze"
|
||||
- TOXENV: "py35-freeze"
|
||||
- TOXENV: "docs"
|
||||
|
||||
install:
|
||||
|
@ -36,7 +36,7 @@ install:
|
|||
|
||||
- if "%TOXENV%" == "pypy" call scripts\install-pypy.bat
|
||||
|
||||
- C:\Python35\python -m pip install tox
|
||||
- C:\Python36\python -m pip install --upgrade --pre tox
|
||||
|
||||
build: false # Not a C# project, build stuff at the test step instead.
|
||||
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
In one of the simple examples, use `pytest_collection_modifyitems()` to skip tests based on a command-line option, allowing its sharing while preventing a user error when accessing `pytest.config` before the argument parsing.
|
|
@ -0,0 +1 @@
|
|||
Calling the deprecated `request.getfuncargvalue()` now shows the source of the call.
|
|
@ -0,0 +1 @@
|
|||
Fixed minor error in 'Good Practices/Manual Integration' code snippet.
|
|
@ -0,0 +1 @@
|
|||
Fixed edge-case during collection: attributes which raised ``pytest.fail`` when accessed would abort the entire collection.
|
|
@ -0,0 +1 @@
|
|||
Fixed typo in goodpractices.rst.
|
|
@ -0,0 +1 @@
|
|||
Fix ``ReprFuncArgs`` with mixed unicode and UTF-8 args.
|
|
@ -0,0 +1 @@
|
|||
Improve user guidance regarding ``--resultlog`` deprecation.
|
|
@ -6,6 +6,7 @@ Release announcements
|
|||
:maxdepth: 2
|
||||
|
||||
|
||||
release-3.2.1
|
||||
release-3.2.0
|
||||
release-3.1.3
|
||||
release-3.1.2
|
||||
|
|
|
@ -0,0 +1,22 @@
|
|||
pytest-3.2.1
|
||||
=======================================
|
||||
|
||||
pytest 3.2.1 has just been released to PyPI.
|
||||
|
||||
This is a bug-fix release, being a drop-in replacement. To upgrade::
|
||||
|
||||
pip install --upgrade pytest
|
||||
|
||||
The full changelog is available at http://doc.pytest.org/en/latest/changelog.html.
|
||||
|
||||
Thanks to all who contributed to this release, among them:
|
||||
|
||||
* Alex Gaynor
|
||||
* Bruno Oliveira
|
||||
* Florian Bruhin
|
||||
* Ronny Pfannschmidt
|
||||
* Srinivas Reddy Thatiparthy
|
||||
|
||||
|
||||
Happy testing,
|
||||
The pytest Development Team
|
|
@ -119,9 +119,9 @@ exceptions your own code is deliberately raising, whereas using
|
|||
like documenting unfixed bugs (where the test describes what "should" happen)
|
||||
or bugs in dependencies.
|
||||
|
||||
If you want to test that a regular expression matches on the string
|
||||
representation of an exception (like the ``TestCase.assertRaisesRegexp`` method
|
||||
from ``unittest``) you can use the ``ExceptionInfo.match`` method::
|
||||
Also, the context manager form accepts a ``match`` keyword parameter to test
|
||||
that a regular expression matches on the string representation of an exception
|
||||
(like the ``TestCase.assertRaisesRegexp`` method from ``unittest``)::
|
||||
|
||||
import pytest
|
||||
|
||||
|
@ -129,12 +129,11 @@ from ``unittest``) you can use the ``ExceptionInfo.match`` method::
|
|||
raise ValueError("Exception 123 raised")
|
||||
|
||||
def test_match():
|
||||
with pytest.raises(ValueError) as excinfo:
|
||||
with pytest.raises(ValueError, match=r'.* 123 .*'):
|
||||
myfunc()
|
||||
excinfo.match(r'.* 123 .*')
|
||||
|
||||
The regexp parameter of the ``match`` method is matched with the ``re.search``
|
||||
function. So in the above example ``excinfo.match('123')`` would have worked as
|
||||
function. So in the above example ``match='123'`` would have worked as
|
||||
well.
|
||||
|
||||
|
||||
|
|
|
@ -230,13 +230,16 @@ Builtin configuration file options
|
|||
.. confval:: python_files
|
||||
|
||||
One or more Glob-style file patterns determining which python files
|
||||
are considered as test modules.
|
||||
are considered as test modules. By default, pytest will consider
|
||||
any file matching with ``test_*.py`` and ``*_test.py`` globs as a test
|
||||
module.
|
||||
|
||||
.. confval:: python_classes
|
||||
|
||||
One or more name prefixes or glob-style patterns determining which classes
|
||||
are considered for test collection. Here is an example of how to collect
|
||||
tests from classes that end in ``Suite``:
|
||||
are considered for test collection. By default, pytest will consider any
|
||||
class prefixed with ``Test`` as a test collection. Here is an example of how
|
||||
to collect tests from classes that end in ``Suite``:
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
|
@ -251,7 +254,8 @@ Builtin configuration file options
|
|||
.. confval:: python_functions
|
||||
|
||||
One or more name prefixes or glob-patterns determining which test functions
|
||||
and methods are considered tests. Here is an example of how
|
||||
and methods are considered tests. By default, pytest will consider any
|
||||
function prefixed with ``test`` as a test. Here is an example of how
|
||||
to collect test functions and methods that end in ``_test``:
|
||||
|
||||
.. code-block:: ini
|
||||
|
|
|
@ -127,7 +127,7 @@ Control skipping of tests according to command line option
|
|||
.. regendoc:wipe
|
||||
|
||||
Here is a ``conftest.py`` file adding a ``--runslow`` command
|
||||
line option to control skipping of ``slow`` marked tests:
|
||||
line option to control skipping of ``pytest.mark.slow`` marked tests:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
|
@ -136,7 +136,16 @@ line option to control skipping of ``slow`` marked tests:
|
|||
import pytest
|
||||
def pytest_addoption(parser):
|
||||
parser.addoption("--runslow", action="store_true",
|
||||
help="run slow tests")
|
||||
default=False, help="run slow tests")
|
||||
|
||||
def pytest_collection_modifyitems(config, items):
|
||||
if config.getoption("--runslow"):
|
||||
# --runslow given in cli: do not skip slow tests
|
||||
return
|
||||
skip_slow = pytest.mark.skip(reason="need --runslow option to run")
|
||||
for item in items:
|
||||
if "slow" in item.keywords:
|
||||
item.add_marker(skip_slow)
|
||||
|
||||
We can now write a test module like this:
|
||||
|
||||
|
@ -146,17 +155,11 @@ We can now write a test module like this:
|
|||
import pytest
|
||||
|
||||
|
||||
slow = pytest.mark.skipif(
|
||||
not pytest.config.getoption("--runslow"),
|
||||
reason="need --runslow option to run"
|
||||
)
|
||||
|
||||
|
||||
def test_func_fast():
|
||||
pass
|
||||
|
||||
|
||||
@slow
|
||||
@pytest.mark.slow
|
||||
def test_func_slow():
|
||||
pass
|
||||
|
||||
|
@ -170,7 +173,7 @@ and when running it will see a skipped "slow" test::
|
|||
|
||||
test_module.py .s
|
||||
======= short test summary info ========
|
||||
SKIP [1] test_module.py:14: need --runslow option to run
|
||||
SKIP [1] test_module.py:8: need --runslow option to run
|
||||
|
||||
======= 1 passed, 1 skipped in 0.12 seconds ========
|
||||
|
||||
|
@ -363,14 +366,14 @@ out which tests are the slowest. Let's make an artificial test suite:
|
|||
import time
|
||||
|
||||
def test_funcfast():
|
||||
pass
|
||||
|
||||
def test_funcslow1():
|
||||
time.sleep(0.1)
|
||||
|
||||
def test_funcslow2():
|
||||
def test_funcslow1():
|
||||
time.sleep(0.2)
|
||||
|
||||
def test_funcslow2():
|
||||
time.sleep(0.3)
|
||||
|
||||
Now we can profile which test functions execute the slowest::
|
||||
|
||||
$ pytest --durations=3
|
||||
|
@ -382,9 +385,9 @@ Now we can profile which test functions execute the slowest::
|
|||
test_some_are_slow.py ...
|
||||
|
||||
======= slowest 3 test durations ========
|
||||
0.20s call test_some_are_slow.py::test_funcslow2
|
||||
0.10s call test_some_are_slow.py::test_funcslow1
|
||||
0.00s setup test_some_are_slow.py::test_funcfast
|
||||
0.30s call test_some_are_slow.py::test_funcslow2
|
||||
0.20s call test_some_are_slow.py::test_funcslow1
|
||||
0.10s call test_some_are_slow.py::test_funcfast
|
||||
======= 3 passed in 0.12 seconds ========
|
||||
|
||||
incremental testing - test steps
|
||||
|
|
|
@ -122,7 +122,7 @@ want to distribute them along with your application::
|
|||
test_view.py
|
||||
...
|
||||
|
||||
In this scheme, it is easy to your run tests using the ``--pyargs`` option::
|
||||
In this scheme, it is easy to run your tests using the ``--pyargs`` option::
|
||||
|
||||
pytest --pyargs mypkg
|
||||
|
||||
|
@ -267,7 +267,7 @@ your own setuptools Test command for invoking pytest.
|
|||
|
||||
def initialize_options(self):
|
||||
TestCommand.initialize_options(self)
|
||||
self.pytest_args = []
|
||||
self.pytest_args = ''
|
||||
|
||||
def run_tests(self):
|
||||
import shlex
|
||||
|
|
|
@ -59,7 +59,7 @@ Features
|
|||
|
||||
- Python2.6+, Python3.3+, PyPy-2.3, Jython-2.5 (untested);
|
||||
|
||||
- Rich plugin architecture, with over 150+ :ref:`external plugins <extplugins>` and thriving community;
|
||||
- Rich plugin architecture, with over 315+ `external plugins <http://plugincompat.herokuapp.com>`_ and thriving community;
|
||||
|
||||
|
||||
Documentation
|
||||
|
|
|
@ -10,6 +10,7 @@ By using the ``pytest.mark`` helper you can easily set
|
|||
metadata on your test functions. There are
|
||||
some builtin markers, for example:
|
||||
|
||||
* :ref:`skip <skip>` - always skip a test function
|
||||
* :ref:`skipif <skipif>` - skip a test function if a certain condition is met
|
||||
* :ref:`xfail <xfail>` - produce an "expected failure" outcome if a certain
|
||||
condition is met
|
||||
|
|
|
@ -27,6 +27,7 @@ corresponding to the "short" letters shown in the test progress::
|
|||
(See :ref:`how to change command line options defaults`)
|
||||
|
||||
.. _skipif:
|
||||
.. _skip:
|
||||
.. _`condition booleans`:
|
||||
|
||||
Skipping test functions
|
||||
|
|
|
@ -2,58 +2,77 @@
|
|||
.. _`unittest.TestCase`:
|
||||
.. _`unittest`:
|
||||
|
||||
Support for unittest.TestCase / Integration of fixtures
|
||||
=====================================================================
|
||||
unittest.TestCase Support
|
||||
=========================
|
||||
|
||||
.. _`unittest.py style`: http://docs.python.org/library/unittest.html
|
||||
``pytest`` supports running Python ``unittest``-based tests out of the box.
|
||||
It's meant for leveraging existing ``unittest``-based test suites
|
||||
to use pytest as a test runner and also allow to incrementally adapt
|
||||
the test suite to take full advantage of pytest's features.
|
||||
|
||||
``pytest`` has support for running Python `unittest.py style`_ tests.
|
||||
It's meant for leveraging existing unittest-style projects
|
||||
to use pytest features. Concretely, pytest will automatically
|
||||
collect ``unittest.TestCase`` subclasses and their ``test`` methods in
|
||||
test files. It will invoke typical setup/teardown methods and
|
||||
generally try to make test suites written to run on unittest, to also
|
||||
run using ``pytest``. We assume here that you are familiar with writing
|
||||
``unittest.TestCase`` style tests and rather focus on
|
||||
integration aspects.
|
||||
To run an existing ``unittest``-style test suite using ``pytest``, type::
|
||||
|
||||
Note that this is meant as a provisional way of running your test code
|
||||
until you fully convert to pytest-style tests. To fully take advantage of
|
||||
:ref:`fixtures <fixture>`, :ref:`parametrization <parametrize>` and
|
||||
:ref:`hooks <writing-plugins>` you should convert (tools like `unittest2pytest
|
||||
<https://pypi.python.org/pypi/unittest2pytest/>`__ are helpful).
|
||||
Also, not all 3rd party plugins are expected to work best with
|
||||
``unittest.TestCase`` style tests.
|
||||
pytest tests
|
||||
|
||||
Usage
|
||||
-------------------------------------------------------------------
|
||||
|
||||
After :ref:`installation` type::
|
||||
pytest will automatically collect ``unittest.TestCase`` subclasses and
|
||||
their ``test`` methods in ``test_*.py`` or ``*_test.py`` files.
|
||||
|
||||
pytest
|
||||
Almost all ``unittest`` features are supported:
|
||||
|
||||
and you should be able to run your unittest-style tests if they
|
||||
are contained in ``test_*`` modules. If that works for you then
|
||||
you can make use of most :ref:`pytest features <features>`, for example
|
||||
``--pdb`` debugging in failures, using :ref:`plain assert-statements <assert>`,
|
||||
:ref:`more informative tracebacks <tbreportdemo>`, stdout-capturing or
|
||||
distributing tests to multiple CPUs via the ``-nNUM`` option if you
|
||||
installed the ``pytest-xdist`` plugin. Please refer to
|
||||
the general ``pytest`` documentation for many more examples.
|
||||
* ``@unittest.skip`` style decorators;
|
||||
* ``setUp/tearDown``;
|
||||
* ``setUpClass/tearDownClass()``;
|
||||
|
||||
.. note::
|
||||
.. _`load_tests protocol`: https://docs.python.org/3/library/unittest.html#load-tests-protocol
|
||||
.. _`setUpModule/tearDownModule`: https://docs.python.org/3/library/unittest.html#setupmodule-and-teardownmodule
|
||||
.. _`subtests`: https://docs.python.org/3/library/unittest.html#distinguishing-test-iterations-using-subtests
|
||||
|
||||
Running tests from ``unittest.TestCase`` subclasses with ``--pdb`` will
|
||||
disable tearDown and cleanup methods for the case that an Exception
|
||||
occurs. This allows proper post mortem debugging for all applications
|
||||
which have significant logic in their tearDown machinery. However,
|
||||
supporting this feature has the following side effect: If people
|
||||
overwrite ``unittest.TestCase`` ``__call__`` or ``run``, they need to
|
||||
overwrite ``debug`` in the same way (this is also true for standard
|
||||
unittest).
|
||||
Up to this point pytest does not have support for the following features:
|
||||
|
||||
Mixing pytest fixtures into unittest.TestCase style tests
|
||||
-----------------------------------------------------------
|
||||
* `load_tests protocol`_;
|
||||
* `setUpModule/tearDownModule`_;
|
||||
* `subtests`_;
|
||||
|
||||
Benefits out of the box
|
||||
-----------------------
|
||||
|
||||
By running your test suite with pytest you can make use of several features,
|
||||
in most cases without having to modify existing code:
|
||||
|
||||
* Obtain :ref:`more informative tracebacks <tbreportdemo>`;
|
||||
* :ref:`stdout and stderr <captures>` capturing;
|
||||
* :ref:`Test selection options <select-tests>` using ``-k`` and ``-m`` flags;
|
||||
* :ref:`maxfail`;
|
||||
* :ref:`--pdb <pdb-option>` command-line option for debugging on test failures
|
||||
(see :ref:`note <pdb-unittest-note>` below);
|
||||
* Distribute tests to multiple CPUs using the `pytest-xdist <http://pypi.python.org/pypi/pytest-xdist>`_ plugin;
|
||||
* Use :ref:`plain assert-statements <assert>` instead of ``self.assert*`` functions (`unittest2pytest
|
||||
<https://pypi.python.org/pypi/unittest2pytest/>`__ is immensely helpful in this);
|
||||
|
||||
|
||||
pytest features in ``unittest.TestCase`` subclasses
|
||||
---------------------------------------------------
|
||||
|
||||
The following pytest features work in ``unittest.TestCase`` subclasses:
|
||||
|
||||
* :ref:`Marks <mark>`: :ref:`skip <skip>`, :ref:`skipif <skipif>`, :ref:`xfail <xfail>`;
|
||||
* :ref:`Auto-use fixtures <mixing-fixtures>`;
|
||||
|
||||
The following pytest features **do not** work, and probably
|
||||
never will due to different design philosophies:
|
||||
|
||||
* :ref:`Fixtures <fixture>` (except for ``autouse`` fixtures, see :ref:`below <mixing-fixtures>`);
|
||||
* :ref:`Parametrization <parametrize>`;
|
||||
* :ref:`Custom hooks <writing-plugins>`;
|
||||
|
||||
|
||||
Third party plugins may or may not work well, depending on the plugin and the test suite.
|
||||
|
||||
.. _mixing-fixtures:
|
||||
|
||||
Mixing pytest fixtures into ``unittest.TestCase`` subclasses using marks
|
||||
------------------------------------------------------------------------
|
||||
|
||||
Running your unittest with ``pytest`` allows you to use its
|
||||
:ref:`fixture mechanism <fixture>` with ``unittest.TestCase`` style
|
||||
|
@ -143,8 +162,8 @@ share the same ``self.db`` instance which was our intention
|
|||
when writing the class-scoped fixture function above.
|
||||
|
||||
|
||||
autouse fixtures and accessing other fixtures
|
||||
-------------------------------------------------------------------
|
||||
Using autouse fixtures and accessing other fixtures
|
||||
---------------------------------------------------
|
||||
|
||||
Although it's usually better to explicitly declare use of fixtures you need
|
||||
for a given test, you may sometimes want to have fixtures that are
|
||||
|
@ -165,6 +184,7 @@ creation of a per-test temporary directory::
|
|||
import unittest
|
||||
|
||||
class MyTest(unittest.TestCase):
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def initdir(self, tmpdir):
|
||||
tmpdir.chdir() # change to pytest-provided temporary directory
|
||||
|
@ -200,3 +220,16 @@ was executed ahead of the ``test_method``.
|
|||
|
||||
You can also gradually move away from subclassing from ``unittest.TestCase`` to *plain asserts*
|
||||
and then start to benefit from the full pytest feature set step by step.
|
||||
|
||||
.. _pdb-unittest-note:
|
||||
|
||||
.. note::
|
||||
|
||||
Running tests from ``unittest.TestCase`` subclasses with ``--pdb`` will
|
||||
disable tearDown and cleanup methods for the case that an Exception
|
||||
occurs. This allows proper post mortem debugging for all applications
|
||||
which have significant logic in their tearDown machinery. However,
|
||||
supporting this feature has the following side effect: If people
|
||||
overwrite ``unittest.TestCase`` ``__call__`` or ``run``, they need to
|
||||
overwrite ``debug`` in the same way (this is also true for standard
|
||||
unittest).
|
||||
|
|
|
@ -41,6 +41,8 @@ Getting help on version, option names, environment variables
|
|||
pytest -h | --help # show help on command line and config file options
|
||||
|
||||
|
||||
.. _maxfail:
|
||||
|
||||
Stopping after the first (or N) failures
|
||||
---------------------------------------------------
|
||||
|
||||
|
@ -49,6 +51,8 @@ To stop the testing process after the first (N) failures::
|
|||
pytest -x # stop after first failure
|
||||
pytest --maxfail=2 # stop after two failures
|
||||
|
||||
.. _select-tests:
|
||||
|
||||
Specifying tests / selecting tests
|
||||
---------------------------------------------------
|
||||
|
||||
|
@ -135,6 +139,9 @@ with Ctrl+C to find out where the tests are *hanging*. By default no output
|
|||
will be shown (because KeyboardInterrupt is caught by pytest). By using this
|
||||
option you make sure a trace is shown.
|
||||
|
||||
|
||||
.. _pdb-option:
|
||||
|
||||
Dropping to PDB_ (Python Debugger) on failures
|
||||
-----------------------------------------------
|
||||
|
||||
|
@ -304,6 +311,13 @@ Creating resultlog format files
|
|||
|
||||
This option is rarely used and is scheduled for removal in 4.0.
|
||||
|
||||
An alternative for users which still need similar functionality is to use the
|
||||
`pytest-tap <https://pypi.python.org/pypi/pytest-tap>`_ plugin which provides
|
||||
a stream of test data.
|
||||
|
||||
If you have any concerns, please don't hesitate to
|
||||
`open an issue <https://github.com/pytest-dev/pytest/issues>`_.
|
||||
|
||||
To create plain-text machine-readable result files you can issue::
|
||||
|
||||
pytest --resultlog=path
|
||||
|
|
|
@ -5,4 +5,4 @@ if "%TOXENV%" == "coveralls" (
|
|||
exit /b 0
|
||||
)
|
||||
)
|
||||
C:\Python35\python -m tox
|
||||
C:\Python36\python -m tox
|
||||
|
|
|
@ -400,7 +400,7 @@ class TestGeneralUsage(object):
|
|||
monkeypatch.setitem(sys.modules, 'myplugin', mod)
|
||||
assert pytest.main(args=[str(tmpdir)], plugins=['myplugin']) == 0
|
||||
|
||||
def test_parameterized_with_bytes_regex(self, testdir):
|
||||
def test_parametrized_with_bytes_regex(self, testdir):
|
||||
p = testdir.makepyfile("""
|
||||
import re
|
||||
import pytest
|
||||
|
@ -414,6 +414,19 @@ class TestGeneralUsage(object):
|
|||
'*1 passed*'
|
||||
])
|
||||
|
||||
def test_parametrized_with_null_bytes(self, testdir):
|
||||
"""Test parametrization with values that contain null bytes and unicode characters (#2644)"""
|
||||
p = testdir.makepyfile(u"""
|
||||
# encoding: UTF-8
|
||||
import pytest
|
||||
|
||||
@pytest.mark.parametrize("data", ["\\x00", u'ação'])
|
||||
def test_foo(data):
|
||||
assert data
|
||||
""")
|
||||
res = testdir.runpytest(p)
|
||||
res.assert_outcomes(passed=2)
|
||||
|
||||
|
||||
class TestInvocationVariants(object):
|
||||
def test_earlyinit(self, testdir):
|
||||
|
|
|
@ -1,9 +1,11 @@
|
|||
# coding: utf-8
|
||||
from __future__ import absolute_import, division, print_function
|
||||
import sys
|
||||
|
||||
import _pytest._code
|
||||
import py
|
||||
import pytest
|
||||
from test_excinfo import TWMock
|
||||
|
||||
|
||||
def test_ne():
|
||||
|
@ -172,3 +174,23 @@ class TestTracebackEntry(object):
|
|||
source = entry.getsource()
|
||||
assert len(source) == 6
|
||||
assert 'assert False' in source[5]
|
||||
|
||||
|
||||
class TestReprFuncArgs(object):
|
||||
|
||||
def test_not_raise_exception_with_mixed_encoding(self):
|
||||
from _pytest._code.code import ReprFuncArgs
|
||||
|
||||
tw = TWMock()
|
||||
|
||||
args = [
|
||||
('unicode_string', u"São Paulo"),
|
||||
('utf8_string', 'S\xc3\xa3o Paulo'),
|
||||
]
|
||||
|
||||
r = ReprFuncArgs(args)
|
||||
r.toterminal(tw)
|
||||
if sys.version_info[0] >= 3:
|
||||
assert tw.lines[0] == 'unicode_string = São Paulo, utf8_string = São Paulo'
|
||||
else:
|
||||
assert tw.lines[0] == 'unicode_string = São Paulo, utf8_string = São Paulo'
|
||||
|
|
|
@ -78,4 +78,7 @@ def test_resultlog_is_deprecated(testdir):
|
|||
pass
|
||||
''')
|
||||
result = testdir.runpytest('--result-log=%s' % testdir.tmpdir.join('result.log'))
|
||||
result.stdout.fnmatch_lines(['*--result-log is deprecated and scheduled for removal in pytest 4.0*'])
|
||||
result.stdout.fnmatch_lines([
|
||||
'*--result-log is deprecated and scheduled for removal in pytest 4.0*',
|
||||
'*See https://docs.pytest.org/*/usage.html#creating-resultlog-format-files for more information*',
|
||||
])
|
||||
|
|
|
@ -1140,6 +1140,23 @@ def test_error_attribute_issue555(testdir):
|
|||
reprec.assertoutcome(passed=1)
|
||||
|
||||
|
||||
@pytest.mark.skipif(not sys.platform.startswith('win') and sys.version_info[:2] >= (3, 6),
|
||||
reason='only py3.6+ on windows')
|
||||
def test_py36_windowsconsoleio_workaround_non_standard_streams():
|
||||
"""
|
||||
Ensure _py36_windowsconsoleio_workaround function works with objects that
|
||||
do not implement the full ``io``-based stream protocol, for example execnet channels (#2666).
|
||||
"""
|
||||
from _pytest.capture import _py36_windowsconsoleio_workaround
|
||||
|
||||
class DummyStream:
|
||||
def write(self, s):
|
||||
pass
|
||||
|
||||
stream = DummyStream()
|
||||
_py36_windowsconsoleio_workaround(stream)
|
||||
|
||||
|
||||
def test_dontreadfrominput_has_encoding(testdir):
|
||||
testdir.makepyfile("""
|
||||
import sys
|
||||
|
|
|
@ -2,7 +2,8 @@ from __future__ import absolute_import, division, print_function
|
|||
import sys
|
||||
|
||||
import pytest
|
||||
from _pytest.compat import is_generator, get_real_func
|
||||
from _pytest.compat import is_generator, get_real_func, safe_getattr
|
||||
from _pytest.outcomes import OutcomeException
|
||||
|
||||
|
||||
def test_is_generator():
|
||||
|
@ -74,3 +75,27 @@ def test_is_generator_async_syntax(testdir):
|
|||
""")
|
||||
result = testdir.runpytest()
|
||||
result.stdout.fnmatch_lines(['*1 passed*'])
|
||||
|
||||
|
||||
class ErrorsHelper(object):
|
||||
@property
|
||||
def raise_exception(self):
|
||||
raise Exception('exception should be catched')
|
||||
|
||||
@property
|
||||
def raise_fail(self):
|
||||
pytest.fail('fail should be catched')
|
||||
|
||||
|
||||
def test_helper_failures():
|
||||
helper = ErrorsHelper()
|
||||
with pytest.raises(Exception):
|
||||
helper.raise_exception
|
||||
with pytest.raises(OutcomeException):
|
||||
helper.raise_fail
|
||||
|
||||
|
||||
def test_safe_getattr():
|
||||
helper = ErrorsHelper()
|
||||
assert safe_getattr(helper, 'raise_exception', 'default') == 'default'
|
||||
assert safe_getattr(helper, 'raise_fail', 'default') == 'default'
|
||||
|
|
|
@ -3,7 +3,7 @@ import py
|
|||
import pytest
|
||||
|
||||
import _pytest._code
|
||||
from _pytest.config import getcfg, get_common_ancestor, determine_setup
|
||||
from _pytest.config import getcfg, get_common_ancestor, determine_setup, _iter_rewritable_modules
|
||||
from _pytest.main import EXIT_NOTESTSCOLLECTED
|
||||
|
||||
|
||||
|
@ -308,6 +308,16 @@ class TestConfigAPI(object):
|
|||
config = testdir.parseconfig('--confcutdir', testdir.tmpdir.join('dir').ensure(dir=1))
|
||||
assert config.getoption('confcutdir') == str(testdir.tmpdir.join('dir'))
|
||||
|
||||
@pytest.mark.parametrize('names, expected', [
|
||||
(['bar.py'], ['bar']),
|
||||
(['foo', 'bar.py'], []),
|
||||
(['foo', 'bar.pyc'], []),
|
||||
(['foo', '__init__.py'], ['foo']),
|
||||
(['foo', 'bar', '__init__.py'], []),
|
||||
])
|
||||
def test_iter_rewritable_modules(self, names, expected):
|
||||
assert list(_iter_rewritable_modules(['/'.join(names)])) == expected
|
||||
|
||||
|
||||
class TestConfigFromdictargs(object):
|
||||
def test_basic_behavior(self):
|
||||
|
|
|
@ -214,6 +214,16 @@ class TestTerminal(object):
|
|||
result = testdir.runpytest()
|
||||
result.stdout.fnmatch_lines(['collected 1 item'])
|
||||
|
||||
def test_rewrite(self, testdir, monkeypatch):
|
||||
config = testdir.parseconfig()
|
||||
f = py.io.TextIO()
|
||||
monkeypatch.setattr(f, 'isatty', lambda *args: True)
|
||||
tr = TerminalReporter(config, f)
|
||||
tr.writer.fullwidth = 10
|
||||
tr.write('hello')
|
||||
tr.rewrite('hey', erase=True)
|
||||
assert f.getvalue() == 'hello' + '\r' + 'hey' + (7 * ' ')
|
||||
|
||||
|
||||
class TestCollectonly(object):
|
||||
def test_collectonly_basic(self, testdir):
|
||||
|
|
4
tox.ini
4
tox.ini
|
@ -15,7 +15,7 @@ envlist =
|
|||
{py27,py35}-{pexpect,xdist,trial,numpy}
|
||||
py27-nobyte
|
||||
doctesting
|
||||
freeze
|
||||
py35-freeze
|
||||
docs
|
||||
|
||||
[testenv]
|
||||
|
@ -169,7 +169,7 @@ changedir = testing
|
|||
commands =
|
||||
{envpython} {envbindir}/py.test-jython -rfsxX {posargs}
|
||||
|
||||
[testenv:freeze]
|
||||
[testenv:py35-freeze]
|
||||
changedir = testing/freeze
|
||||
deps = pyinstaller
|
||||
commands =
|
||||
|
|
Loading…
Reference in New Issue