Merge pull request #1554 from RonnyPfannschmidt/merge-master

Merge master into features
Bruno Oliveira 2016-05-24 11:09:33 -03:00
commit 8c1be624a6
31 changed files with 306 additions and 148 deletions

View File

@ -50,6 +50,7 @@ Jaap Broekhuizen
Jan Balster
Janne Vanhala
Jason R. Coombs
John Towler
Joshua Bronson
Jurko Gospodnetić
Katarzyna Jachim
@ -63,6 +64,7 @@ Marc Schlaich
Mark Abramowitz
Markus Unterwaditzer
Martijn Faassen
Martin Prusse
Matt Bachmann
Matt Williams
Michael Aquilina
@ -74,6 +76,7 @@ Omar Kohl
Pieter Mulder
Piotr Banaszkiewicz
Punyashloka Biswal
Quentin Pradet
Ralf Schmitt
Raphael Pierzina
Roman Bolshakov
@ -92,3 +95,4 @@ Russel Winder
Ben Webb
Alexei Kozlenok
Cal Leeming
Feng Ma

View File

@ -86,11 +86,24 @@
* When receiving identical test ids in parametrize, we generate unique test ids.
*
* Fix win32 path issue when putting a custom config file with an absolute path
in ``pytest.main("-c your_absolute_path")``.
*
* Fix maximum recursion depth detection when the raised error class is not aware
of unicode/encoded bytes.
Thanks `@prusse-martin`_ for the PR (`#1506`_).
*
* Fix ``pytest.mark.skip`` mark when used in strict mode.
Thanks `@pquentin`_ for the PR and `@RonnyPfannschmidt`_ for
showing how to fix the bug.
* Minor improvements and fixes to the documentation.
Thanks `@omarkohl`_ for the PR.
.. _#1506: https://github.com/pytest-dev/pytest/pull/1506
.. _@prusse-martin: https://github.com/prusse-martin
2.9.1
@ -165,7 +178,7 @@
**Changes**
* **Important**: `py.code <http://pylib.readthedocs.org/en/latest/code.html>`_ has been
* **Important**: `py.code <https://pylib.readthedocs.io/en/latest/code.html>`_ has been
merged into the ``pytest`` repository as ``pytest._code``. This decision
was made because ``py.code`` had very few uses outside ``pytest`` and the
fact that it was in a different repository made it difficult to fix bugs on
@ -178,7 +191,7 @@
**experimental**, so you definitely should not import it explicitly!
Please note that the original ``py.code`` is still available in
`pylib <http://pylib.readthedocs.org>`_.
`pylib <https://pylib.readthedocs.io>`_.
* ``pytest_enter_pdb`` now optionally receives the pytest config object.
Thanks `@nicoddemus`_ for the PR.
@ -246,6 +259,7 @@
.. _@RonnyPfannschmidt: https://github.com/RonnyPfannschmidt
.. _@rabbbit: https://github.com/rabbbit
.. _@hackebrot: https://github.com/hackebrot
.. _@pquentin: https://github.com/pquentin
2.8.7
=====

View File

@ -305,11 +305,11 @@ class Traceback(list):
def filter(self, fn=lambda x: not x.ishidden()):
""" return a Traceback instance with certain items removed
fn is a function that gets a single argument, a TracebackItem
fn is a function that gets a single argument, a TracebackEntry
instance, and should return True when the item should be added
to the Traceback, False when not
by default this removes all the TracebackItems which are hidden
by default this removes all the TracebackEntries which are hidden
(see ishidden() above)
"""
return Traceback(filter(fn, self), self._excinfo)
@ -325,7 +325,7 @@ class Traceback(list):
return self[-1]
def recursionindex(self):
""" return the index of the frame/TracebackItem where recursion
""" return the index of the frame/TracebackEntry where recursion
originates if appropriate, None if no recursion occurred
"""
cache = {}
@ -603,8 +603,7 @@ class FormattedExcinfo(object):
if self.tbfilter:
traceback = traceback.filter()
recursionindex = None
if excinfo.errisinstance(RuntimeError):
if "maximum recursion depth exceeded" in str(excinfo.value):
if is_recursion_error(excinfo):
recursionindex = traceback.recursionindex()
last = traceback[-1]
entries = []
@ -867,3 +866,14 @@ def getrawcode(obj, trycall=True):
return x
return obj
if sys.version_info[:2] >= (3, 5): # RecursionError introduced in 3.5
def is_recursion_error(excinfo):
return excinfo.errisinstance(RecursionError) # noqa
else:
def is_recursion_error(excinfo):
if not excinfo.errisinstance(RuntimeError):
return False
try:
return "maximum recursion depth exceeded" in str(excinfo.value)
except UnicodeError:
return False

View File

@ -104,7 +104,7 @@ def _prepareconfig(args=None, plugins=None):
elif not isinstance(args, (tuple, list)):
if not isinstance(args, str):
raise ValueError("not a string or argument list: %r" % (args,))
args = shlex.split(args)
args = shlex.split(args, posix=sys.platform != "win32")
config = get_config()
pluginmanager = config.pluginmanager
try:

View File

@ -373,7 +373,7 @@ class LogXML(object):
suite_stop_time = time.time()
suite_time_delta = suite_stop_time - self.suite_start_time
numtests = self.stats['passed'] + self.stats['failure']
numtests = self.stats['passed'] + self.stats['failure'] + self.stats['skipped']
logfile.write('<?xml version="1.0" encoding="utf-8"?>')

View File

@ -58,7 +58,7 @@ pytest_cmdline_main.tryfirst = True
def pytest_collection_modifyitems(items, config):
keywordexpr = config.option.keyword
keywordexpr = config.option.keyword.lstrip()
matchexpr = config.option.markexpr
if not keywordexpr and not matchexpr:
return

View File

@ -1746,7 +1746,7 @@ class FixtureRequest(FuncargnamesCompatAttr):
self._pyfuncitem = pyfuncitem
#: fixture for which this request is being performed
self.fixturename = None
#: Scope string, one of "function", "cls", "module", "session"
#: Scope string, one of "function", "class", "module", "session"
self.scope = "function"
self._funcargs = {}
self._fixturedefs = {}

View File

@ -30,6 +30,11 @@ def pytest_configure(config):
nop.Exception = XFailed
setattr(pytest, "xfail", nop)
config.addinivalue_line("markers",
"skip(reason=None): skip the given test function with an optional reason. "
"Example: skip(reason=\"no way of currently testing this\") skips the "
"test."
)
config.addinivalue_line("markers",
"skipif(condition): skip the given test function if eval(condition) "
"results in a True value. Evaluation happens within the "
@ -38,13 +43,13 @@ def pytest_configure(config):
"http://pytest.org/latest/skipping.html"
)
config.addinivalue_line("markers",
"xfail(condition, reason=None, run=True, raises=None): mark the the test function "
"as an expected failure if eval(condition) has a True value. "
"Optionally specify a reason for better reporting and run=False if "
"you don't even want to execute the test function. If only specific "
"exception(s) are expected, you can list them in raises, and if the test fails "
"in other ways, it will be reported as a true failure. "
"See http://pytest.org/latest/skipping.html"
"xfail(condition, reason=None, run=True, raises=None, strict=False): "
"mark the the test function as an expected failure if eval(condition) "
"has a True value. Optionally specify a reason for better reporting "
"and run=False if you don't even want to execute the test function. "
"If only specific exception(s) are expected, you can list them in "
"raises, and if the test fails in other ways, it will be reported as "
"a true failure. See http://pytest.org/latest/skipping.html"
)

View File

@ -75,7 +75,7 @@ The py.test Development Team
**Changes**
* **Important**: `py.code <http://pylib.readthedocs.org/en/latest/code.html>`_ has been
* **Important**: `py.code <https://pylib.readthedocs.io/en/latest/code.html>`_ has been
merged into the ``pytest`` repository as ``pytest._code``. This decision
was made because ``py.code`` had very few uses outside ``pytest`` and the
fact that it was in a different repository made it difficult to fix bugs on
@ -88,7 +88,7 @@ The py.test Development Team
**experimental**, so you definitely should not import it explicitly!
Please note that the original ``py.code`` is still available in
`pylib <http://pylib.readthedocs.org>`_.
`pylib <https://pylib.readthedocs.io>`_.
* ``pytest_enter_pdb`` now optionally receives the pytest config object.
Thanks `@nicoddemus`_ for the PR.

View File

@ -5,7 +5,7 @@ Setting up bash completion
==========================
When using bash as your shell, ``pytest`` can use argcomplete
(https://argcomplete.readthedocs.org/) for auto-completion.
(https://argcomplete.readthedocs.io/) for auto-completion.
For this ``argcomplete`` needs to be installed **and** enabled.
Install argcomplete using::

View File

@ -9,19 +9,19 @@ by passing the ``--ignore=path`` option on the cli. ``pytest`` allows multiple
``--ignore`` options. Example::
tests/
├── example
│   ├── test_example_01.py
│   ├── test_example_02.py
│   └── test_example_03.py
├── foobar
│   ├── test_foobar_01.py
│   ├── test_foobar_02.py
│   └── test_foobar_03.py
└── hello
└── world
├── test_world_01.py
├── test_world_02.py
└── test_world_03.py
|-- example
| |-- test_example_01.py
| |-- test_example_02.py
| '-- test_example_03.py
|-- foobar
| |-- test_foobar_01.py
| |-- test_foobar_02.py
| '-- test_foobar_03.py
'-- hello
'-- world
|-- test_world_01.py
|-- test_world_02.py
'-- test_world_03.py
Now if you invoke ``pytest`` with ``--ignore=tests/foobar/test_foobar_03.py --ignore=tests/hello/``,
you will see that ``pytest`` only collects test modules that do not match the specified patterns::
@ -177,16 +177,27 @@ and a setup.py dummy file like this::
# content of setup.py
0/0 # will raise exception if imported
then a pytest run on python2 will find the one test when run with a python2
interpreters and will leave out the setup.py file::
then a pytest run on Python2 will find the one test and will leave out the
setup.py file::
$ py.test --collect-only
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
====== test session starts ======
platform linux2 -- Python 2.7.10, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
collected 1 items
<Module 'pkg/module_py2.py'>
<Function 'test_only_on_python2'>
====== no tests ran in 0.04 seconds ======
If you run with a Python3 interpreter both the one test and the setup.py file
will be left out::
$ py.test --collect-only
====== test session starts ======
platform linux -- Python 3.4.3+, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
collected 0 items
======= no tests ran in 0.12 seconds ========
If you run with a Python3 interpreter the module added through the conftest.py file will not be considered for test collection.
====== no tests ran in 0.03 seconds ======

View File

@ -201,7 +201,7 @@ Example::
The ``__tracebackhide__`` setting influences how ``pytest`` shows
tracebacks: the ``checkconfig`` function will not be shown
unless the ``--fulltrace`` command line option is specified.
unless the ``--full-trace`` command line option is specified.
Let's run our little function::
$ py.test -q test_checkconfig.py
@ -725,7 +725,7 @@ Integrating pytest runner and cx_freeze
-----------------------------------------------------------
If you freeze your application using a tool like
`cx_freeze <http://cx-freeze.readthedocs.org>`_ in order to distribute it
`cx_freeze <https://cx-freeze.readthedocs.io>`_ in order to distribute it
to your end-users, it is a good idea to also package your test runner and run
your tests using the frozen application.

View File

@ -36,9 +36,9 @@ style <unittest.TestCase>` or :ref:`nose based <nosestyle>` projects.
.. note::
pytest-2.4 introduced an additional experimental
:ref:`yield fixture mechanism <yieldfixture>` for easier context manager
integration and more linear writing of teardown code.
pytest-2.4 introduced an additional :ref:`yield fixture mechanism
<yieldfixture>` for easier context manager integration and more linear
writing of teardown code.
.. _`funcargs`:
.. _`funcarg mechanism`:
@ -283,6 +283,14 @@ module itself does not need to change or know about these details
of fixture setup.
Finalization/teardown with yield fixtures
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Another alternative to the *request.addfinalizer()* method is to use *yield
fixtures*. All the code after the *yield* statement serves as the teardown
code. See the :ref:`yield fixture documentation <yieldfixture>`.
.. _`request-context`:
Fixtures can introspect the requesting test context
@ -577,55 +585,85 @@ to show the setup/teardown flow::
@pytest.fixture(scope="module", params=["mod1", "mod2"])
def modarg(request):
param = request.param
print ("create", param)
print (" SETUP modarg %s" % param)
def fin():
print ("fin %s" % param)
print (" TEARDOWN modarg %s" % param)
request.addfinalizer(fin)
return param
@pytest.fixture(scope="function", params=[1,2])
def otherarg(request):
return request.param
param = request.param
print (" SETUP otherarg %s" % param)
def fin():
print (" TEARDOWN otherarg %s" % param)
request.addfinalizer(fin)
return param
def test_0(otherarg):
print (" test0", otherarg)
print (" RUN test0 with otherarg %s" % otherarg)
def test_1(modarg):
print (" test1", modarg)
print (" RUN test1 with modarg %s" % modarg)
def test_2(otherarg, modarg):
print (" test2", otherarg, modarg)
print (" RUN test2 with otherarg %s and modarg %s" % (otherarg, modarg))
Let's run the tests in verbose mode and look at the print output::
$ py.test -v -s test_module.py
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.4
====== test session starts ======
platform linux -- Python 3.4.3+, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3
cachedir: .cache
rootdir: $REGENDOC_TMPDIR, inifile:
collecting ... collected 8 items
collected 8 items
test_module.py::test_0[1] test0 1
PASSED
test_module.py::test_0[2] test0 2
PASSED
test_module.py::test_1[mod1] create mod1
test1 mod1
PASSED
test_module.py::test_2[1-mod1] test2 1 mod1
PASSED
test_module.py::test_2[2-mod1] test2 2 mod1
PASSED
test_module.py::test_1[mod2] create mod2
test1 mod2
PASSED
test_module.py::test_2[1-mod2] test2 1 mod2
PASSED
test_module.py::test_2[2-mod2] test2 2 mod2
PASSED
test_module.py::test_0[1] SETUP otherarg 1
RUN test0 with otherarg 1
PASSED TEARDOWN otherarg 1
======= 8 passed in 0.12 seconds ========
test_module.py::test_0[2] SETUP otherarg 2
RUN test0 with otherarg 2
PASSED TEARDOWN otherarg 2
You can see that the parametrized module-scoped ``modarg`` resource caused
an ordering of test execution that led to the fewest possible "active" resources. The finalizer for the ``mod1`` parametrized resource was executed
before the ``mod2`` resource was set up.
test_module.py::test_1[mod1] SETUP modarg mod1
RUN test1 with modarg mod1
PASSED
test_module.py::test_2[1-mod1] SETUP otherarg 1
RUN test2 with otherarg 1 and modarg mod1
PASSED TEARDOWN otherarg 1
test_module.py::test_2[2-mod1] SETUP otherarg 2
RUN test2 with otherarg 2 and modarg mod1
PASSED TEARDOWN otherarg 2
test_module.py::test_1[mod2] TEARDOWN modarg mod1
SETUP modarg mod2
RUN test1 with modarg mod2
PASSED
test_module.py::test_2[1-mod2] SETUP otherarg 1
RUN test2 with otherarg 1 and modarg mod2
PASSED TEARDOWN otherarg 1
test_module.py::test_2[2-mod2] SETUP otherarg 2
RUN test2 with otherarg 2 and modarg mod2
PASSED TEARDOWN otherarg 2
TEARDOWN modarg mod2
====== 8 passed in 0.01 seconds ======
You can see that the parametrized module-scoped ``modarg`` resource caused an
ordering of test execution that led to the fewest possible "active" resources.
The finalizer for the ``mod1`` parametrized resource was executed before the
``mod2`` resource was set up.
In particular, notice that test_0 is completely independent and finishes first.
Then test_1 is executed with ``mod1``, then test_2 with ``mod1``, then test_1
with ``mod2`` and finally test_2 with ``mod2``.
The ``otherarg`` parametrized resource (having function scope) was set up before
and torn down after every test that used it.
.. _`usefixtures`:

View File

@ -153,7 +153,9 @@ against your source code checkout, helping to detect packaging
glitches.
Continuous integration services such as Jenkins_ can make use of the
``--junitxml=PATH`` option to create a JUnitXML file and generate reports.
``--junitxml=PATH`` option to create a JUnitXML file and generate reports (e.g.
by publishing the results in a nice format with the `Jenkins xUnit Plugin
<https://wiki.jenkins-ci.org/display/JENKINS/xUnit+Plugin>`_).
Integrating with setuptools / ``python setup.py test`` / ``pytest-runner``

View File

@ -6,7 +6,7 @@
.. _`pytest_nose`: plugin/nose.html
.. _`reStructured Text`: http://docutils.sourceforge.net
.. _`Python debugger`: http://docs.python.org/lib/module-pdb.html
.. _nose: https://nose.readthedocs.org/en/latest/
.. _nose: https://nose.readthedocs.io/en/latest/
.. _pytest: http://pypi.python.org/pypi/pytest
.. _mercurial: http://mercurial.selenic.com/wiki/
.. _`setuptools`: http://pypi.python.org/pypi/setuptools
@ -18,4 +18,4 @@
.. _hudson: http://hudson-ci.org/
.. _jenkins: http://jenkins-ci.org/
.. _tox: http://testrun.org/tox
.. _pylib: http://py.readthedocs.org/en/latest/
.. _pylib: https://py.readthedocs.io/en/latest/

View File

@ -46,7 +46,7 @@ Unsupported idioms / known issues
(e.g. ``tests/test_mode.py`` and ``other/tests/test_mode.py``)
by extending sys.path/import semantics. pytest does not do that
but there is discussion in `issue268 <https://github.com/pytest-dev/pytest/issues/268>`_ for adding some support. Note that
`nose2 chose to avoid this sys.path/import hackery <https://nose2.readthedocs.org/en/latest/differences.html#test-discovery-and-loading>`_.
`nose2 chose to avoid this sys.path/import hackery <https://nose2.readthedocs.io/en/latest/differences.html#test-discovery-and-loading>`_.
- nose-style doctests are not collected and executed correctly,
also doctest fixtures don't work.

View File

@ -90,7 +90,7 @@ Finding out which plugins are active
If you want to find out which plugins are active in your
environment you can type::
py.test --traceconfig
py.test --trace-config
and will get an extended test header which shows activated plugins
and their names. It will also print local plugins aka

View File

@ -57,7 +57,7 @@ Here are some examples of projects using ``pytest`` (please send notes via :ref:
* `bu <http://packages.python.org/bu/>`_ a microscopic build system
* `katcp <https://bitbucket.org/hodgestar/katcp>`_ Telescope communication protocol over Twisted
* `kss plugin timer <http://pypi.python.org/pypi/kss.plugin.timer>`_
* `pyudev <http://pyudev.readthedocs.org/en/latest/tests/plugins.html>`_ a pure Python binding to the Linux library libudev
* `pyudev <https://pyudev.readthedocs.io/en/latest/tests/plugins.html>`_ a pure Python binding to the Linux library libudev
* `pytest-localserver <https://bitbucket.org/basti/pytest-localserver/>`_ a plugin for pytest that provides a httpserver and smtpserver
* `pytest-monkeyplus <http://pypi.python.org/pypi/pytest-monkeyplus/>`_ a plugin that extends monkeypatch

View File

@ -21,7 +21,7 @@ but note that project specific settings will be considered
first. There is a flag that helps you debug your
conftest.py configurations::
py.test --traceconfig
py.test --trace-config
customizing the collecting and running process

View File

@ -16,7 +16,7 @@ command line options
display py lib version and import information.
``-p name``
early-load given plugin (multi-allowed).
``--traceconfig``
``--trace-config``
trace considerations of conftest.py files.
``--nomagic``
don't reinterpret asserts, no traceback cutting.

View File

@ -22,7 +22,7 @@ command line options
(deprecated, use -r)
``--tb=style``
traceback print mode (long/short/line/no).
``--fulltrace``
``--full-trace``
don't cut any tracebacks (default is to cut).
``--fixtures``
show available function arguments, sorted by plugin

View File

@ -56,7 +56,7 @@ Several test run options::
Import 'pkg' and use its filesystem location to find and run tests::
py.test --pyargs pkg # run all tests found below directory of pypkg
py.test --pyargs pkg # run all tests found below directory of pkg
Modifying Python traceback printing
----------------------------------------------
@ -74,6 +74,14 @@ Examples for modifying traceback printing::
py.test --tb=native # Python standard library formatting
py.test --tb=no # no traceback at all
The ``--full-trace`` option causes very long traces to be printed on error (longer
than ``--tb=long``). It also ensures that a stack trace is printed on
**KeyboardInterrupt** (Ctrl+C).
This is very useful if the tests are taking too long and you interrupt them
with Ctrl+C to find out where the tests are *hanging*. By default no output
will be shown (because KeyboardInterrupt is caught by pytest). By using this
option you make sure a trace is shown.
Dropping to PDB_ (Python Debugger) on failures
-----------------------------------------------
@ -150,7 +158,7 @@ To get a list of the slowest 10 test durations::
Creating JUnitXML format files
----------------------------------------------------
To create result files which can be read by Hudson_ or other Continuous
To create result files which can be read by Jenkins_ or other Continuous
integration servers, use this invocation::
py.test --junitxml=path

View File

@ -23,7 +23,7 @@ reporting by calling `well specified hooks`_ of the following plugins:
In principle, each hook call is a ``1:N`` Python function call where ``N`` is the
number of registered implementation functions for a given specification.
All specifications and implementations following the ``pytest_`` prefix
All specifications and implementations follow the ``pytest_`` prefix
naming convention, making them easy to distinguish and find.
.. _`pluginorder`:
@ -158,13 +158,22 @@ it in your setuptools-invocation:
'name_of_plugin = myproject.pluginmodule',
]
},
# custom PyPI classifier for pytest plugins
classifiers=[
"Framework :: Pytest",
],
)
If a package is installed this way, ``pytest`` will load
``myproject.pluginmodule`` as a plugin which can define
`well specified hooks`_.
.. note::
Make sure to include ``Framework :: Pytest`` in your list of
`PyPI classifiers <https://python-packaging-user-guide.readthedocs.io/en/latest/distributing/#classifiers>`_
to make it easy for users to find your plugin.
Requiring/Loading plugins in a test module or conftest file
@ -194,7 +203,7 @@ the plugin manager like this:
plugin = config.pluginmanager.getplugin("name_of_plugin")
If you want to look at the names of existing plugins, use
the ``--traceconfig`` option.
the ``--trace-config`` option.
Testing plugins
---------------

View File

@ -2,30 +2,34 @@ import json
import py
import textwrap
issues_url = "http://bitbucket.org/api/1.0/repositories/pytest-dev/pytest/issues"
issues_url = "https://api.github.com/repos/pytest-dev/pytest/issues"
import requests
def get_issues():
chunksize = 50
start = 0
issues = []
url = issues_url
while 1:
post_data = {"accountname": "pytest-dev",
"repo_slug": "pytest",
"start": start,
"limit": chunksize}
print ("getting from", start)
r = requests.get(issues_url, params=post_data)
get_data = {"state": "all"}
r = requests.get(url, params=get_data)
data = r.json()
issues.extend(data["issues"])
if start + chunksize >= data["count"]:
if r.status_code == 403:
# API request limit exceeded
print(data['message'])
exit(1)
issues.extend(data)
# Look for next page
links = requests.utils.parse_header_links(r.headers['Link'])
another_page = False
for link in links:
if link['rel'] == 'next':
url = link['url']
another_page = True
if not another_page:
return issues
start += chunksize
kind2num = "bug enhancement task proposal".split()
status2num = "new open resolved duplicate invalid wontfix".split()
def main(args):
cachefile = py.path.local(args.cache)
@ -35,33 +39,38 @@ def main(args):
else:
issues = json.loads(cachefile.read())
open_issues = [x for x in issues
if x["status"] in ("new", "open")]
open_issues = [x for x in issues if x["state"] == "open"]
def kind_and_id(x):
kind = x["metadata"]["kind"]
return kind2num.index(kind), len(issues)-int(x["local_id"])
open_issues.sort(key=kind_and_id)
open_issues.sort(key=lambda x: x["number"])
report(open_issues)
def _get_kind(issue):
labels = [l['name'] for l in issue['labels']]
for key in ('bug', 'enhancement', 'proposal'):
if key in labels:
return key
return 'issue'
def report(issues):
for issue in issues:
metadata = issue["metadata"]
priority = issue["priority"]
title = issue["title"]
content = issue["content"]
kind = metadata["kind"]
status = issue["status"]
id = issue["local_id"]
link = "https://bitbucket.org/pytest-dev/pytest/issue/%s/" % id
body = issue["body"]
kind = _get_kind(issue)
status = issue["state"]
number = issue["number"]
link = "https://github.com/pytest-dev/pytest/issues/%s/" % number
print("----")
print(status, kind, link)
print(title)
#print()
#lines = content.split("\n")
#lines = body.split("\n")
#print ("\n".join(lines[:3]))
#if len(lines) > 3 or len(content) > 240:
#if len(lines) > 3 or len(body) > 240:
# print ("...")
print("\n\nFound %s open issues" % len(issues))
if __name__ == "__main__":
import argparse
@ -72,3 +81,4 @@ if __name__ == "__main__":
help="cache file")
args = parser.parse_args()
main(args)

View File

@ -37,7 +37,7 @@ def has_environment_marker_support():
References:
* https://wheel.readthedocs.org/en/latest/index.html#defining-conditional-dependencies
* https://wheel.readthedocs.io/en/latest/index.html#defining-conditional-dependencies
* https://www.python.org/dev/peps/pep-0426/#environment-markers
"""
try:

View File

@ -671,6 +671,11 @@ class TestDurations:
"*call*test_1*",
])
def test_with_not(self, testdir):
testdir.makepyfile(self.source)
result = testdir.runpytest("-k not 1")
assert result.ret == 0
class TestDurationWithFixture:
source = """

View File

@ -4,7 +4,10 @@ import operator
import _pytest
import py
import pytest
from _pytest._code.code import (FormattedExcinfo, ReprExceptionInfo,
from _pytest._code.code import (
ExceptionInfo,
FormattedExcinfo,
ReprExceptionInfo,
ExceptionChainRepr)
queue = py.builtin._tryimport('queue', 'Queue')
@ -1048,3 +1051,18 @@ raise ValueError()
assert tw.lines[40] == "E AttributeError"
assert tw.lines[41] == ""
assert tw.lines[42].endswith("mod.py:15: AttributeError")
@pytest.mark.parametrize("style", ["short", "long"])
@pytest.mark.parametrize("encoding", [None, "utf8", "utf16"])
def test_repr_traceback_with_unicode(style, encoding):
msg = u'☹'
if encoding is not None:
msg = msg.encode(encoding)
try:
raise RuntimeError(msg)
except RuntimeError:
e_info = ExceptionInfo()
formatter = FormattedExcinfo(style=style)
repr_traceback = formatter.repr_traceback(e_info)
assert repr_traceback is not None

View File

@ -101,6 +101,16 @@ class TestConfigCmdlineParsing:
config = testdir.parseconfig("-c", "custom.cfg")
assert config.getini("custom") == "1"
def test_absolute_win32_path(self, testdir):
temp_cfg_file = testdir.makefile(".cfg", custom="""
[pytest]
addopts = --version
""")
from os.path import normpath
temp_cfg_file = normpath(str(temp_cfg_file))
ret = pytest.main("-c " + temp_cfg_file)
assert ret == _pytest.main.EXIT_OK
class TestConfigAPI:
def test_config_trace(self, testdir):
config = testdir.parseconfig()

View File

@ -100,7 +100,7 @@ class TestPython:
result, dom = runandparse(testdir)
assert result.ret
node = dom.find_first_by_tag("testsuite")
node.assert_attr(name="pytest", errors=0, failures=1, skips=3, tests=2)
node.assert_attr(name="pytest", errors=0, failures=1, skips=3, tests=5)
def test_timing_function(self, testdir):
testdir.makepyfile("""
@ -304,7 +304,7 @@ class TestPython:
result, dom = runandparse(testdir)
assert not result.ret
node = dom.find_first_by_tag("testsuite")
node.assert_attr(skips=1, tests=0)
node.assert_attr(skips=1, tests=1)
tnode = node.find_first_by_tag("testcase")
tnode.assert_attr(
file="test_xfailure_function.py",
@ -325,7 +325,7 @@ class TestPython:
result, dom = runandparse(testdir)
# assert result.ret
node = dom.find_first_by_tag("testsuite")
node.assert_attr(skips=1, tests=0)
node.assert_attr(skips=1, tests=1)
tnode = node.find_first_by_tag("testcase")
tnode.assert_attr(
file="test_xfailure_xpass.py",
@ -356,7 +356,7 @@ class TestPython:
result, dom = runandparse(testdir)
assert result.ret == EXIT_NOTESTSCOLLECTED
node = dom.find_first_by_tag("testsuite")
node.assert_attr(skips=1, tests=0)
node.assert_attr(skips=1, tests=1)
tnode = node.find_first_by_tag("testcase")
tnode.assert_attr(
file="test_collect_skipped.py",

View File

@ -539,6 +539,19 @@ class TestSkip:
"*1 passed*2 skipped*",
])
def test_strict_and_skip(self, testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.skip
def test_hello():
pass
""")
result = testdir.runpytest("-rs --strict")
result.stdout.fnmatch_lines([
"*unconditional skip*",
"*1 skipped*",
])
class TestSkipif:
def test_skipif_conditional(self, testdir):
item = testdir.getitem("""
@ -812,7 +825,7 @@ def test_default_markers(testdir):
result = testdir.runpytest("--markers")
result.stdout.fnmatch_lines([
"*skipif(*condition)*skip*",
"*xfail(*condition, reason=None, run=True, raises=None)*expected failure*",
"*xfail(*condition, reason=None, run=True, raises=None, strict=False)*expected failure*",
])
def test_xfail_test_setup_exception(testdir):

View File

@ -17,10 +17,11 @@ deps=
[testenv:py26]
commands= py.test --lsof -rfsxX {posargs:testing}
# pinning mock to last supported version for python 2.6
deps=
hypothesis<3.0
nose
mock<1.1 # last supported version for py26
mock<1.1
[testenv:py27-subprocess]
changedir=.