Merge branch 'master' of git://github.com/Elizaveta239/pytest into Elizaveta239-master
Commit: b49bedcf0c

@@ -0,0 +1 @@
+CHANGELOG merge=union

@@ -17,6 +17,11 @@ include/
 *.orig
 *~

+.eggs/
+
+# this file is managed by setuptools_scm
+_pytest/__init__.py
+
 doc/*/_build
 build/
 dist/

@@ -28,7 +28,7 @@ env:
   - TESTENV=py35
   - TESTENV=pypy

-script: tox --recreate -i ALL=https://devpi.net/hpk/dev/ -e $TESTENV
+script: tox --recreate -e $TESTENV

 notifications:
   irc:

AUTHORS (7 changed lines)

@@ -15,6 +15,7 @@ Bob Ippolito
 Brian Dorsey
 Brian Okken
 Brianna Laugher
+Bruno Oliveira
 Carl Friedrich Bolz
 Charles Cloud
 Chris Lamb
@@ -25,12 +26,12 @@ Daniel Grana
 Daniel Nuri
 Dave Hunt
 David Mohr
+Edison Gustavo Muenz
 Eduardo Schettino
 Elizaveta Shashkova
+Eric Hunsberger
 Eric Siegerman
 Florian Bruhin
-Edison Gustavo Muenz
-Eric Hunsberger
 Floris Bruynooghe
 Graham Horler
 Grig Gheorghiu
@@ -48,6 +49,7 @@ Maciek Fijalkowski
 Maho
 Marc Schlaich
 Mark Abramowitz
+Markus Unterwaditzer
 Martijn Faassen
 Nicolas Delaby
 Pieter Mulder
@@ -60,3 +62,4 @@ Samuele Pedroni
 Tom Viner
 Trevor Bekolay
 Wouter van Ackooy
+David Díaz-Barquero

CHANGELOG (35 changed lines)

@@ -1,10 +1,25 @@
 2.8.0.dev (compared to 2.7.X)
 -----------------------------

+- Fix #562: @nose.tools.istest now fully respected.
+
+- Fix issue736: Fix a bug where fixture params would be discarded when combined
+  with parametrization markers.
+  Thanks to Markus Unterwaditzer for the PR.
+
+- fix issue710: introduce ALLOW_UNICODE doctest option: when enabled, the
+  ``u`` prefix is stripped from unicode strings in expected doctest output. This
+  allows doctests which use unicode to run in Python 2 and 3 unchanged.
+  Thanks Jason R. Coombs for the report and Bruno Oliveira for the PR.
+
+- parametrize now also generates meaningful test IDs for enum, regex and class
+  objects (as opposed to class instances).
+  Thanks to Florian Bruhin for the PR.
+
 - Add 'warns' to assert that warnings are thrown (like 'raises').
   Thanks to Eric Hunsberger for the PR.

-- Fix #683: Do not apply an already applied mark. Thanks ojake for the PR.
+- Fix issue683: Do not apply an already applied mark. Thanks ojake for the PR.

 - Deal with capturing failures better so fewer exceptions get lost to
   /dev/null. Thanks David Szotten for the PR.
@@ -30,7 +45,7 @@
   deprecated.
   Thanks Bruno Oliveira for the PR.

-- fix issue 808: pytest's internal assertion rewrite hook now implements the
+- fix issue808: pytest's internal assertion rewrite hook now implements the
   optional PEP302 get_data API so tests can access data files next to them.
   Thanks xmo-odoo for request and example and Bruno Oliveira for
   the PR.
@@ -43,6 +58,15 @@
 - Summary bar now is colored yellow for warning
   situations such as: all tests either were skipped or xpass/xfailed,
   or no tests were run at all (this is a partial fix for issue500).
+- fix issue812: pytest now exits with status code 5 in situations where no
+  tests were run at all, such as the directory given in the command line does
+  not contain any tests or as result of a command line option filters
+  all out all tests (-k for example).
+  Thanks Eric Siegerman (issue812) and Bruno Oliveira for the PR.
+
+- Summary bar now is colored yellow for warning
+  situations such as: all tests either were skipped or xpass/xfailed,
+  or no tests were run at all (related to issue500).
   Thanks Eric Siegerman.

 - New `testpaths` ini option: list of directories to search for tests
@@ -126,6 +150,13 @@
 - fix issue714: add ability to apply indirect=True parameter on particular argnames.
   Thanks Elizaveta239.

+- fix issue714: add ability to apply indirect=True parameter on particular argnames.
+
+- fix issue890: changed extension of all documentation files from ``txt`` to
+  ``rst``. Thanks to Abhijeet for the PR.
+
+- issue951: add new record_xml_property fixture, that supports logging
+  additional information on xml output. Thanks David Diaz for the PR.
+
 2.7.3 (compared to 2.7.2)
 -----------------------------

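
Editor's note: the ``warns`` entry above has no usage example anywhere in this
commit; a minimal sketch of the intended API (test name and warning message are
illustrative, not taken from this change set):

    import warnings
    import pytest

    def test_deprecation_is_reported():
        # pytest.warns mirrors pytest.raises, but checks emitted warnings
        with pytest.warns(DeprecationWarning):
            warnings.warn("old API, use the new one instead", DeprecationWarning)
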
@@ -1,2 +0,0 @@
-#
-__version__ = '2.8.0.dev4'

@@ -211,6 +211,10 @@ class PytestPluginManager(PluginManager):
         # support deprecated naming because plugins (xdist e.g.) use it
         return self.get_plugin(name)

+    def hasplugin(self, name):
+        """Return True if the plugin with the given name is registered."""
+        return bool(self.get_plugin(name))
+
     def pytest_configure(self, config):
         # XXX now that the pluginmanager exposes hookimpl(tryfirst...)
         # we should remove tryfirst/trylast as markers

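
Editor's note: a short, hypothetical sketch of how the new ``hasplugin`` helper
might be used from a ``conftest.py`` hook; the plugin name checked here
("xdist") is only an example:

    # conftest.py
    def pytest_configure(config):
        # enable optional behaviour only when another plugin is registered
        if config.pluginmanager.hasplugin("xdist"):
            config.addinivalue_line(
                "markers", "serial: tests that must not run in parallel")
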
@@ -63,7 +63,7 @@ class DoctestItem(pytest.Item):
             lineno = test.lineno + example.lineno + 1
             message = excinfo.type.__name__
             reprlocation = ReprFileLocation(filename, lineno, message)
-            checker = doctest.OutputChecker()
+            checker = _get_unicode_checker()
             REPORT_UDIFF = doctest.REPORT_UDIFF
             filelines = py.path.local(filename).readlines(cr=0)
             lines = []
@@ -100,7 +100,8 @@ def _get_flag_lookup():
         NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE,
         ELLIPSIS=doctest.ELLIPSIS,
         IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL,
-        COMPARISON_FLAGS=doctest.COMPARISON_FLAGS)
+        COMPARISON_FLAGS=doctest.COMPARISON_FLAGS,
+        ALLOW_UNICODE=_get_allow_unicode_flag())

 def get_optionflags(parent):
     optionflags_str = parent.config.getini("doctest_optionflags")
@@ -110,15 +111,30 @@ def get_optionflags(parent):
         flag_acc |= flag_lookup_table[flag]
     return flag_acc


 class DoctestTextfile(DoctestItem, pytest.File):

     def runtest(self):
         import doctest
         fixture_request = _setup_fixtures(self)
-        failed, tot = doctest.testfile(
-            str(self.fspath), module_relative=False,
-            optionflags=get_optionflags(self),
-            extraglobs=dict(getfixture=fixture_request.getfuncargvalue),
-            raise_on_error=True, verbose=0)
+
+        # inspired by doctest.testfile; ideally we would use it directly,
+        # but it doesn't support passing a custom checker
+        text = self.fspath.read()
+        filename = str(self.fspath)
+        name = self.fspath.basename
+        globs = dict(getfixture=fixture_request.getfuncargvalue)
+        if '__name__' not in globs:
+            globs['__name__'] = '__main__'
+
+        optionflags = get_optionflags(self)
+        runner = doctest.DebugRunner(verbose=0, optionflags=optionflags,
+                                     checker=_get_unicode_checker())
+
+        parser = doctest.DocTestParser()
+        test = parser.get_doctest(text, globs, name, filename, 0)
+        runner.run(test)


 class DoctestModule(pytest.File):
     def collect(self):
@@ -139,7 +155,8 @@ class DoctestModule(pytest.File):
             # uses internal doctest module parsing mechanism
             finder = doctest.DocTestFinder()
             optionflags = get_optionflags(self)
-            runner = doctest.DebugRunner(verbose=0, optionflags=optionflags)
+            runner = doctest.DebugRunner(verbose=0, optionflags=optionflags,
+                                         checker=_get_unicode_checker())
             for test in finder.find(module, module.__name__,
                                     extraglobs=doctest_globals):
                 if test.examples:  # skip empty doctests
@@ -160,3 +177,59 @@ def _setup_fixtures(doctest_item):
     fixture_request = FixtureRequest(doctest_item)
     fixture_request._fillfixtures()
     return fixture_request
+
+
+def _get_unicode_checker():
+    """
+    Returns a doctest.OutputChecker subclass that takes in account the
+    ALLOW_UNICODE option to ignore u'' prefixes in strings. Useful
+    when the same doctest should run in Python 2 and Python 3.
+
+    An inner class is used to avoid importing "doctest" at the module
+    level.
+    """
+    if hasattr(_get_unicode_checker, 'UnicodeOutputChecker'):
+        return _get_unicode_checker.UnicodeOutputChecker()
+
+    import doctest
+    import re
+
+    class UnicodeOutputChecker(doctest.OutputChecker):
+        """
+        Copied from doctest_nose_plugin.py from the nltk project:
+            https://github.com/nltk/nltk
+        """
+
+        _literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE)
+
+        def check_output(self, want, got, optionflags):
+            res = doctest.OutputChecker.check_output(self, want, got,
+                                                     optionflags)
+            if res:
+                return True
+
+            if not (optionflags & _get_allow_unicode_flag()):
+                return False
+
+            else:  # pragma: no cover
+                # the code below will end up executed only in Python 2 in
+                # our tests, and our coverage check runs in Python 3 only
+                def remove_u_prefixes(txt):
+                    return re.sub(self._literal_re, r'\1\2', txt)
+
+                want = remove_u_prefixes(want)
+                got = remove_u_prefixes(got)
+                res = doctest.OutputChecker.check_output(self, want, got,
+                                                         optionflags)
+                return res
+
+    _get_unicode_checker.UnicodeOutputChecker = UnicodeOutputChecker
+    return _get_unicode_checker.UnicodeOutputChecker()
+
+
+def _get_allow_unicode_flag():
+    """
+    Registers and returns the ALLOW_UNICODE flag.
+    """
+    import doctest
+    return doctest.register_optionflag('ALLOW_UNICODE')

@@ -31,6 +31,11 @@ def pkg_to_mapping(name):
     else: # package
         for pyfile in toplevel.visit('*.py'):
             pkg = pkgname(name, toplevel, pyfile)
+            if pkg == '_pytest.__init__':
+                # remove the coding comment line to avoid python bug
+                lines = pyfile.read().splitlines(True)
+                name2src[pkg] = ''.join(lines[1:])
+            else:
                 name2src[pkg] = pyfile.read()
     # with wheels py source code might be not be installed
     # and the resulting genscript is useless, just bail out.

@@ -9,6 +9,7 @@ import os
 import re
 import sys
 import time
+import pytest

 # Python 2.X and 3.X compatibility
 if sys.version_info[0] < 3:
@@ -53,6 +54,20 @@ def bin_xml_escape(arg):
         return unicode('#x%04X') % i
     return py.xml.raw(illegal_xml_re.sub(repl, py.xml.escape(arg)))

+
+@pytest.fixture
+def record_xml_property(request):
+    """Fixture that adds extra xml properties to the tag for the calling test.
+    The fixture is callable with (name, value), with value being automatically
+    xml-encoded.
+    """
+    def inner(name, value):
+        if hasattr(request.config, "_xml"):
+            request.config._xml.add_custom_property(name, value)
+        msg = 'record_xml_property is an experimental feature'
+        request.config.warn(code='C3', message=msg,
+                            fslocation=request.node.location[:2])
+    return inner
+
 def pytest_addoption(parser):
     group = parser.getgroup("terminal reporting")
     group.addoption('--junitxml', '--junit-xml', action="store",
@@ -75,7 +90,6 @@ def pytest_unconfigure(config):
     del config._xml
     config.pluginmanager.unregister(xml)

-
 def mangle_testnames(names):
     names = [x.replace(".py", "") for x in names if x != '()']
     names[0] = names[0].replace("/", '.')
@@ -89,6 +103,10 @@ class LogXML(object):
         self.tests = []
         self.passed = self.skipped = 0
         self.failed = self.errors = 0
+        self.custom_properties = {}
+
+    def add_custom_property(self, name, value):
+        self.custom_properties[str(name)] = bin_xml_escape(str(value))

     def _opentestcase(self, report):
         names = mangle_testnames(report.nodeid.split("::"))
@@ -118,6 +136,10 @@ class LogXML(object):
     def append(self, obj):
         self.tests[-1].append(obj)

+    def append_custom_properties(self):
+        self.tests[-1].attr.__dict__.update(self.custom_properties)
+        self.custom_properties.clear()
+
     def append_pass(self, report):
         self.passed += 1
         self._write_captured_output(report)
@@ -179,6 +201,7 @@ class LogXML(object):
         if report.when == "setup":
             self._opentestcase(report)
         self.tests[-1].attr.time += getattr(report, 'duration', 0)
+        self.append_custom_properties()
         if report.passed:
             if report.when == "call": # ignore setup/teardown
                 self.append_pass(report)

@@ -19,6 +19,7 @@ EXIT_TESTSFAILED = 1
 EXIT_INTERRUPTED = 2
 EXIT_INTERNALERROR = 3
 EXIT_USAGEERROR = 4
+EXIT_NOTESTSCOLLECTED = 5

 name_re = re.compile("^[a-zA-Z_]\w*$")
@@ -100,8 +101,10 @@ def wrap_session(config, doit):
             if excinfo.errisinstance(SystemExit):
                 sys.stderr.write("mainloop: caught Spurious SystemExit!\n")
         else:
-            if session._testsfailed:
+            if session.testsfailed:
                 session.exitstatus = EXIT_TESTSFAILED
+            elif session.testscollected == 0:
+                session.exitstatus = EXIT_NOTESTSCOLLECTED
     finally:
         excinfo = None  # Explicitly break reference cycle.
         session.startdir.chdir()
@@ -509,7 +512,8 @@ class Session(FSCollector):
         FSCollector.__init__(self, config.rootdir, parent=None,
                              config=config, session=self)
         self._fs2hookproxy = {}
-        self._testsfailed = 0
+        self.testsfailed = 0
+        self.testscollected = 0
         self.shouldstop = False
         self.trace = config.trace.root.get("collection")
         self._norecursepatterns = config.getini("norecursedirs")
@@ -527,11 +531,11 @@ class Session(FSCollector):
     @pytest.hookimpl(tryfirst=True)
     def pytest_runtest_logreport(self, report):
         if report.failed and not hasattr(report, 'wasxfail'):
-            self._testsfailed += 1
+            self.testsfailed += 1
             maxfail = self.config.getvalue("maxfail")
-            if maxfail and self._testsfailed >= maxfail:
+            if maxfail and self.testsfailed >= maxfail:
                 self.shouldstop = "stopping after %d failures" % (
-                    self._testsfailed)
+                    self.testsfailed)
     pytest_collectreport = pytest_runtest_logreport

     def isinitpath(self, path):
@@ -564,6 +568,7 @@ class Session(FSCollector):
                     config=self.config, items=items)
         finally:
             hook.pytest_collection_finish(session=self)
+        self.testscollected = len(items)
         return items

     def _perform_collect(self, args, genitems):

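
Editor's note: the new EXIT_NOTESTSCOLLECTED constant lets callers of
``pytest.main()`` tell "nothing was collected" apart from "all tests passed";
a minimal sketch (the directory name is hypothetical):

    import pytest
    from _pytest.main import EXIT_OK, EXIT_NOTESTSCOLLECTED

    ret = pytest.main(["tests/empty_dir"])
    if ret == EXIT_NOTESTSCOLLECTED:
        print("no tests were collected (exit code 5)")
    elif ret == EXIT_OK:
        print("all collected tests passed")
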
@@ -1,4 +1,5 @@
 """ Python test discovery, setup and run of test functions. """
+import re
 import fnmatch
 import functools
 import py
@@ -8,6 +9,12 @@ import pytest
 from _pytest.mark import MarkDecorator, MarkerError
 from py._code.code import TerminalRepr

+try:
+    import enum
+except ImportError:  # pragma: no cover
+    # Only available in Python 3.4+ or as a backport
+    enum = None
+
 import _pytest
 import pluggy
@@ -22,13 +29,15 @@ isclass = inspect.isclass
 callable = py.builtin.callable
 # used to work around a python2 exception info leak
 exc_clear = getattr(sys, 'exc_clear', lambda: None)
+# The type of re.compile objects is not exposed in Python.
+REGEX_TYPE = type(re.compile(''))

 def filter_traceback(entry):
     return entry.path != cutdir1 and not entry.path.relto(cutdir2)


 def get_real_func(obj):
-    """gets the real function object of the (possibly) wrapped object by
+    """ gets the real function object of the (possibly) wrapped object by
     functools.wraps or functools.partial.
     """
     while hasattr(obj, "__wrapped__"):
@@ -55,6 +64,17 @@ def getimfunc(func):
     except AttributeError:
         return func

+def safe_getattr(object, name, default):
+    """ Like getattr but return default upon any Exception.
+
+    Attribute access can potentially fail for 'evil' Python objects.
+    See issue214
+    """
+    try:
+        return getattr(object, name, default)
+    except Exception:
+        return default
+
 class FixtureFunctionMarker:
     def __init__(self, scope, params,
@@ -248,11 +268,10 @@ def pytest_pycollect_makeitem(collector, name, obj):
         raise StopIteration
     # nothing was collected elsewhere, let's do it here
     if isclass(obj):
-        if collector.classnamefilter(name):
+        if collector.istestclass(obj, name):
             Class = collector._getcustomclass("Class")
             outcome.force_result(Class(name, parent=collector))
-    elif collector.funcnamefilter(name) and hasattr(obj, "__call__") and\
-        getfixturemarker(obj) is None:
+    elif collector.istestfunction(obj, name):
         # mock seems to store unbound methods (issue473), normalize it
         obj = getattr(obj, "__func__", obj)
         if not isfunction(obj):
@@ -338,9 +357,24 @@ class PyCollector(PyobjMixin, pytest.Collector):
     def funcnamefilter(self, name):
         return self._matches_prefix_or_glob_option('python_functions', name)

+    def isnosetest(self, obj):
+        """ Look for the __test__ attribute, which is applied by the
+        @nose.tools.istest decorator
+        """
+        return safe_getattr(obj, '__test__', False)
+
     def classnamefilter(self, name):
         return self._matches_prefix_or_glob_option('python_classes', name)

+    def istestfunction(self, obj, name):
+        return (
+            (self.funcnamefilter(name) or self.isnosetest(obj))
+            and safe_getattr(obj, "__call__", False) and getfixturemarker(obj) is None
+        )
+
+    def istestclass(self, obj, name):
+        return self.classnamefilter(name) or self.isnosetest(obj)
+
     def _matches_prefix_or_glob_option(self, option_name, name):
         """
         checks if the given name matches the prefix or glob-pattern defined
@@ -485,7 +519,7 @@ class FuncFixtureInfo:


 def _marked(func, mark):
-    """Returns True if :func: is already marked with :mark:, False orherwise.
+    """ Returns True if :func: is already marked with :mark:, False otherwise.
     This can happen if marker is applied to class and the test file is
     invoked more than once.
     """
@@ -911,15 +945,14 @@ class Metafunc(FuncargnamesCompatAttr):
             scope = "function"
         scopenum = scopes.index(scope)
         valtypes = {}
+        for arg in argnames:
+            if arg not in self.fixturenames:
+                raise ValueError("%r uses no fixture %r" %(self.function, arg))
+
         if indirect is True:
             valtypes = dict.fromkeys(argnames, "params")
         elif indirect is False:
             valtypes = dict.fromkeys(argnames, "funcargs")
-            #XXX should we also check for the opposite case?
-            for arg in argnames:
-                if arg not in self.fixturenames:
-                    raise ValueError("%r uses no fixture %r" %(
-                        self.function, arg))
         elif isinstance(indirect, (tuple, list)):
             valtypes = dict.fromkeys(argnames, "funcargs")
             for arg in indirect:
@@ -992,8 +1025,15 @@ def _idval(val, argname, idx, idfn):
             return s
         except Exception:
             pass

     if isinstance(val, (float, int, str, bool, NoneType)):
         return str(val)
+    elif isinstance(val, REGEX_TYPE):
+        return val.pattern
+    elif enum is not None and isinstance(val, enum.Enum):
+        return str(val)
+    elif isclass(val) and hasattr(val, '__name__'):
+        return val.__name__
     return str(argname)+str(idx)

 def _idvalset(idx, valset, argnames, idfn):
@@ -1127,9 +1167,9 @@ def raises(expected_exception, *args, **kwargs):
           " derived from BaseException, not %s")
     if isinstance(expected_exception, tuple):
         for exc in expected_exception:
-            if not inspect.isclass(exc):
+            if not isclass(exc):
                 raise TypeError(msg % type(exc))
-    elif not inspect.isclass(expected_exception):
+    elif not isclass(expected_exception):
         raise TypeError(msg % type(expected_exception))

     if not args:
@@ -1376,7 +1416,7 @@ class FixtureRequest(FuncargnamesCompatAttr):
         return self._pyfuncitem.session

     def addfinalizer(self, finalizer):
-        """add finalizer/teardown function to be called after the
+        """ add finalizer/teardown function to be called after the
         last test within the requesting test context finished
         execution. """
         # XXX usually this method is shadowed by fixturedef specific ones
@@ -1790,7 +1830,7 @@ class FixtureManager:
         if fixturedef.params is not None:
             func_params = getattr(getattr(metafunc.function, 'parametrize', None), 'args', [[None]])
             # skip directly parametrized arguments
-            if argname not in func_params and argname not in func_params[0]:
+            if argname not in func_params:
                 metafunc.parametrize(argname, fixturedef.params,
                                      indirect=True, scope=fixturedef.scope,
                                      ids=fixturedef.ids)

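
Editor's note: the issue714 change referenced in the CHANGELOG allows
``indirect`` to be a list naming only some argnames; a minimal sketch of that
usage (fixture and test names are illustrative, not taken from this commit):

    import pytest

    @pytest.fixture
    def x(request):
        # receives its parametrized value via request.param
        return request.param * 10

    # only 'x' is routed through its fixture; 'y' is passed to the test directly
    @pytest.mark.parametrize("x, y", [(1, 2), (3, 4)], indirect=["x"])
    def test_indirect_subset(x, y):
        assert x == 10 * (y - 1)
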
@@ -2,6 +2,8 @@

 This is a good source for looking at the various reporting hooks.
 """
+from _pytest.main import EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, \
+    EXIT_USAGEERROR, EXIT_NOTESTSCOLLECTED
 import pytest
 import pluggy
 import py
@@ -298,13 +300,9 @@ class TerminalReporter:

         plugininfo = config.pluginmanager.list_plugin_distinfo()
         if plugininfo:
-            l = []
-            for plugin, dist in plugininfo:
-                name = dist.project_name
-                if name.startswith("pytest-"):
-                    name = name[7:]
-                l.append(name)
-            lines.append("plugins: %s" % ", ".join(l))
+            lines.append(
+                "plugins: %s" % ", ".join(_plugin_nameversions(plugininfo)))
         return lines

     def pytest_collection_finish(self, session):
@@ -359,12 +357,15 @@ class TerminalReporter:
         outcome = yield
         outcome.get_result()
         self._tw.line("")
-        if exitstatus in (0, 1, 2, 4):
+        summary_exit_codes = (
+            EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, EXIT_USAGEERROR,
+            EXIT_NOTESTSCOLLECTED)
+        if exitstatus in summary_exit_codes:
             self.summary_errors()
             self.summary_failures()
             self.summary_warnings()
             self.config.hook.pytest_terminal_summary(terminalreporter=self)
-        if exitstatus == 2:
+        if exitstatus == EXIT_INTERRUPTED:
             self._report_keyboardinterrupt()
             del self._keyboardinterrupt_memo
         self.summary_deselected()
@@ -549,3 +550,18 @@ def build_summary_stats_line(stats):
         color = 'yellow'

     return (line, color)
+
+
+def _plugin_nameversions(plugininfo):
+    l = []
+    for plugin, dist in plugininfo:
+        # gets us name and version!
+        name = '{dist.project_name}-{dist.version}'.format(dist=dist)
+        # questionable convenience, but it keeps things short
+        if name.startswith("pytest-"):
+            name = name[7:]
+        # we decided to print python package names
+        # they can have more than one plugin
+        if name not in l:
+            l.append(name)
+    return l

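
Editor's note: a rough illustration of what the ``_plugin_nameversions`` helper
added above produces; the distribution names and version numbers are made up:

    from collections import namedtuple

    Dist = namedtuple("Dist", "project_name version")
    plugininfo = [(object(), Dist("pytest-xdist", "1.13")),
                  (object(), Dist("pytest-cov", "2.0"))]

    # _plugin_nameversions(plugininfo) -> ['xdist-1.13', 'cov-2.0'],
    # which the terminal header renders as "plugins: xdist-1.13, cov-2.0"
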
@@ -6,7 +6,7 @@ def get_version_string():
     fn = py.path.local(__file__).join("..", "..", "..",
                                       "_pytest", "__init__.py")
     for line in fn.readlines():
-        if "version" in line:
+        if "version" in line and not line.strip().startswith('#'):
             return eval(line.split("=")[-1])

 def get_minor_version_string():

@@ -72,3 +72,18 @@ ignore lengthy exception stack traces you can just write::
     # content of pytest.ini
     [pytest]
     doctest_optionflags= NORMALIZE_WHITESPACE IGNORE_EXCEPTION_DETAIL
+
+
+py.test also introduces a new ``ALLOW_UNICODE`` option flag: when enabled, the
+``u`` prefix is stripped from unicode strings in expected doctest output. This
+allows doctests which use unicode to run in Python 2 and 3 unchanged.
+
+As with any other option flag, this flag can be enabled in ``pytest.ini`` using
+the ``doctest_optionflags`` ini option or by an inline comment in the doc test
+itself::
+
+    # content of example.rst
+    >>> get_unicode_greeting()  # doctest: +ALLOW_UNICODE
+    'Hello'

@@ -534,23 +534,24 @@ case we just write some informations out to a ``failures`` file::
     import pytest
     import os.path

-    @pytest.hookimpl(tryfirst=True)
-    def pytest_runtest_makereport(item, call, __multicall__):
+    @pytest.hookimpl(tryfirst=True, hookwrapper=True)
+    def pytest_runtest_makereport(item, call):
         # execute all other hooks to obtain the report object
-        rep = __multicall__.execute()
+        outcome = yield
+        rep = outcome.get_result()

         # we only look at actual failing test calls, not setup/teardown
         if rep.when == "call" and rep.failed:
             mode = "a" if os.path.exists("failures") else "w"
             with open("failures", mode) as f:
                 # let's also access a fixture for the fun of it
-                if "tmpdir" in item.funcargs:
+                if "tmpdir" in item.fixturenames:
                     extra = " (%s)" % item.funcargs["tmpdir"]
                 else:
                     extra = ""

                 f.write(rep.nodeid + extra + "\n")
-        return rep

 if you then have failing tests::
@@ -606,16 +607,16 @@ here is a little example implemented via a local plugin::

     import pytest

-    @pytest.hookimpl(tryfirst=True)
-    def pytest_runtest_makereport(item, call, __multicall__):
+    @pytest.hookimpl(tryfirst=True, hookwrapper=True)
+    def pytest_runtest_makereport(item, call):
         # execute all other hooks to obtain the report object
-        rep = __multicall__.execute()
+        outcome = yield
+        rep = outcome.get_result()

         # set an report attribute for each phase of a call, which can
         # be "setup", "call", "teardown"

         setattr(item, "rep_" + rep.when, rep)
-        return rep


     @pytest.fixture
@@ -742,5 +743,4 @@ over to ``pytest`` instead. For example::
 This makes it convenient to execute your tests from within your frozen
 application, using standard ``py.test`` command-line options::

-    $ ./app_main --pytest --verbose --tb=long --junit-xml=results.xml test-suite/
-    /bin/sh: ./app_main: No such file or directory
+    ./app_main --pytest --verbose --tb=long --junit-xml=results.xml test-suite/

@@ -83,7 +83,7 @@ As with all function :ref:`marking <mark>` you can skip test functions at the
 `whole class- or module level`_. If your code targets python2.6 or above you
 use the skipif decorator (and any other marker) on classes::

-    @pytest.mark.skipif(sys.platform == 'win32',
+    @pytest.mark.skipif(sys.platform != 'win32',
                         reason="requires windows")
     class TestPosixCalls:
@@ -97,7 +97,7 @@ If your code targets python2.5 where class-decorators are not available,
 you can set the ``pytestmark`` attribute of a class::

     class TestPosixCalls:
-        pytestmark = pytest.mark.skipif(sys.platform == 'win32',
+        pytestmark = pytest.mark.skipif(sys.platform != 'win32',
                                         reason="requires Windows")

         def test_function(self):

@@ -153,6 +153,36 @@ integration servers, use this invocation::

 to create an XML file at ``path``.

+record_xml_property
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. versionadded:: 2.8
+
+If you want to log additional information for a test, you can use the
+``record_xml_property`` fixture:
+
+.. code-block:: python
+
+    def test_function(record_xml_property):
+        record_xml_property("example_key", 1)
+        assert 0
+
+This will add an extra property ``example_key="1"`` to the generated
+``testcase`` tag:
+
+.. code-block:: xml
+
+    <testcase classname="test_function" example_key="1" file="test_function.py" line="0" name="test_function" time="0.0009">
+
+.. warning::
+
+    This is an experimental feature, and its interface might be replaced
+    by something more powerful and general in future versions. The
+    functionality per-se will be kept, however.
+
+    Also please note that using this feature will break any schema verification.
+    This might be a problem when used with some CI servers.
+
 Creating resultlog format files
 ----------------------------------------------------

@@ -15,7 +15,7 @@ from _pytest.config import (
     main, UsageError, _preloadplugins, cmdline,
     hookspec, hookimpl
 )
-from _pytest import __version__
+from _pytest import version as __version__

 _preloadplugins() # to populate pytest.* namespace so help(pytest) works

setup.py (3 changed lines)

@@ -63,7 +63,7 @@ def main():
         name='pytest',
         description='pytest: simple powerful testing with Python',
         long_description=long_description,
-        version=get_version(),
+        use_scm_version={'write_to': '_pytest/__init__.py'},
         url='http://pytest.org',
         license='MIT license',
         platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],
@@ -75,6 +75,7 @@ def main():
         # the following should be enabled for release
         install_requires=install_requires,
         extras_require=extras_require,
+        setup_requires=['setuptools_scm'],
         packages=['_pytest', '_pytest.assertion'],
         py_modules=['pytest'],
         zip_safe=False,

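
Editor's note: with ``use_scm_version={'write_to': '_pytest/__init__.py'}`` and
``setup_requires=['setuptools_scm']``, the version string is no longer
maintained by hand; assuming setuptools_scm's usual behaviour, it derives the
version from repository tags and writes a module roughly like the following
(the exact version string is illustrative):

    # file generated by setuptools_scm
    # don't change, don't track in version control
    version = '2.8.0.dev4+ng1234567'

This is why the hand-written ``__version__`` line is deleted above and
``pytest.py`` switches to ``from _pytest import version as __version__``.
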
@@ -1,5 +1,7 @@
 import sys
 import py, pytest
+from _pytest.main import EXIT_NOTESTSCOLLECTED, EXIT_USAGEERROR
+

 class TestGeneralUsage:
     def test_config_error(self, testdir):
@@ -147,7 +149,7 @@ class TestGeneralUsage:
             pytest.skip("early")
         """)
         result = testdir.runpytest()
-        assert result.ret == 0
+        assert result.ret == EXIT_NOTESTSCOLLECTED
         result.stdout.fnmatch_lines([
             "*1 skip*"
         ])
@@ -177,7 +179,7 @@ class TestGeneralUsage:
             sys.stderr.write("stder42\\n")
         """)
         result = testdir.runpytest()
-        assert result.ret == 0
+        assert result.ret == EXIT_NOTESTSCOLLECTED
         assert "should not be seen" not in result.stdout.str()
         assert "stderr42" not in result.stderr.str()
@@ -212,13 +214,13 @@ class TestGeneralUsage:
         sub2 = testdir.tmpdir.mkdir("sub2")
         sub1.join("conftest.py").write("assert 0")
         result = testdir.runpytest(sub2)
-        assert result.ret == 0
+        assert result.ret == EXIT_NOTESTSCOLLECTED
         sub2.ensure("__init__.py")
         p = sub2.ensure("test_hello.py")
         result = testdir.runpytest(p)
-        assert result.ret == 0
+        assert result.ret == EXIT_NOTESTSCOLLECTED
         result = testdir.runpytest(sub1)
-        assert result.ret != 0
+        assert result.ret == EXIT_USAGEERROR

     def test_directory_skipped(self, testdir):
         testdir.makeconftest("""
@@ -228,7 +230,7 @@ class TestGeneralUsage:
         """)
         testdir.makepyfile("def test_hello(): pass")
         result = testdir.runpytest()
-        assert result.ret == 0
+        assert result.ret == EXIT_NOTESTSCOLLECTED
         result.stdout.fnmatch_lines([
             "*1 skipped*"
         ])
@@ -479,7 +481,7 @@ class TestInvocationVariants:

     def test_invoke_with_path(self, tmpdir, capsys):
         retcode = pytest.main(tmpdir)
-        assert not retcode
+        assert retcode == EXIT_NOTESTSCOLLECTED
         out, err = capsys.readouterr()

     def test_invoke_plugin_api(self, testdir, capsys):
@@ -586,6 +588,11 @@ class TestInvocationVariants:
         assert type(_pytest.config.get_plugin_manager()) is _pytest.config.PytestPluginManager

+    def test_has_plugin(self, request):
+        """Test hasplugin function of the plugin manager (#932)."""
+        assert request.config.pluginmanager.hasplugin('python')
+
+
 class TestDurations:
     source = """
         import time

@@ -1,6 +1,8 @@
 import sys
 from textwrap import dedent
 import pytest, py
+from _pytest.main import EXIT_NOTESTSCOLLECTED
+

 class TestModule:
     def test_failing_import(self, testdir):
@@ -412,9 +414,19 @@ class TestFunction:
                                      ['overridden'])
             def test_overridden_via_param(value):
                 assert value == 'overridden'
+
+            @pytest.mark.parametrize('somevalue', ['overridden'])
+            def test_not_overridden(value, somevalue):
+                assert value == 'value'
+                assert somevalue == 'overridden'
+
+            @pytest.mark.parametrize('other,value', [('foo', 'overridden')])
+            def test_overridden_via_multiparam(other, value):
+                assert other == 'foo'
+                assert value == 'overridden'
         """)
         rec = testdir.inline_run()
-        rec.assertoutcome(passed=1)
+        rec.assertoutcome(passed=3)

     def test_parametrize_overrides_parametrized_fixture(self, testdir):
@@ -896,7 +908,7 @@ def test_unorderable_types(testdir):
     """)
     result = testdir.runpytest()
     assert "TypeError" not in result.stdout.str()
-    assert result.ret == 0
+    assert result.ret == EXIT_NOTESTSCOLLECTED


 def test_collect_functools_partial(testdir):

@@ -1598,6 +1598,22 @@ class TestFixtureMarker:
         reprec = testdir.inline_run()
         reprec.assertoutcome(passed=4)

+    def test_multiple_parametrization_issue_736(self, testdir):
+        testdir.makepyfile("""
+            import pytest
+
+            @pytest.fixture(params=[1,2,3])
+            def foo(request):
+                return request.param
+
+            @pytest.mark.parametrize('foobar', [4,5,6])
+            def test_issue(foo, foobar):
+                assert foo in [1,2,3]
+                assert foobar in [4,5,6]
+            """)
+        reprec = testdir.inline_run()
+        reprec.assertoutcome(passed=9)
+
     def test_scope_session(self, testdir):
         testdir.makepyfile("""
             import pytest

@@ -1,3 +1,4 @@
+import re

 import pytest, py
 from _pytest import python as funcargs
@@ -138,6 +139,8 @@ class TestMetafunc:
                           ("three", "three hundred"),
                           (True, False),
                           (None, None),
+                          (re.compile('foo'), re.compile('bar')),
+                          (str, int),
                           (list("six"), [66, 66]),
                           (set([7]), set("seven")),
                           (tuple("eight"), (8, -8, 8))
@@ -147,9 +150,18 @@ class TestMetafunc:
                           "three-three hundred",
                           "True-False",
                           "None-None",
-                          "a5-b5",
-                          "a6-b6",
-                          "a7-b7"]
+                          "foo-bar",
+                          "str-int",
+                          "a7-b7",
+                          "a8-b8",
+                          "a9-b9"]
+
+    def test_idmaker_enum(self):
+        from _pytest.python import idmaker
+        enum = pytest.importorskip("enum")
+        e = enum.Enum("Foo", "one, two")
+        result = idmaker(("a", "b"), [(e.one, e.two)])
+        assert result == ["Foo.one-Foo.two"]

     @pytest.mark.issue351
     def test_idmaker_idfn(self):
@@ -214,12 +226,11 @@ class TestMetafunc:
         metafunc = self.Metafunc(func)
         metafunc.parametrize('x', [1], indirect=True)
         metafunc.parametrize('y', [2,3], indirect=True)
-        metafunc.parametrize('unnamed', [1], indirect=True)
         assert len(metafunc._calls) == 2
         assert metafunc._calls[0].funcargs == {}
         assert metafunc._calls[1].funcargs == {}
-        assert metafunc._calls[0].params == dict(x=1,y=2, unnamed=1)
-        assert metafunc._calls[1].params == dict(x=1,y=3, unnamed=1)
+        assert metafunc._calls[0].params == dict(x=1,y=2)
+        assert metafunc._calls[1].params == dict(x=1,y=3)

     @pytest.mark.issue714
     def test_parametrize_indirect_list(self):

@@ -12,6 +12,7 @@ if sys.platform.startswith("java"):

 from _pytest.assertion import util
 from _pytest.assertion.rewrite import rewrite_asserts, PYTEST_TAG
+from _pytest.main import EXIT_NOTESTSCOLLECTED


 def setup_module(mod):
@@ -429,7 +430,7 @@ class TestRewriteOnImport:
             import sys
             sys.path.append(%r)
             import test_gum.test_lizard""" % (z_fn,))
-        assert testdir.runpytest().ret == 0
+        assert testdir.runpytest().ret == EXIT_NOTESTSCOLLECTED

     def test_readonly(self, testdir):
         sub = testdir.mkdir("testing")
@@ -497,7 +498,7 @@ def test_rewritten():
         pkg = testdir.mkdir('a_package_without_init_py')
         pkg.join('module.py').ensure()
         testdir.makepyfile("import a_package_without_init_py.module")
-        assert testdir.runpytest().ret == 0
+        assert testdir.runpytest().ret == EXIT_NOTESTSCOLLECTED

 class TestAssertionRewriteHookDetails(object):
     def test_loader_is_package_false_for_module(self, testdir):

@@ -10,6 +10,7 @@ import contextlib

 from _pytest import capture
 from _pytest.capture import CaptureManager
+from _pytest.main import EXIT_NOTESTSCOLLECTED
 from py.builtin import print_

 needsosdup = pytest.mark.xfail("not hasattr(os, 'dup')")
@@ -365,7 +366,7 @@ class TestLoggingInteraction:
             """)
         # make sure that logging is still captured in tests
         result = testdir.runpytest_subprocess("-s", "-p", "no:capturelog")
-        assert result.ret == 0
+        assert result.ret == EXIT_NOTESTSCOLLECTED
         result.stderr.fnmatch_lines([
             "WARNING*hello435*",
         ])

@@ -1,6 +1,6 @@
 import pytest, py

-from _pytest.main import Session
+from _pytest.main import Session, EXIT_NOTESTSCOLLECTED

 class TestCollector:
     def test_collect_versus_item(self):
@@ -247,10 +247,10 @@ class TestCustomConftests:
         p = testdir.makepyfile("def test_hello(): pass")
         result = testdir.runpytest(p)
         assert result.ret == 0
-        assert "1 passed" in result.stdout.str()
+        result.stdout.fnmatch_lines("*1 passed*")
         result = testdir.runpytest()
-        assert result.ret == 0
-        assert "1 passed" not in result.stdout.str()
+        assert result.ret == EXIT_NOTESTSCOLLECTED
+        result.stdout.fnmatch_lines("*collected 0 items*")

     def test_collectignore_exclude_on_option(self, testdir):
         testdir.makeconftest("""
@@ -264,7 +264,7 @@ class TestCustomConftests:
         testdir.mkdir("hello")
         testdir.makepyfile(test_world="def test_hello(): pass")
         result = testdir.runpytest()
-        assert result.ret == 0
+        assert result.ret == EXIT_NOTESTSCOLLECTED
         assert "passed" not in result.stdout.str()
         result = testdir.runpytest("--XX")
         assert result.ret == 0

@@ -1,6 +1,7 @@
 import py, pytest
 
 from _pytest.config import getcfg, get_common_ancestor, determine_setup
+from _pytest.main import EXIT_NOTESTSCOLLECTED
 
 class TestParseIni:
     def test_getcfg_and_config(self, testdir, tmpdir):
@@ -343,7 +344,7 @@ def test_invalid_options_show_extra_information(testdir):
 @pytest.mark.skipif("sys.platform == 'win32'")
 def test_toolongargs_issue224(testdir):
     result = testdir.runpytest("-m", "hello" * 500)
-    assert result.ret == 0
+    assert result.ret == EXIT_NOTESTSCOLLECTED
 
 def test_notify_exception(testdir, capfd):
     config = testdir.parseconfig()
@@ -1,6 +1,7 @@
 from textwrap import dedent
 import py, pytest
 from _pytest.config import PytestPluginManager
+from _pytest.main import EXIT_NOTESTSCOLLECTED, EXIT_USAGEERROR
 
 
 @pytest.fixture(scope="module", params=["global", "inpackage"])
@@ -166,7 +167,10 @@ def test_conftest_confcutdir(testdir):
 def test_no_conftest(testdir):
     testdir.makeconftest("assert 0")
     result = testdir.runpytest("--noconftest")
-    assert result.ret == 0
+    assert result.ret == EXIT_NOTESTSCOLLECTED
+
+    result = testdir.runpytest()
+    assert result.ret == EXIT_USAGEERROR
 
 def test_conftest_existing_resultlog(testdir):
     x = testdir.mkdir("tests")
@@ -1,5 +1,7 @@
+import sys
 from _pytest.doctest import DoctestItem, DoctestModule, DoctestTextfile
 import py
+import pytest
 
 class TestDoctests:
 
@@ -401,3 +403,46 @@ class TestDoctests:
         result = testdir.runpytest("--doctest-modules")
         result.stdout.fnmatch_lines('*2 passed*')
 
+    @pytest.mark.parametrize('config_mode', ['ini', 'comment'])
+    def test_allow_unicode(self, testdir, config_mode):
+        """Test that doctests which output unicode work in all python versions
+        tested by pytest when the ALLOW_UNICODE option is used (either in
+        the ini file or by an inline comment).
+        """
+        if config_mode == 'ini':
+            testdir.makeini('''
+            [pytest]
+            doctest_optionflags = ALLOW_UNICODE
+            ''')
+            comment = ''
+        else:
+            comment = '#doctest: +ALLOW_UNICODE'
+
+        testdir.maketxtfile(test_doc="""
+            >>> b'12'.decode('ascii') {comment}
+            '12'
+        """.format(comment=comment))
+        testdir.makepyfile(foo="""
+            def foo():
+                '''
+                >>> b'12'.decode('ascii') {comment}
+                '12'
+                '''
+        """.format(comment=comment))
+        reprec = testdir.inline_run("--doctest-modules")
+        reprec.assertoutcome(passed=2)
+
+    def test_unicode_string(self, testdir):
+        """Test that doctests which output unicode fail in Python 2 when
+        the ALLOW_UNICODE option is not used. The same test should pass
+        in Python 3.
+        """
+        testdir.maketxtfile(test_doc="""
+            >>> b'12'.decode('ascii')
+            '12'
+        """)
+        reprec = testdir.inline_run()
+        passed = int(sys.version_info[0] >= 3)
+        reprec.assertoutcome(passed=passed, failed=int(not passed))
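Note: the test_allow_unicode docstring above describes the new ALLOW_UNICODE doctest option. A minimal sketch of a module that benefits from it (module and function names invented for illustration): under Python 2 the decoded value reprs as u'hi', so the doctest below passes on both Python 2 and 3 only when ALLOW_UNICODE is enabled, either inline as shown or via doctest_optionflags = ALLOW_UNICODE in the ini file, with the module collected by py.test --doctest-modules.

# example_unicode.py, illustrative only, not part of this change
def greet():
    """Return a decoded greeting.

    >>> b'hi'.decode('ascii')  # doctest: +ALLOW_UNICODE
    'hi'
    """
    return b'hi'.decode('ascii')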
@@ -1,3 +1,4 @@
+from _pytest.main import EXIT_NOTESTSCOLLECTED
 import pytest
 
 def test_version(testdir, pytestconfig):
@@ -43,7 +44,7 @@ def test_hookvalidation_optional(testdir):
             pass
     """)
     result = testdir.runpytest()
-    assert result.ret == 0
+    assert result.ret == EXIT_NOTESTSCOLLECTED
 
 def test_traceconfig(testdir):
     result = testdir.runpytest("--traceconfig")
@@ -54,14 +55,14 @@ def test_traceconfig(testdir):
 
 def test_debug(testdir, monkeypatch):
     result = testdir.runpytest_subprocess("--debug")
-    assert result.ret == 0
+    assert result.ret == EXIT_NOTESTSCOLLECTED
     p = testdir.tmpdir.join("pytestdebug.log")
     assert "pytest_sessionstart" in p.read()
 
 def test_PYTEST_DEBUG(testdir, monkeypatch):
     monkeypatch.setenv("PYTEST_DEBUG", "1")
     result = testdir.runpytest_subprocess()
-    assert result.ret == 0
+    assert result.ret == EXIT_NOTESTSCOLLECTED
     result.stderr.fnmatch_lines([
         "*pytest_plugin_registered*",
         "*manager*PluginManager*"
@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 
 from xml.dom import minidom
+from _pytest.main import EXIT_NOTESTSCOLLECTED
 import py, sys, os
 from _pytest.junitxml import LogXML
 
@@ -298,7 +299,7 @@ class TestPython:
     def test_collect_skipped(self, testdir):
         testdir.makepyfile("import pytest; pytest.skip('xyz')")
         result, dom = runandparse(testdir)
-        assert not result.ret
+        assert result.ret == EXIT_NOTESTSCOLLECTED
         node = dom.getElementsByTagName("testsuite")[0]
         assert_attr(node, skips=1, tests=0)
         tnode = node.getElementsByTagName("testcase")[0]
@@ -552,4 +553,13 @@ def test_unicode_issue368(testdir):
     log.append_skipped(report)
     log.pytest_sessionfinish()
 
+
+def test_record_property(testdir):
+    testdir.makepyfile("""
+        def test_record(record_xml_property):
+            record_xml_property("foo", "<1");
+    """)
+    result, dom = runandparse(testdir, '-rw')
+    node = dom.getElementsByTagName("testsuite")[0]
+    tnode = node.getElementsByTagName("testcase")[0]
+    assert_attr(tnode, foo="<1")
+    result.stdout.fnmatch_lines('*C3*test_record_property.py*experimental*')
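Note: test_record_property above exercises the record_xml_property fixture, which (per the assertions) attaches an extra attribute to the test's <testcase> element in the JUnit XML report and currently emits an "experimental" warning. A hedged usage sketch, with file and property names invented:

# test_build_info.py, illustrative only; run with: py.test --junitxml=report.xml
def test_build_metadata(record_xml_property):
    # Recorded as an attribute on this test's <testcase> entry in report.xml.
    record_xml_property("build_revision", "abc123")
    assert True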
@@ -347,3 +347,49 @@ def test_SkipTest_in_test(testdir):
     """)
     reprec = testdir.inline_run()
     reprec.assertoutcome(skipped=1)
+
+def test_istest_function_decorator(testdir):
+    p = testdir.makepyfile("""
+        import nose.tools
+        @nose.tools.istest
+        def not_test_prefix():
+            pass
+        """)
+    result = testdir.runpytest(p)
+    result.assert_outcomes(passed=1)
+
+def test_nottest_function_decorator(testdir):
+    testdir.makepyfile("""
+        import nose.tools
+        @nose.tools.nottest
+        def test_prefix():
+            pass
+        """)
+    reprec = testdir.inline_run()
+    assert not reprec.getfailedcollections()
+    calls = reprec.getreports("pytest_runtest_logreport")
+    assert not calls
+
+def test_istest_class_decorator(testdir):
+    p = testdir.makepyfile("""
+        import nose.tools
+        @nose.tools.istest
+        class NotTestPrefix:
+            def test_method(self):
+                pass
+        """)
+    result = testdir.runpytest(p)
+    result.assert_outcomes(passed=1)
+
+def test_nottest_class_decorator(testdir):
+    testdir.makepyfile("""
+        import nose.tools
+        @nose.tools.nottest
+        class TestPrefix:
+            def test_method(self):
+                pass
+        """)
+    reprec = testdir.inline_run()
+    assert not reprec.getfailedcollections()
+    calls = reprec.getreports("pytest_runtest_logreport")
+    assert not calls
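Note: outside the test harness, the behaviour covered by these four tests boils down to the sketch below (module and names invented, requires the nose package): a function or class decorated with @nose.tools.istest is collected even though its name lacks the usual test prefix, while @nose.tools.nottest keeps an otherwise matching name out of the run.

# example_nose_markers.py, illustrative only
import nose.tools

@nose.tools.istest
def check_addition():  # collected despite the non-test name
    assert 1 + 1 == 2

@nose.tools.nottest
def test_helper():  # ignored despite the test_ prefix
    raise RuntimeError("never collected")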
@@ -3,6 +3,7 @@ import py
 import os
 
 from _pytest.config import get_config, PytestPluginManager
+from _pytest.main import EXIT_NOTESTSCOLLECTED
 
 @pytest.fixture
 def pytestpm():
@@ -223,7 +224,7 @@ class TestPytestPluginManager:
         p.copy(p.dirpath("skipping2.py"))
         monkeypatch.setenv("PYTEST_PLUGINS", "skipping2")
         result = testdir.runpytest("-rw", "-p", "skipping1", syspathinsert=True)
-        assert result.ret == 0
+        assert result.ret == EXIT_NOTESTSCOLLECTED
         result.stdout.fnmatch_lines([
             "WI1*skipped plugin*skipping1*hello*",
             "WI1*skipped plugin*skipping2*hello*",
@@ -293,8 +293,8 @@ class TestExecutionForked(BaseFunctionalTests):
 
     def getrunner(self):
         # XXX re-arrange this test to live in pytest-xdist
-        xplugin = pytest.importorskip("xdist.plugin")
-        return xplugin.forked_run_report
+        boxed = pytest.importorskip("xdist.boxed")
+        return boxed.forked_run_report
 
     def test_suicide(self, testdir):
         reports = testdir.runitem("""
@@ -431,6 +431,27 @@ def test_pytest_fail_notrace(testdir):
     ])
     assert 'def teardown_function' not in result.stdout.str()
 
+
+def test_pytest_no_tests_collected_exit_status(testdir):
+    result = testdir.runpytest()
+    result.stdout.fnmatch_lines('*collected 0 items*')
+    assert result.ret == main.EXIT_NOTESTSCOLLECTED
+
+    testdir.makepyfile(test_foo="""
+        def test_foo():
+            assert 1
+    """)
+    result = testdir.runpytest()
+    result.stdout.fnmatch_lines('*collected 1 items*')
+    result.stdout.fnmatch_lines('*1 passed*')
+    assert result.ret == main.EXIT_OK
+
+    result = testdir.runpytest('-k nonmatch')
+    result.stdout.fnmatch_lines('*collected 1 items*')
+    result.stdout.fnmatch_lines('*1 deselected*')
+    assert result.ret == main.EXIT_NOTESTSCOLLECTED
+
+
 def test_exception_printing_skip():
     try:
         pytest.skip("hello")
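Note: test_pytest_no_tests_collected_exit_status above pins the new behaviour down end to end. A caller-side sketch of telling the two statuses apart (the script name, paths and the literal 5 are assumptions based on the imported constant, not values shown in this diff):

# run_suite.py, illustrative wrapper only
import subprocess
import sys

rc = subprocess.call(['py.test', 'tests/'])
if rc == 5:  # assumed value of EXIT_NOTESTSCOLLECTED
    sys.exit("no tests were collected; check -k expressions and test paths")
elif rc != 0:
    sys.exit(rc)  # propagate real failures unchanged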
@@ -1,5 +1,7 @@
 import pytest
 
+from _pytest.main import EXIT_NOTESTSCOLLECTED
+
 class SessionTests:
     def test_basic_testitem_events(self, testdir):
         tfile = testdir.makepyfile("""
@@ -239,4 +241,4 @@ def test_sessionfinish_with_start(testdir):
 
     """)
     res = testdir.runpytest("--collect-only")
-    assert res.ret == 0
+    assert res.ret == EXIT_NOTESTSCOLLECTED
@@ -1,19 +1,24 @@
 """
 terminal reporting of the full testing process.
 """
+import collections
 import pytest
 import py
 import pluggy
 import sys
 
+from _pytest.main import EXIT_NOTESTSCOLLECTED
 from _pytest.terminal import TerminalReporter, repr_pythonversion, getreportopt
-from _pytest.terminal import build_summary_stats_line
+from _pytest.terminal import build_summary_stats_line, _plugin_nameversions
 from _pytest import runner
 
 def basic_run_report(item):
     runner.call_and_report(item, "setup", log=False)
     return runner.call_and_report(item, "call", log=False)
 
+DistInfo = collections.namedtuple('DistInfo', ['project_name', 'version'])
+
 
 class Option:
     def __init__(self, verbose=False, fulltrace=False):
         self.verbose = verbose
@@ -40,6 +45,21 @@ def pytest_generate_tests(metafunc):
                      funcargs={'option': Option(fulltrace=True)})
 
 
+@pytest.mark.parametrize('input,expected', [
+    ([DistInfo(project_name='test', version=1)], ['test-1']),
+    ([DistInfo(project_name='pytest-test', version=1)], ['test-1']),
+    ([
+        DistInfo(project_name='test', version=1),
+        DistInfo(project_name='test', version=1)
+    ], ['test-1']),
+], ids=['normal', 'prefix-strip', 'deduplicate'])
+def test_plugin_nameversion(input, expected):
+    pluginlist = [(None, x) for x in input]
+    result = _plugin_nameversions(pluginlist)
+    assert result == expected
+
+
 class TestTerminal:
     def test_pass_skip_fail(self, testdir, option):
         testdir.makepyfile("""
@@ -577,7 +597,7 @@ def test_traceconfig(testdir, monkeypatch):
     result.stdout.fnmatch_lines([
         "*active plugins*"
     ])
-    assert result.ret == 0
+    assert result.ret == EXIT_NOTESTSCOLLECTED
 
 
 class TestGenericReporting:
@@ -783,4 +803,3 @@ def test_summary_stats(exp_line, exp_color, stats_arg):
     print("Actually got: \"%s\"; with color \"%s\"" % (line, color))
     assert line == exp_line
     assert color == exp_color
-
@@ -1,3 +1,4 @@
+from _pytest.main import EXIT_NOTESTSCOLLECTED
 import pytest
 
 def test_simple_unittest(testdir):
@@ -41,7 +42,7 @@ def test_isclasscheck_issue53(testdir):
         E = _E()
     """)
     result = testdir.runpytest(testpath)
-    assert result.ret == 0
+    assert result.ret == EXIT_NOTESTSCOLLECTED
 
 def test_setup(testdir):
     testpath = testdir.makepyfile("""
@@ -572,7 +573,7 @@ def test_unorderable_types(testdir):
     """)
     result = testdir.runpytest()
     assert "TypeError" not in result.stdout.str()
-    assert result.ret == 0
+    assert result.ret == EXIT_NOTESTSCOLLECTED
 
 def test_unittest_typerror_traceback(testdir):
     testdir.makepyfile("""
6 tox.ini
@@ -22,7 +22,7 @@ deps=
 [testenv:py27-subprocess]
 changedir=.
 basepython=python2.7
-deps=pytest-xdist
+deps=pytest-xdist>=1.13
     mock
     nose
 commands=
@@ -37,7 +37,7 @@ deps = pytest-flakes>=0.2
 commands = py.test --flakes -m flakes _pytest testing
 
 [testenv:py27-xdist]
-deps=pytest-xdist
+deps=pytest-xdist>=1.13
     mock
     nose
 commands=
@@ -63,7 +63,7 @@ commands=
     py.test -rfsxX test_pdb.py test_terminal.py test_unittest.py
 
 [testenv:py27-nobyte]
-deps=pytest-xdist
+deps=pytest-xdist>=1.13
 distribute=true
 setenv=
     PYTHONDONTWRITEBYTECODE=1