Merge branch 'fix-flake8-issues' into features
commit 4fd92ef9ba
AUTHORS | 1 +

@@ -9,6 +9,7 @@ Ahn Ki-Wook
 Alexander Johnson
 Alexei Kozlenok
 Anatoly Bubenkoff
+Andras Tim
 Andreas Zeidler
 Andrzej Ostrowski
 Andy Freeland
@@ -62,14 +62,16 @@ import sys
 import os
 from glob import glob
 
+
 class FastFilesCompleter:
     'Fast file completer class'
+
     def __init__(self, directories=True):
         self.directories = directories
 
     def __call__(self, prefix, **kwargs):
         """only called on non option completions"""
-        if os.path.sep in prefix[1:]: #
+        if os.path.sep in prefix[1:]:
             prefix_dir = len(os.path.dirname(prefix) + os.path.sep)
         else:
             prefix_dir = 0

@@ -98,5 +100,6 @@ if os.environ.get('_ARGCOMPLETE'):
     def try_argcomplete(parser):
         argcomplete.autocomplete(parser)
 else:
-    def try_argcomplete(parser): pass
+    def try_argcomplete(parser):
+        pass
     filescompleter = None
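Note: the second hunk above fixes flake8 E704 (statement on the same line as a def). A minimal sketch of the rule, not part of this commit; the function names are illustrative only:

    # Flagged by flake8 (E704): the body shares a line with the "def".
    def noop_flagged(parser): pass

    # Compliant: the body gets its own line.
    def noop_compliant(parser):
        pass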
@@ -5,6 +5,7 @@
 from __future__ import absolute_import, division, print_function
 import types
 
+
 def format_exception_only(etype, value):
     """Format the exception part of a traceback.

@@ -62,6 +63,7 @@ def format_exception_only(etype, value):
     lines.append(_format_final_exc_line(stype, value))
     return lines
 
+
 def _format_final_exc_line(etype, value):
     """Return a list of a single line -- normal case for format_exception_only"""
     valuestr = _some_str(value)

@@ -71,6 +73,7 @@ def _format_final_exc_line(etype, value):
         line = "%s: %s\n" % (etype, valuestr)
     return line
 
+
 def _some_str(value):
     try:
         return unicode(value)
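Note: the hunks above (and many below) only insert blank lines; flake8 E302 requires two blank lines before a top-level def or class. A minimal sketch of the rule, not from the commit (names are illustrative):

    import types  # any module-level statement


    def format_value(value):  # the two blank lines above satisfy E302
        return str(value)


    class Formatter(object):  # classes need the same two-line gap
        pass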
@@ -18,6 +18,7 @@ else:
 
+
 class Code(object):
     """ wrapper around Python code objects """
     def __init__(self, rawcode):
         if not hasattr(rawcode, "co_filename"):
             rawcode = getrawcode(rawcode)

@@ -82,6 +83,7 @@ class Code(object):
         argcount += raw.co_flags & CO_VARKEYWORDS
         return raw.co_varnames[:argcount]
 
+
 class Frame(object):
     """Wrapper around a Python frame holding f_locals and f_globals
     in which expressions can be evaluated."""

@@ -143,6 +145,7 @@ class Frame(object):
             pass # this can occur when using Psyco
         return retval
 
+
 class TracebackEntry(object):
     """ a single entry in a traceback """
 

@@ -255,11 +258,13 @@ class TracebackEntry(object):
         return self.frame.code.raw.co_name
     name = property(name, None, None, "co_name of underlaying code")
 
+
 class Traceback(list):
     """ Traceback objects encapsulate and offer higher level
     access to Traceback entries.
     """
     Entry = TracebackEntry
+
     def __init__(self, tb, excinfo=None):
         """ initialize from given python traceback object and ExceptionInfo """
         self._excinfo = excinfo

@@ -349,6 +354,7 @@ class Traceback(list):
 co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2',
                    '?', 'eval')
 
+
 class ExceptionInfo(object):
     """ wraps sys.exc_info() objects and offers
     help for navigating the traceback.

@@ -743,6 +749,7 @@ class ReprExceptionInfo(ExceptionRepr):
         self.reprtraceback.toterminal(tw)
         super(ReprExceptionInfo, self).toterminal(tw)
 
+
 class ReprTraceback(TerminalRepr):
     entrysep = "_ "
 

@@ -766,12 +773,14 @@ class ReprTraceback(TerminalRepr):
         if self.extraline:
             tw.line(self.extraline)
 
+
 class ReprTracebackNative(ReprTraceback):
     def __init__(self, tblines):
         self.style = "native"
         self.reprentries = [ReprEntryNative(tblines)]
         self.extraline = None
 
+
 class ReprEntryNative(TerminalRepr):
     style = "native"
 

@@ -781,6 +790,7 @@ class ReprEntryNative(TerminalRepr):
     def toterminal(self, tw):
         tw.write("".join(self.lines))
 
+
 class ReprEntry(TerminalRepr):
     localssep = "_ "
 

@@ -818,6 +828,7 @@ class ReprEntry(TerminalRepr):
                            self.reprlocals,
                            self.reprfileloc)
 
+
 class ReprFileLocation(TerminalRepr):
     def __init__(self, path, lineno, message):
         self.path = str(path)

@@ -834,6 +845,7 @@ class ReprFileLocation(TerminalRepr):
         tw.write(self.path, bold=True, red=True)
         tw.line(":%s: %s" % (self.lineno, msg))
 
+
 class ReprLocals(TerminalRepr):
     def __init__(self, lines):
         self.lines = lines

@@ -842,6 +854,7 @@ class ReprLocals(TerminalRepr):
         for line in self.lines:
             tw.line(line)
 
+
 class ReprFuncArgs(TerminalRepr):
     def __init__(self, args):
         self.args = args
@@ -2,7 +2,8 @@ from __future__ import absolute_import, division, generators, print_function
 
 from bisect import bisect_right
 import sys
-import inspect, tokenize
+import inspect
+import tokenize
 import py
 cpy_compile = compile
 

@@ -19,6 +20,7 @@ class Source(object):
     possibly deindenting it.
     """
     _compilecounter = 0
+
     def __init__(self, *parts, **kwargs):
         self.lines = lines = []
         de = kwargs.get('deindent', True)

@@ -134,7 +136,8 @@ class Source(object):
         try:
             import parser
         except ImportError:
-            syntax_checker = lambda x: compile(x, 'asd', 'exec')
+            def syntax_checker(x):
+                return compile(x, 'asd', 'exec')
         else:
             syntax_checker = parser.suite

@@ -198,8 +201,8 @@ class Source(object):
 # public API shortcut functions
 #
 
-def compile_(source, filename=None, mode='exec', flags=
-        generators.compiler_flag, dont_inherit=0):
+
+def compile_(source, filename=None, mode='exec', flags=generators.compiler_flag, dont_inherit=0):
     """ compile the given source to a raw code object,
         and maintain an internal cache which allows later
         retrieval of the source code for the code object

@@ -245,6 +248,7 @@ def getfslineno(obj):
 # helper functions
 #
 
+
 def findsource(obj):
     try:
         sourcelines, lineno = py.std.inspect.findsource(obj)

@@ -410,5 +414,3 @@ def getstatementrange_old(lineno, source, assertion=False):
         if trysource.isparseable():
             return start, end
     raise SyntaxError("no valid source range around line %d " % (lineno,))
-
-
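Note: splitting `import inspect, tokenize` into two statements fixes flake8 E401 (multiple imports on one line). A minimal sketch, not part of this commit:

    # Flagged by flake8 (E401): two modules in one import statement.
    # import inspect, tokenize

    # Compliant: one module per import line.
    import inspect
    import tokenize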
@@ -25,7 +25,6 @@ def pytest_addoption(parser):
                  expression information.""")
 
 
-
 def register_assert_rewrite(*names):
     """Register one or more module names to be rewritten on import.
 
@@ -39,7 +39,8 @@ ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3
 if sys.version_info >= (3, 5):
     ast_Call = ast.Call
 else:
-    ast_Call = lambda a,b,c: ast.Call(a, b, c, None, None)
+    def ast_Call(a, b, c):
+        return ast.Call(a, b, c, None, None)
 
 
 class AssertionRewritingHook(object):

@@ -215,8 +216,6 @@ class AssertionRewritingHook(object):
             raise
         return sys.modules[name]
 
-
-
     def is_package(self, name):
         try:
             fd, fn, desc = imp.find_module(name)

@@ -283,6 +282,7 @@ N = "\n".encode("utf-8")
 cookie_re = re.compile(r"^[ \t\f]*#.*coding[:=][ \t]*[-\w.]+")
 BOM_UTF8 = '\xef\xbb\xbf'
 
+
 def _rewrite_test(config, fn):
     """Try to read and rewrite *fn* and return the code object."""
     state = config._assertstate

@@ -340,6 +340,7 @@ def _rewrite_test(config, fn):
         return None, None
     return stat, co
 
+
 def _make_rewritten_pyc(state, source_stat, pyc, co):
     """Try to dump rewritten code to *pyc*."""
     if sys.platform.startswith("win"):

@@ -353,6 +354,7 @@ def _make_rewritten_pyc(state, source_stat, pyc, co):
         if _write_pyc(state, co, source_stat, proc_pyc):
             os.rename(proc_pyc, pyc)
 
+
 def _read_pyc(source, pyc, trace=lambda x: None):
     """Possibly read a pytest pyc containing rewritten code.
 

@@ -412,6 +414,7 @@ def _saferepr(obj):
 
 from _pytest.assertion.util import format_explanation as _format_explanation # noqa
 
+
 def _format_assertmsg(obj):
     """Format the custom assertion message given.
 

@@ -439,9 +442,11 @@ def _format_assertmsg(obj):
         s = s.replace(t("\\n"), t("\n~"))
     return s
 
+
 def _should_repr_global_name(obj):
     return not hasattr(obj, "__name__") and not py.builtin.callable(obj)
 
+
 def _format_boolop(explanations, is_or):
     explanation = "(" + (is_or and " or " or " and ").join(explanations) + ")"
     if py.builtin._istext(explanation):

@@ -450,6 +455,7 @@ def _format_boolop(explanations, is_or):
         t = py.builtin.bytes
     return explanation.replace(t('%'), t('%%'))
 
+
 def _call_reprcompare(ops, results, expls, each_obj):
     for i, res, expl in zip(range(len(ops)), results, expls):
         try:

@@ -839,7 +845,7 @@ class AssertionRewriter(ast.NodeVisitor):
             new_kwargs.append(ast.keyword(keyword.arg, res))
             if keyword.arg:
                 arg_expls.append(keyword.arg + "=" + expl)
-            else: ## **args have `arg` keywords with an .arg of None
+            else: # **args have `arg` keywords with an .arg of None
                 arg_expls.append("**" + expl)
 
         expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))

@@ -893,7 +899,6 @@ class AssertionRewriter(ast.NodeVisitor):
     else:
         visit_Call = visit_Call_legacy
 
-
     def visit_Attribute(self, attr):
         if not isinstance(attr.ctx, ast.Load):
             return self.generic_visit(attr)
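Note: the `ast_Call = lambda ...` rewrite above fixes flake8 E731 (do not assign a lambda to a name); a def carries a real `__name__` and shows up properly in tracebacks. A sketch of the same transformation, mirroring the hunk:

    import ast

    # Flagged by flake8 (E731): binding a lambda expression to a name.
    # ast_Call = lambda a, b, c: ast.Call(a, b, c, None, None)

    # Compliant equivalent: a named function (signature copied from the
    # hunk, which targets the pre-3.5 ast.Call arity).
    def ast_Call(a, b, c):
        return ast.Call(a, b, c, None, None)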
@@ -111,11 +111,17 @@ def assertrepr_compare(config, op, left, right):
 
     summary = u('%s %s %s') % (ecu(left_repr), op, ecu(right_repr))
 
-    issequence = lambda x: (isinstance(x, (list, tuple, Sequence)) and
-                            not isinstance(x, basestring))
-    istext = lambda x: isinstance(x, basestring)
-    isdict = lambda x: isinstance(x, dict)
-    isset = lambda x: isinstance(x, (set, frozenset))
+    def issequence(x):
+        return (isinstance(x, (list, tuple, Sequence)) and not isinstance(x, basestring))
+
+    def istext(x):
+        return isinstance(x, basestring)
+
+    def isdict(x):
+        return isinstance(x, dict)
+
+    def isset(x):
+        return isinstance(x, (set, frozenset))
 
     def isiterable(obj):
         try:
@@ -100,6 +100,7 @@ class Cache(object):
 
 class LFPlugin:
     """ Plugin which implements the --lf (run last-failing) option """
+
     def __init__(self, config):
         self.config = config
         active_keys = 'lf', 'failedfirst'

@@ -193,7 +194,6 @@ def pytest_cmdline_main(config):
         return wrap_session(config, cacheshow)
 
 
-
 @pytest.hookimpl(tryfirst=True)
 def pytest_configure(config):
     config.cache = Cache(config)
@@ -171,6 +171,7 @@ def capsys(request):
     request.node._capfuncarg = c = CaptureFixture(SysCapture, request)
     return c
 
+
 @pytest.fixture
 def capfd(request):
     """Enable capturing of writes to file descriptors 1 and 2 and make

@@ -238,6 +239,7 @@ def safe_text_dupfile(f, mode, default_encoding="UTF8"):
 
 class EncodedFile(object):
     errors = "strict" # possibly needed by py3 code (issue555)
+
     def __init__(self, buffer, encoding):
         self.buffer = buffer
         self.encoding = encoding

@@ -318,9 +320,11 @@ class MultiCapture(object):
         return (self.out.snap() if self.out is not None else "",
                 self.err.snap() if self.err is not None else "")
 
+
 class NoCapture:
     __init__ = start = done = suspend = resume = lambda *args: None
 
+
 class FDCapture:
     """ Capture IO to/from a given os-level filedescriptor. """
 
@@ -13,7 +13,6 @@ import py
 import _pytest
 
 
-
 try:
     import enum
 except ImportError: # pragma: no cover

@@ -111,7 +110,6 @@ def getfuncargnames(function, startindex=None):
     return tuple(argnames[startindex:])
 
 
-
 if sys.version_info[:2] == (2, 6):
     def isclass(object):
         """ Return true if the object is a class. Overrides inspect.isclass for

@@ -298,6 +296,7 @@ else:
         def getvalue(self):
             return self.buffer.getvalue().decode('UTF-8')
 
+
 class FuncargnamesCompatAttr(object):
     """ helper class so that Metafunc, Function and FixtureRequest
     don't need to each define the "funcargnames" compatibility attribute.
@@ -63,6 +63,7 @@ def main(args=None, plugins=None):
         sys.stderr.write("ERROR: %s\n" % (msg,))
         return 4
 
+
 class cmdline: # compatibility namespace
     main = staticmethod(main)
 

@@ -116,6 +117,7 @@ def _preloadplugins():
     assert not _preinit
     _preinit.append(get_config())
 
+
 def get_config():
     if _preinit:
         return _preinit.pop(0)

@@ -126,6 +128,7 @@ def get_config():
         pluginmanager.import_plugin(spec)
     return config
 
+
 def get_plugin_manager():
     """
     Obtain a new instance of the

@@ -137,6 +140,7 @@ def get_plugin_manager():
     """
     return get_config().pluginmanager
 
+
 def _prepareconfig(args=None, plugins=None):
     warning = None
     if args is None:

@@ -176,6 +180,7 @@ class PytestPluginManager(PluginManager):
       ``pytest_plugins`` global variables found in plugins being loaded;
     * ``conftest.py`` loading during start-up;
     """
+
     def __init__(self):
         super(PytestPluginManager, self).__init__("pytest", implprefix="pytest_")
         self._conftest_plugins = set()

@@ -206,7 +211,8 @@ class PytestPluginManager(PluginManager):
         """
         .. deprecated:: 2.8
 
-        Use :py:meth:`pluggy.PluginManager.add_hookspecs <_pytest.vendored_packages.pluggy.PluginManager.add_hookspecs>` instead.
+        Use :py:meth:`pluggy.PluginManager.add_hookspecs <_pytest.vendored_packages.pluggy.PluginManager.add_hookspecs>`
+        instead.
         """
         warning = dict(code="I2",
                        fslocation=_pytest._code.getfslineno(sys._getframe(1)),

@@ -805,6 +811,7 @@ class DropShorterLongHelpFormatter(argparse.HelpFormatter):
     - shortcut if there are only two options and one of them is a short one
     - cache result on action object as this is called at least 2 times
     """
+
     def _format_action_invocation(self, action):
         orgstr = argparse.HelpFormatter._format_action_invocation(self, action)
         if orgstr and orgstr[0] != '-': # only optional arguments

@@ -836,7 +843,7 @@ class DropShorterLongHelpFormatter(argparse.HelpFormatter):
             short_long[shortened] = xxoption
         # now short_long has been filled out to the longest with dashes
         # **and** we keep the right option ordering from add_argument
-        for option in options: #
+        for option in options:
             if len(option) == 2 or option[2] == ' ':
                 return_list.append(option)
             if option[2:] == short_long.get(option.replace('-', '')):

@@ -845,22 +852,26 @@ class DropShorterLongHelpFormatter(argparse.HelpFormatter):
         return action._formatted_action_invocation
 
+
 def _ensure_removed_sysmodule(modname):
     try:
         del sys.modules[modname]
     except KeyError:
         pass
 
+
 class CmdOptions(object):
     """ holds cmdline options as attributes."""
+
     def __init__(self, values=()):
         self.__dict__.update(values)
 
     def __repr__(self):
         return "<CmdOptions %r>" % (self.__dict__,)
 
     def copy(self):
         return CmdOptions(self.__dict__)
 
+
 class Notset:
     def __repr__(self):
         return "<NOTSET>"

@@ -1230,12 +1241,14 @@ class Config(object):
         """ (deprecated, use getoption(skip=True)) """
         return self.getoption(name, skip=True)
 
+
 def exists(path, ignore=EnvironmentError):
     try:
         return path.check()
     except ignore:
         return False
 
+
 def getcfg(args, warnfunc=None):
     """
     Search the list of arguments for a valid ini-file for pytest,
@@ -4,7 +4,6 @@ import pdb
 import sys
 
 
-
 def pytest_addoption(parser):
     group = parser.getgroup("general")
     group._addoption(

@@ -40,6 +39,7 @@ def pytest_configure(config):
         pytestPDB._pdb_cls = pdb_cls
     config._cleanup.append(fin)
 
+
 class pytestPDB:
     """ Pseudo PDB that defers to the real pdb. """
     _pluginmanager = None
@@ -22,6 +22,7 @@ DOCTEST_REPORT_CHOICES = (
     DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE,
 )
 
+
 def pytest_addoption(parser):
     parser.addini('doctest_optionflags', 'option flags for doctests',
                   type="args", default=["ELLIPSIS"])

@@ -163,6 +164,7 @@ def get_optionflags(parent):
         flag_acc |= flag_lookup_table[flag]
     return flag_acc
 
+
 class DoctestTextfile(pytest.Module):
     obj = None
 
@@ -19,6 +19,7 @@ from _pytest.compat import (
 from _pytest.runner import fail
 from _pytest.compat import FuncargnamesCompatAttr
 
+
 def pytest_sessionstart(session):
     import _pytest.python
     scopename2class.update({

@@ -38,6 +39,7 @@ scope2props["class"] = scope2props["module"] + ("cls",)
 scope2props["instance"] = scope2props["class"] + ("instance", )
 scope2props["function"] = scope2props["instance"] + ("function", "keywords")
 
+
 def scopeproperty(name=None, doc=None):
     def decoratescope(func):
         scopename = name or func.__name__

@@ -114,7 +116,6 @@ def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
                 node._name2pseudofixturedef[argname] = fixturedef
 
 
-
 def getfixturemarker(obj):
     """ return fixturemarker or None if it doesn't exist or raised
     exceptions."""

@@ -126,7 +127,6 @@ def getfixturemarker(obj):
         return None
 
 
-
 def get_parametrized_fixture_keys(item, scopenum):
     """ return list of keys for all parametrized arguments which match
     the specified scope. """

@@ -166,6 +166,7 @@ def reorder_items(items):
             d[item] = keys
     return reorder_items_atscope(items, set(), argkeys_cache, 0)
 
+
 def reorder_items_atscope(items, ignore, argkeys_cache, scopenum):
     if scopenum >= scopenum_function or len(items) < 3:
         return items

@@ -237,10 +238,10 @@ def fillfixtures(function):
         request._fillfixtures()
 
 
-
 def get_direct_param_fixture_func(request):
     return request.param
 
+
 class FuncFixtureInfo:
     def __init__(self, argnames, names_closure, name2fixturedefs):
         self.argnames = argnames

@@ -279,7 +280,6 @@ class FixtureRequest(FuncargnamesCompatAttr):
         """ underlying collection node (depends on current request scope)"""
         return self._getscopeitem(self.scope)
 
-
     def _getnextfixturedef(self, argname):
         fixturedefs = self._arg2fixturedefs.get(argname, None)
         if fixturedefs is None:

@@ -301,7 +301,6 @@ class FixtureRequest(FuncargnamesCompatAttr):
         """ the pytest config object associated with this request. """
         return self._pyfuncitem.config
 
-
     @scopeproperty()
     def function(self):
         """ test function object if the request has a per-function scope. """

@@ -559,6 +558,7 @@ class FixtureRequest(FuncargnamesCompatAttr):
 class SubRequest(FixtureRequest):
     """ a sub request for handling getting a fixture from a
     test function/fixture. """
+
     def __init__(self, request, scope, param, param_index, fixturedef):
         self._parent_request = request
         self.fixturename = fixturedef.argname

@@ -609,6 +609,7 @@ def scope2index(scope, descr, where=None):
 
 class FixtureLookupError(LookupError):
     """ could not return a requested Fixture (missing or invalid). """
+
     def __init__(self, argname, request, msg=None):
         self.argname = argname
         self.request = request

@@ -709,6 +710,7 @@ def call_fixture_func(fixturefunc, request, kwargs):
 
 class FixtureDef:
     """ A container for a factory definition. """
+
     def __init__(self, fixturemanager, baseid, argname, func, scope, params,
                  unittest=False, ids=None):
         self._fixturemanager = fixturemanager

@@ -783,6 +785,7 @@ class FixtureDef:
         return ("<FixtureDef name=%r scope=%r baseid=%r >" %
                 (self.argname, self.scope, self.baseid))
 
+
 def pytest_fixture_setup(fixturedef, request):
     """ Execution of fixture setup. """
     kwargs = {}

@@ -831,7 +834,6 @@ class FixtureFunctionMarker:
         return function
 
 
-
 def fixture(scope="function", params=None, autouse=False, ids=None, name=None):
     """ (return a) decorator to mark a fixture factory function.
 

@@ -870,7 +872,7 @@ def fixture(scope="function", params=None, autouse=False, ids=None, name=None):
     instead of ``return``. In this case, the code block after the ``yield`` statement is executed
     as teardown code regardless of the test outcome. A fixture function must yield exactly once.
     """
-    if callable(scope) and params is None and autouse == False:
+    if callable(scope) and params is None and autouse is False:
         # direct decoration
         return FixtureFunctionMarker(
             "function", params, autouse, name=name)(scope)

@@ -947,7 +949,6 @@ class FixtureManager:
         self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))]
         session.config.pluginmanager.register(self, "funcmanage")
 
-
     def getfixtureinfo(self, node, func, cls, funcargs=True):
         if funcargs and not hasattr(node, "nofuncargs"):
             if cls is not None:

@@ -1126,4 +1127,3 @@ class FixtureManager:
         for fixturedef in fixturedefs:
             if nodeid.startswith(fixturedef.baseid):
                 yield fixturedef
-
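Note: the `autouse == False` to `autouse is False` change above fixes flake8 E712 (comparison to False). A minimal sketch, not from the commit:

    autouse = False

    # Flagged by flake8 (E712): equality comparison against False.
    # if autouse == False: ...

    # Compliant: identity test against the singleton, as in the hunk,
    # or simply a truthiness test.
    if autouse is False:
        print("autouse disabled")
    if not autouse:
        print("autouse disabled")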
@@ -5,7 +5,6 @@ pytest
 from __future__ import absolute_import, division, print_function
 
 
-
 def freeze_includes():
     """
     Returns a list of module names used by py.test that should be
@@ -4,7 +4,8 @@ from __future__ import absolute_import, division, print_function
 import py
 import pytest
 from _pytest.config import PrintHelp
-import os, sys
+import os
+import sys
 from argparse import Action
 
 

@@ -86,6 +87,7 @@ def pytest_cmdline_parse():
 
     config.add_cleanup(unset_tracing)
 
+
 def pytest_cmdline_main(config):
     if config.option.version:
         p = py.path.local(pytest.__file__)

@@ -102,6 +104,7 @@ def pytest_cmdline_main(config):
         config._ensure_unconfigure()
         return 0
 
+
 def showhelp(config):
     reporter = config.pluginmanager.get_plugin('terminalreporter')
     tw = reporter._tw

@@ -146,6 +149,7 @@ conftest_options = [
     ('pytest_plugins', 'list of plugin names to load'),
 ]
 
+
 def getpluginversioninfo(config):
     lines = []
     plugininfo = config.pluginmanager.list_plugin_distinfo()

@@ -157,6 +161,7 @@ def getpluginversioninfo(config):
             lines.append(" " + content)
     return lines
 
+
 def pytest_report_header(config):
     lines = []
     if config.option.debug or config.option.traceconfig:
@@ -8,6 +8,7 @@ hookspec = HookspecMarker("pytest")
 # Initialization hooks called for every plugin
 # -------------------------------------------------------------------------
 
+
 @hookspec(historic=True)
 def pytest_addhooks(pluginmanager):
     """called at plugin registration time to allow adding new hooks via a call to

@@ -23,6 +24,7 @@ def pytest_namespace():
     time.
     """
 
+
 @hookspec(historic=True)
 def pytest_plugin_registered(plugin, manager):
     """ a new pytest plugin got registered. """

@@ -58,6 +60,7 @@ def pytest_addoption(parser):
     via (deprecated) ``pytest.config``.
     """
 
+
 @hookspec(historic=True)
 def pytest_configure(config):
     """

@@ -79,15 +82,18 @@ def pytest_configure(config):
 # discoverable conftest.py local plugins.
 # -------------------------------------------------------------------------
 
+
 @hookspec(firstresult=True)
 def pytest_cmdline_parse(pluginmanager, args):
     """return initialized config object, parsing the specified args.
 
     Stops at first non-None result, see :ref:`firstresult` """
 
+
 def pytest_cmdline_preparse(config, args):
     """(deprecated) modify command line arguments before option parsing. """
 
+
 @hookspec(firstresult=True)
 def pytest_cmdline_main(config):
     """ called for performing the main command line action. The default

@@ -95,6 +101,7 @@ def pytest_cmdline_main(config):
 
     Stops at first non-None result, see :ref:`firstresult` """
 
+
 def pytest_load_initial_conftests(early_config, parser, args):
     """ implements the loading of initial conftest files ahead
     of command line option parsing. """

@@ -110,13 +117,16 @@ def pytest_collection(session):
 
     Stops at first non-None result, see :ref:`firstresult` """
 
+
 def pytest_collection_modifyitems(session, config, items):
     """ called after collection has been performed, may filter or re-order
     the items in-place."""
 
+
 def pytest_collection_finish(session):
     """ called after collection has been performed and modified. """
 
+
 @hookspec(firstresult=True)
 def pytest_ignore_collect(path, config):
     """ return True to prevent considering this path for collection.

@@ -126,29 +136,37 @@ def pytest_ignore_collect(path, config):
     Stops at first non-None result, see :ref:`firstresult`
     """
 
+
 @hookspec(firstresult=True)
 def pytest_collect_directory(path, parent):
     """ called before traversing a directory for collection files.
 
     Stops at first non-None result, see :ref:`firstresult` """
 
+
 def pytest_collect_file(path, parent):
     """ return collection Node or None for the given path. Any new node
     needs to have the specified ``parent`` as a parent."""
 
+
 # logging hooks for collection
 
+
 def pytest_collectstart(collector):
     """ collector starts collecting. """
 
+
 def pytest_itemcollected(item):
     """ we just collected a test item. """
 
+
 def pytest_collectreport(report):
     """ collector finished collecting. """
 
+
 def pytest_deselected(items):
     """ called for test items deselected by keyword. """
 
+
 @hookspec(firstresult=True)
 def pytest_make_collect_report(collector):
     """ perform ``collector.collect()`` and return a CollectReport.

@@ -159,6 +177,7 @@ def pytest_make_collect_report(collector):
 # Python test function related hooks
 # -------------------------------------------------------------------------
 
+
 @hookspec(firstresult=True)
 def pytest_pycollect_makemodule(path, parent):
     """ return a Module collector or None for the given path.

@@ -168,21 +187,25 @@ def pytest_pycollect_makemodule(path, parent):
 
     Stops at first non-None result, see :ref:`firstresult` """
 
+
 @hookspec(firstresult=True)
 def pytest_pycollect_makeitem(collector, name, obj):
     """ return custom item/collector for a python object in a module, or None.
 
     Stops at first non-None result, see :ref:`firstresult` """
 
+
 @hookspec(firstresult=True)
 def pytest_pyfunc_call(pyfuncitem):
     """ call underlying test function.
 
     Stops at first non-None result, see :ref:`firstresult` """
 
+
 def pytest_generate_tests(metafunc):
     """ generate (multiple) parametrized calls to a test function."""
 
+
 @hookspec(firstresult=True)
 def pytest_make_parametrize_id(config, val, argname):
     """Return a user-friendly string representation of the given ``val`` that will be used

@@ -195,6 +218,7 @@ def pytest_make_parametrize_id(config, val, argname):
 # generic runtest related hooks
 # -------------------------------------------------------------------------
 
+
 @hookspec(firstresult=True)
 def pytest_runtestloop(session):
     """ called for performing the main runtest loop

@@ -202,9 +226,11 @@ def pytest_runtestloop(session):
 
     Stops at first non-None result, see :ref:`firstresult` """
 
+
 def pytest_itemstart(item, node):
     """ (deprecated, use pytest_runtest_logstart). """
 
+
 @hookspec(firstresult=True)
 def pytest_runtest_protocol(item, nextitem):
     """ implements the runtest_setup/call/teardown protocol for

@@ -222,15 +248,19 @@ def pytest_runtest_protocol(item, nextitem):
 
     Stops at first non-None result, see :ref:`firstresult` """
 
+
 def pytest_runtest_logstart(nodeid, location):
     """ signal the start of running a single test item. """
 
+
 def pytest_runtest_setup(item):
     """ called before ``pytest_runtest_call(item)``. """
 
+
 def pytest_runtest_call(item):
     """ called to execute the test ``item``. """
 
+
 def pytest_runtest_teardown(item, nextitem):
     """ called after ``pytest_runtest_call``.
 

@@ -240,6 +270,7 @@ def pytest_runtest_teardown(item, nextitem):
     so that nextitem only needs to call setup-functions.
     """
 
+
 @hookspec(firstresult=True)
 def pytest_runtest_makereport(item, call):
     """ return a :py:class:`_pytest.runner.TestReport` object

@@ -248,6 +279,7 @@ def pytest_runtest_makereport(item, call):
 
     Stops at first non-None result, see :ref:`firstresult` """
 
+
 def pytest_runtest_logreport(report):
     """ process a test setup/call/teardown report relating to
     the respective phase of executing a test. """

@@ -256,12 +288,14 @@ def pytest_runtest_logreport(report):
 # Fixture related hooks
 # -------------------------------------------------------------------------
 
+
 @hookspec(firstresult=True)
 def pytest_fixture_setup(fixturedef, request):
     """ performs fixture setup execution.
 
     Stops at first non-None result, see :ref:`firstresult` """
 
+
 def pytest_fixture_post_finalizer(fixturedef):
     """ called after fixture teardown, but before the cache is cleared so
     the fixture result cache ``fixturedef.cached_result`` can

@@ -271,12 +305,15 @@ def pytest_fixture_post_finalizer(fixturedef):
 # test session related hooks
 # -------------------------------------------------------------------------
 
+
 def pytest_sessionstart(session):
     """ before session.main() is called. """
 
+
 def pytest_sessionfinish(session, exitstatus):
     """ whole test run finishes. """
 
+
 def pytest_unconfigure(config):
     """ called before test process is exited. """
 

@@ -298,6 +335,7 @@ def pytest_assertrepr_compare(config, op, left, right):
 # hooks for influencing reporting (invoked from _pytest_terminal)
 # -------------------------------------------------------------------------
 
+
 def pytest_report_header(config, startdir):
     """ return a string to be displayed as header info for terminal reporting.
 

@@ -308,12 +346,14 @@ def pytest_report_header(config, startdir):
     :ref:`discovers plugins during startup <pluginorder>`.
     """
 
+
 @hookspec(firstresult=True)
 def pytest_report_teststatus(report):
     """ return result-category, shortletter and verbose word for reporting.
 
     Stops at first non-None result, see :ref:`firstresult` """
 
+
 def pytest_terminal_summary(terminalreporter, exitstatus):
     """ add additional section in terminal summary reporting. """
 

@@ -328,6 +368,7 @@ def pytest_logwarning(message, code, nodeid, fslocation):
 # doctest hooks
 # -------------------------------------------------------------------------
 
+
 @hookspec(firstresult=True)
 def pytest_doctest_prepare_content(content):
     """ return processed content for a given doctest

@@ -338,12 +379,15 @@ def pytest_doctest_prepare_content(content):
 # error handling and internal debugging hooks
 # -------------------------------------------------------------------------
 
+
 def pytest_internalerror(excrepr, excinfo):
     """ called for internal errors. """
 
+
 def pytest_keyboard_interrupt(excinfo):
     """ called for keyboard interrupt. """
 
+
 def pytest_exception_interact(node, call, report):
     """called when an exception was raised which can potentially be
     interactively handled.

@@ -352,6 +396,7 @@ def pytest_exception_interact(node, call, report):
     that is not an internal exception like ``skip.Exception``.
     """
 
+
 def pytest_enter_pdb(config):
     """ called upon pdb.set_trace(), can be used by plugins to take special
     action just before the python debugger enters in interactive mode.
@@ -30,7 +30,8 @@ EXIT_NOTESTSCOLLECTED = 5
 def pytest_addoption(parser):
     parser.addini("norecursedirs", "directory patterns to avoid for recursion",
                   type="args", default=['.*', 'build', 'dist', 'CVS', '_darcs', '{arch}', '*.egg', 'venv'])
-    parser.addini("testpaths", "directories to search for tests when no files or directories are given in the command line.",
+    parser.addini("testpaths", "directories to search for tests when no files or directories are given in the "
+                               "command line.",
                   type="args", default=[])
     # parser.addini("dirpatterns",
     #    "patterns specifying possible locations of test files",

@@ -47,7 +48,8 @@ def pytest_addoption(parser):
     group._addoption('--strict', action="store_true",
                      help="marks not registered in configuration file raise errors.")
     group._addoption("-c", metavar="file", type=str, dest="inifilename",
-                     help="load configuration from `file` instead of trying to locate one of the implicit configuration files.")
+                     help="load configuration from `file` instead of trying to locate one of the implicit "
+                          "configuration files.")
     group._addoption("--continue-on-collection-errors", action="store_true",
                      default=False, dest="continue_on_collection_errors",
                      help="Force test execution even if collection errors occur.")

@@ -77,7 +79,6 @@ def pytest_addoption(parser):
                     help="base temporary directory for this test run.")
 
 
-
 def pytest_namespace():
     """keeping this one works around a deeper startup issue in pytest
 

@@ -200,6 +201,7 @@ class FSHookProxy:
         self.__dict__[name] = x
         return x
 
+
 class _CompatProperty(object):
     def __init__(self, name):
         self.name = name

@@ -216,7 +218,6 @@ class _CompatProperty(object):
         return getattr(__import__('pytest'), self.name)
 
 
-
 class NodeKeywords(MappingMixin):
     def __init__(self, node):
         self.node = node

@@ -457,6 +458,7 @@ class Node(object):
 
     repr_failure = _repr_failure_py
 
+
 class Collector(Node):
     """ Collector instances create children through collect()
     and thus iteratively build a tree.

@@ -486,6 +488,7 @@ class Collector(Node):
             ntraceback = ntraceback.cut(excludepath=tracebackcutdir)
             excinfo.traceback = ntraceback.filter()
 
+
 class FSCollector(Collector):
     def __init__(self, fspath, parent=None, config=None, session=None):
         fspath = py.path.local(fspath) # xxx only for test_resultlog.py?

@@ -504,9 +507,11 @@ class FSCollector(Collector):
             relpath = relpath.replace(os.sep, "/")
         return relpath
 
+
 class File(FSCollector):
     """ base class for collecting tests from a file. """
 
+
 class Item(Node):
     """ a basic test invocation item. Note that for a single function
     there might be multiple test invocation items.

@@ -518,6 +523,21 @@ class Item(Node):
         self._report_sections = []
 
     def add_report_section(self, when, key, content):
+        """
+        Adds a new report section, similar to what's done internally to add stdout and
+        stderr captured output::
+
+            item.add_report_section("call", "stdout", "report section contents")
+
+        :param str when:
+            One of the possible capture states, ``"setup"``, ``"call"``, ``"teardown"``.
+        :param str key:
+            Name of the section, can be customized at will. Pytest uses ``"stdout"`` and
+            ``"stderr"`` internally.
+
+        :param str content:
+            The full contents as a string.
+        """
         if content:
             self._report_sections.append((when, key, content))
 

@@ -541,13 +561,16 @@ class Item(Node):
             self._location = location
             return location
 
+
 class NoMatch(Exception):
     """ raised if matching cannot locate a matching names. """
 
+
 class Interrupted(KeyboardInterrupt):
     """ signals an interrupted test run. """
     __module__ = 'builtins' # for py3
 
+
 class Session(FSCollector):
     Interrupted = Interrupted
 
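Note: the `parser.addini(...)` and `group._addoption(...)` hunks above fix flake8 E501 (line too long) by splitting the help strings; adjacent string literals are concatenated at compile time, so the wrapped form yields the same value. A minimal sketch, not from the commit:

    # Adjacent literals are joined by the compiler, so long messages can be
    # wrapped without changing the resulting string.
    help_text = ("load configuration from `file` instead of trying to "
                 "locate one of the implicit configuration files.")
    assert help_text.endswith("configuration files.")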
@@ -165,6 +165,7 @@ def pytest_collection_modifyitems(items, config):
 class MarkMapping:
     """Provides a local mapping for markers where item access
     resolves to True if the marker is present. """
+
     def __init__(self, keywords):
         mymarks = set()
         for key, value in keywords.items():

@@ -180,6 +181,7 @@ class KeywordMapping:
     """Provides a local mapping for keywords.
     Given a list of names, map any substring of one of these names to True.
     """
+
     def __init__(self, names):
         self._names = names
 

@@ -253,7 +255,6 @@ class MarkGenerator:
     on the ``test_function`` object. """
     _config = None
 
-
     def __getattr__(self, name):
         if name[0] == "_":
             raise AttributeError("Marker name must NOT start with underscore")

@@ -280,6 +281,7 @@ def istestfunc(func):
     return hasattr(func, "__call__") and \
         getattr(func, "__name__", "<lambda>") != "<lambda>"
 
+
 class MarkDecorator:
     """ A decorator for test functions and test classes. When applied
     it will create :class:`MarkInfo` objects which may be

@@ -313,6 +315,7 @@ class MarkDecorator:
     additional keyword or positional arguments.
 
     """
+
     def __init__(self, mark):
         assert isinstance(mark, Mark), repr(mark)
         self.mark = mark

@@ -396,6 +399,7 @@ class Mark(namedtuple('Mark', 'name, args, kwargs')):
 
 class MarkInfo(object):
     """ Marking object created by :class:`MarkDecorator` instances. """
+
     def __init__(self, mark):
         assert isinstance(mark, Mark), repr(mark)
         self.combined = mark
@@ -41,6 +41,7 @@ def pytest_runtest_setup(item):
         # XXX this implies we only call teardown when setup worked
         item.session._setupstate.addfinalizer((lambda: teardown_nose(item)), item)
 
+
 def teardown_nose(item):
     if is_potential_nosetest(item):
         if not call_optional(item.obj, 'teardown'):
@@ -122,6 +122,7 @@ winpymap = {
     'python3.5': r'C:\Python35\python.exe',
 }
 
+
 def getexecutable(name, cache={}):
     try:
         return cache[name]

@@ -143,6 +144,7 @@ def getexecutable(name, cache={}):
         cache[name] = executable
         return executable
 
+
 @pytest.fixture(params=['python2.6', 'python2.7', 'python3.3', "python3.4",
                         'pypy', 'pypy3'])
 def anypython(request):

@@ -159,6 +161,8 @@ def anypython(request):
     return executable
 
 # used at least by pytest-xdist plugin
+
+
 @pytest.fixture
 def _pytest(request):
     """ Return a helper which offers a gethookrecorder(hook)

@@ -167,6 +171,7 @@ def _pytest(request):
     """
     return PytestArg(request)
 
+
 class PytestArg:
     def __init__(self, request):
         self.request = request

@@ -337,6 +342,8 @@ def testdir(request, tmpdir_factory):
 
 
 rex_outcome = re.compile(r"(\d+) ([\w-]+)")
+
+
 class RunResult:
     """The result of running a command.
 

@@ -352,6 +359,7 @@ class RunResult:
     :duration: Duration in seconds.
 
     """
+
     def __init__(self, ret, outlines, errlines, duration):
         self.ret = ret
         self.outlines = outlines

@@ -382,7 +390,6 @@ class RunResult:
             assert failed == d.get("failed", 0)
 
 
-
 class Testdir:
     """Temporary test directory with tools to test/run pytest itself.
 

@@ -581,6 +588,7 @@ class Testdir:
         return p
 
     Session = Session
+
     def getnode(self, config, arg):
         """Return the collection node of a file.
 

@@ -1031,6 +1039,7 @@ class Testdir:
         child.timeout = expect_timeout
         return child
 
+
 def getdecoded(out):
     try:
         return out.decode("utf-8")
@@ -8,6 +8,7 @@ import os
 import collections
 from itertools import count
+
 import math
 import py
 from _pytest.mark import MarkerError
 from _pytest.config import hookimpl

@@ -48,7 +49,6 @@ def filter_traceback(entry):
     return p != cutdir1 and not p.relto(cutdir2) and not p.relto(cutdir3)
 
 
-
 def pyobj_property(name):
     def get(self):
         node = self.getparent(getattr(__import__('pytest'), name))

@@ -112,6 +112,7 @@ def pytest_generate_tests(metafunc):
         for marker in markers:
             metafunc.parametrize(*marker.args, **marker.kwargs)
 
+
 def pytest_configure(config):
     config.addinivalue_line("markers",
                             "parametrize(argnames, argvalues): call a test function multiple "

@@ -155,9 +156,11 @@ def pytest_collect_file(path, parent):
         ihook = parent.session.gethookproxy(path)
         return ihook.pytest_pycollect_makemodule(path=path, parent=parent)
 
+
 def pytest_pycollect_makemodule(path, parent):
     return Module(path, parent)
 
+
 @hookimpl(hookwrapper=True)
 def pytest_pycollect_makeitem(collector, name, obj):
     outcome = yield

@@ -176,8 +179,7 @@ def pytest_pycollect_makeitem(collector, name, obj):
         # or a funtools.wrapped.
         # We musn't if it's been wrapped with mock.patch (python 2 only)
         if not (isfunction(obj) or isfunction(get_real_func(obj))):
-            collector.warn(code="C2", message=
-                           "cannot collect %r because it is not a function."
+            collector.warn(code="C2", message="cannot collect %r because it is not a function."
                            % name, )
         elif getattr(obj, "__test__", True):
             if is_generator(obj):

@@ -186,16 +188,17 @@ def pytest_pycollect_makeitem(collector, name, obj):
             res = list(collector._genfunctions(name, obj))
             outcome.force_result(res)
 
+
 def pytest_make_parametrize_id(config, val, argname=None):
     return None
 
 
-
 class PyobjContext(object):
     module = pyobj_property("Module")
     cls = pyobj_property("Class")
     instance = pyobj_property("Instance")
 
+
 class PyobjMixin(PyobjContext):
     def obj():
         def fget(self):

@@ -253,6 +256,7 @@ class PyobjMixin(PyobjContext):
         assert isinstance(lineno, int)
         return fspath, lineno, modpath
 
+
 class PyCollector(PyobjMixin, main.Collector):
 
     def funcnamefilter(self, name):

@@ -471,6 +475,7 @@ def _get_xunit_func(obj, name):
 
 class Class(PyCollector):
     """ Collector for test methods. """
+
     def collect(self):
         if not safe_getattr(self.obj, "__test__", True):
             return []

@@ -497,6 +502,7 @@ class Class(PyCollector):
                 fin_class = getattr(fin_class, '__func__', fin_class)
             self.addfinalizer(lambda: fin_class(self.obj))
 
+
 class Instance(PyCollector):
     def _getobj(self):
         return self.parent.obj()

@@ -509,6 +515,7 @@ class Instance(PyCollector):
         self.obj = self._getobj()
         return self.obj
 
+
 class FunctionMixin(PyobjMixin):
     """ mixin for the code common to Function and Generator.
     """

@@ -595,7 +602,7 @@ class Generator(FunctionMixin, PyCollector):
                 raise ValueError("%r generated tests with non-unique name %r" % (self, name))
             seen[name] = True
             l.append(self.Function(name, self, args=args, callobj=call))
-        self.config.warn('C1', deprecated.YIELD_TESTS, fslocation=self.fspath)
+        self.warn('C1', deprecated.YIELD_TESTS)
         return l
 
     def getcallargs(self, obj):

@@ -696,6 +703,7 @@ class Metafunc(fixtures.FuncargnamesCompatAttr):
     test configuration or values specified in the class or module where a
     test function is defined.
     """
+
     def __init__(self, function, fixtureinfo, config, cls=None, module=None):
         #: access to the :class:`_pytest.config.Config` object for the test session
         self.config = config
@ -1071,6 +1079,432 @@ def _showfixtures_main(config, session):
|
|||
red=True)
|
||||
|
||||
|
||||
# builtin pytest.raises helper
|
||||
|
||||
def raises(expected_exception, *args, **kwargs):
|
||||
"""
|
||||
Assert that a code block/function call raises ``expected_exception``
|
||||
and raise a failure exception otherwise.
|
||||
|
||||
This helper produces a ``ExceptionInfo()`` object (see below).
|
||||
|
||||
If using Python 2.5 or above, you may use this function as a
|
||||
context manager::
|
||||
|
||||
>>> with raises(ZeroDivisionError):
|
||||
... 1/0
|
||||
|
||||
.. versionchanged:: 2.10
|
||||
|
||||
In the context manager form you may use the keyword argument
|
||||
``message`` to specify a custom failure message::
|
||||
|
||||
>>> with raises(ZeroDivisionError, message="Expecting ZeroDivisionError"):
|
||||
... pass
|
||||
Traceback (most recent call last):
|
||||
...
|
||||
Failed: Expecting ZeroDivisionError
|
||||
|
||||
|
||||
.. note::
|
||||
|
||||
When using ``pytest.raises`` as a context manager, it's worthwhile to
|
||||
note that normal context manager rules apply and that the exception
|
||||
raised *must* be the final line in the scope of the context manager.
|
||||
Lines of code after that, within the scope of the context manager will
|
||||
not be executed. For example::
|
||||
|
||||
>>> value = 15
|
||||
>>> with raises(ValueError) as exc_info:
|
||||
... if value > 10:
|
||||
... raise ValueError("value must be <= 10")
|
||||
... assert exc_info.type == ValueError # this will not execute
|
||||
|
||||
Instead, the following approach must be taken (note the difference in
|
||||
scope)::
|
||||
|
||||
>>> with raises(ValueError) as exc_info:
|
||||
... if value > 10:
|
||||
... raise ValueError("value must be <= 10")
|
||||
...
|
||||
>>> assert exc_info.type == ValueError
|
||||
|
||||
Or you can use the keyword argument ``match`` to assert that the
|
||||
exception matches a text or regex::
|
||||
|
||||
>>> with raises(ValueError, match='must be 0 or None'):
|
||||
... raise ValueError("value must be 0 or None")
|
||||
|
||||
>>> with raises(ValueError, match=r'must be \d+$'):
|
||||
... raise ValueError("value must be 42")
|
||||
|
||||
|
||||
Or you can specify a callable by passing a to-be-called lambda::
|
||||
|
||||
>>> raises(ZeroDivisionError, lambda: 1/0)
|
||||
<ExceptionInfo ...>
|
||||
|
||||
or you can specify an arbitrary callable with arguments::
|
||||
|
||||
>>> def f(x): return 1/x
|
||||
...
|
||||
>>> raises(ZeroDivisionError, f, 0)
|
||||
<ExceptionInfo ...>
|
||||
>>> raises(ZeroDivisionError, f, x=0)
|
||||
<ExceptionInfo ...>
|
||||
|
||||
A third possibility is to use a string to be executed::
|
||||
|
||||
>>> raises(ZeroDivisionError, "f(0)")
|
||||
<ExceptionInfo ...>
|
||||
|
||||
.. autoclass:: _pytest._code.ExceptionInfo
|
||||
:members:
|
||||
|
||||
.. note::
|
||||
Similar to caught exception objects in Python, explicitly clearing
|
||||
local references to returned ``ExceptionInfo`` objects can
|
||||
help the Python interpreter speed up its garbage collection.
|
||||
|
||||
Clearing those references breaks a reference cycle
|
||||
(``ExceptionInfo`` --> caught exception --> frame stack raising
|
||||
the exception --> current frame stack --> local variables -->
|
||||
``ExceptionInfo``) which makes Python keep all objects referenced
|
||||
from that cycle (including all local variables in the current
|
||||
frame) alive until the next cyclic garbage collection run. See the
|
||||
official Python ``try`` statement documentation for more detailed
|
||||
information.
|
||||
|
||||
"""
|
||||
__tracebackhide__ = True
|
||||
msg = ("exceptions must be old-style classes or"
|
||||
" derived from BaseException, not %s")
|
||||
if isinstance(expected_exception, tuple):
|
||||
for exc in expected_exception:
|
||||
if not isclass(exc):
|
||||
raise TypeError(msg % type(exc))
|
||||
elif not isclass(expected_exception):
|
||||
raise TypeError(msg % type(expected_exception))
|
||||
|
||||
message = "DID NOT RAISE {0}".format(expected_exception)
|
||||
match_expr = None
|
||||
|
||||
if not args:
|
||||
if "message" in kwargs:
|
||||
message = kwargs.pop("message")
|
||||
if "match" in kwargs:
|
||||
match_expr = kwargs.pop("match")
|
||||
message += " matching '{0}'".format(match_expr)
|
||||
return RaisesContext(expected_exception, message, match_expr)
|
||||
elif isinstance(args[0], str):
|
||||
code, = args
|
||||
assert isinstance(code, str)
|
||||
frame = sys._getframe(1)
|
||||
loc = frame.f_locals.copy()
|
||||
loc.update(kwargs)
|
||||
# print "raises frame scope: %r" % frame.f_locals
|
||||
try:
|
||||
code = _pytest._code.Source(code).compile()
|
||||
py.builtin.exec_(code, frame.f_globals, loc)
|
||||
# XXX didn'T mean f_globals == f_locals something special?
|
||||
# this is destroyed here ...
|
||||
except expected_exception:
|
||||
return _pytest._code.ExceptionInfo()
|
||||
else:
|
||||
func = args[0]
|
||||
try:
|
||||
func(*args[1:], **kwargs)
|
||||
except expected_exception:
|
||||
return _pytest._code.ExceptionInfo()
|
||||
fail(message)
|
||||
|
||||
|
||||
raises.Exception = fail.Exception
|
||||
|
||||
|
||||
class RaisesContext(object):
|
||||
def __init__(self, expected_exception, message, match_expr):
|
||||
self.expected_exception = expected_exception
|
||||
self.message = message
|
||||
self.match_expr = match_expr
|
||||
self.excinfo = None
|
||||
|
||||
def __enter__(self):
|
||||
self.excinfo = object.__new__(_pytest._code.ExceptionInfo)
|
||||
return self.excinfo
|
||||
|
||||
def __exit__(self, *tp):
|
||||
__tracebackhide__ = True
|
||||
if tp[0] is None:
|
||||
fail(self.message)
|
||||
if sys.version_info < (2, 7):
|
||||
# py26: on __exit__() exc_value often does not contain the
|
||||
# exception value.
|
||||
# http://bugs.python.org/issue7853
|
||||
if not isinstance(tp[1], BaseException):
|
||||
exc_type, value, traceback = tp
|
||||
tp = exc_type, exc_type(value), traceback
|
||||
self.excinfo.__init__(tp)
|
||||
suppress_exception = issubclass(self.excinfo.type, self.expected_exception)
|
||||
if sys.version_info[0] == 2 and suppress_exception:
|
||||
sys.exc_clear()
|
||||
if self.match_expr:
|
||||
self.excinfo.match(self.match_expr)
|
||||
return suppress_exception


# builtin pytest.approx helper

class approx(object):
    """
    Assert that two numbers (or two sets of numbers) are equal to each other
    within some tolerance.

    Due to the `intricacies of floating-point arithmetic`__, numbers that we
    would intuitively expect to be equal are not always so::

        >>> 0.1 + 0.2 == 0.3
        False

    __ https://docs.python.org/3/tutorial/floatingpoint.html

    This problem is commonly encountered when writing tests, e.g. when making
    sure that floating-point values are what you expect them to be.  One way to
    deal with this problem is to assert that two floating-point numbers are
    equal to within some appropriate tolerance::

        >>> abs((0.1 + 0.2) - 0.3) < 1e-6
        True

    However, comparisons like this are tedious to write and difficult to
    understand.  Furthermore, absolute comparisons like the one above are
    usually discouraged because there's no tolerance that works well for all
    situations.  ``1e-6`` is good for numbers around ``1``, but too small for
    very big numbers and too big for very small ones.  It's better to express
    the tolerance as a fraction of the expected value, but relative comparisons
    like that are even more difficult to write correctly and concisely.

    The ``approx`` class performs floating-point comparisons using a syntax
    that's as intuitive as possible::

        >>> from pytest import approx
        >>> 0.1 + 0.2 == approx(0.3)
        True

    The same syntax also works on sequences of numbers::

        >>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6))
        True

    By default, ``approx`` considers numbers within a relative tolerance of
    ``1e-6`` (i.e. one part in a million) of its expected value to be equal.
    This treatment would lead to surprising results if the expected value was
    ``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``.
    To handle this case less surprisingly, ``approx`` also considers numbers
    within an absolute tolerance of ``1e-12`` of its expected value to be
    equal.  Infinite numbers are another special case.  They are only
    considered equal to themselves, regardless of the relative tolerance.  Both
    the relative and absolute tolerances can be changed by passing arguments to
    the ``approx`` constructor::

        >>> 1.0001 == approx(1)
        False
        >>> 1.0001 == approx(1, rel=1e-3)
        True
        >>> 1.0001 == approx(1, abs=1e-3)
        True

    If you specify ``abs`` but not ``rel``, the comparison will not consider
    the relative tolerance at all.  In other words, two numbers that are within
    the default relative tolerance of ``1e-6`` will still be considered unequal
    if they exceed the specified absolute tolerance.  If you specify both
    ``abs`` and ``rel``, the numbers will be considered equal if either
    tolerance is met::

        >>> 1 + 1e-8 == approx(1)
        True
        >>> 1 + 1e-8 == approx(1, abs=1e-12)
        False
        >>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12)
        True

    If you're thinking about using ``approx``, then you might want to know how
    it compares to other good ways of comparing floating-point numbers.  All of
    these algorithms are based on relative and absolute tolerances and should
    agree for the most part, but they do have meaningful differences:

    - ``math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0)``:  True if the relative
      tolerance is met w.r.t. either ``a`` or ``b`` or if the absolute
      tolerance is met.  Because the relative tolerance is calculated w.r.t.
      both ``a`` and ``b``, this test is symmetric (i.e. neither ``a`` nor
      ``b`` is a "reference value").  You have to specify an absolute tolerance
      if you want to compare to ``0.0`` because there is no tolerance by
      default.  Only available in python>=3.5.  `More information...`__

      __ https://docs.python.org/3/library/math.html#math.isclose

    - ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference
      between ``a`` and ``b`` is less than the sum of the relative tolerance
      w.r.t. ``b`` and the absolute tolerance.  Because the relative tolerance
      is only calculated w.r.t. ``b``, this test is asymmetric and you can
      think of ``b`` as the reference value.  Support for comparing sequences
      is provided by ``numpy.allclose``.  `More information...`__

      __ http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.isclose.html

    - ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b``
      are within an absolute tolerance of ``1e-7``.  No relative tolerance is
      considered and the absolute tolerance cannot be changed, so this function
      is not appropriate for very large or very small numbers.  Also, it's only
      available in subclasses of ``unittest.TestCase`` and it's ugly because it
      doesn't follow PEP8.  `More information...`__

      __ https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertAlmostEqual

    - ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``: True if the relative
      tolerance is met w.r.t. ``b`` or if the absolute tolerance is met.
      Because the relative tolerance is only calculated w.r.t. ``b``, this test
      is asymmetric and you can think of ``b`` as the reference value.  In the
      special case that you explicitly specify an absolute tolerance but not a
      relative tolerance, only the absolute tolerance is considered.
    """

    def __init__(self, expected, rel=None, abs=None):
        self.expected = expected
        self.abs = abs
        self.rel = rel

    def __repr__(self):
        return ', '.join(repr(x) for x in self.expected)

    def __eq__(self, actual):
        from collections import Iterable
        if not isinstance(actual, Iterable):
            actual = [actual]
        if len(actual) != len(self.expected):
            return False
        return all(a == x for a, x in zip(actual, self.expected))

    __hash__ = None

    def __ne__(self, actual):
        return not (actual == self)

    @property
    def expected(self):
        # Regardless of whether the user-specified expected value is a number
        # or a sequence of numbers, return a list of ApproxNonIterable objects
        # that can be compared against.
        from collections import Iterable

        def approx_non_iter(x):
            return ApproxNonIterable(x, self.rel, self.abs)

        if isinstance(self._expected, Iterable):
            return [approx_non_iter(x) for x in self._expected]
        else:
            return [approx_non_iter(self._expected)]

    @expected.setter
    def expected(self, expected):
        self._expected = expected
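
A short sketch of the comparison rule the docstring above documents (it assumes only the behavior stated there; the doctests in the docstring remain the authoritative examples):

from pytest import approx

# Default tolerances: rel=1e-6 and abs=1e-12; the comparison passes if
# either tolerance is met.
assert 0.1 + 0.2 == approx(0.3)
# Specifying abs alone disables the relative check, per the docstring.
assert not (1 + 1e-8 == approx(1, abs=1e-12))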


class ApproxNonIterable(object):
    """
    Perform approximate comparisons for single numbers only.

    In other words, the ``expected`` attribute for objects of this class must
    be some sort of number.  This is in contrast to the ``approx`` class, where
    the ``expected`` attribute can either be a number or a sequence of numbers.
    This class is responsible for making comparisons, while ``approx`` is
    responsible for abstracting the difference between numbers and sequences of
    numbers.  Although this class can stand on its own, it's only meant to be
    used within ``approx``.
    """

    def __init__(self, expected, rel=None, abs=None):
        self.expected = expected
        self.abs = abs
        self.rel = rel

    def __repr__(self):
        if isinstance(self.expected, complex):
            return str(self.expected)

        # Infinities aren't compared using tolerances, so don't show a
        # tolerance.
        if math.isinf(self.expected):
            return str(self.expected)

        # If a sensible tolerance can't be calculated, self.tolerance will
        # raise a ValueError.  In this case, display '???'.
        try:
            vetted_tolerance = '{:.1e}'.format(self.tolerance)
        except ValueError:
            vetted_tolerance = '???'

        if sys.version_info[0] == 2:
            return '{0} +- {1}'.format(self.expected, vetted_tolerance)
        else:
            return u'{0} \u00b1 {1}'.format(self.expected, vetted_tolerance)

    def __eq__(self, actual):
        # Short-circuit exact equality.
        if actual == self.expected:
            return True

        # Infinity shouldn't be approximately equal to anything but itself, but
        # if there's a relative tolerance, it will be infinite and infinity
        # will seem approximately equal to everything.  The equal-to-itself
        # case would have been short circuited above, so here we can just
        # return false if the expected value is infinite.  The abs() call is
        # for compatibility with complex numbers.
        if math.isinf(abs(self.expected)):
            return False

        # Return true if the two numbers are within the tolerance.
        return abs(self.expected - actual) <= self.tolerance

    __hash__ = None

    def __ne__(self, actual):
        return not (actual == self)

    @property
    def tolerance(self):
        def set_default(x, default):
            return x if x is not None else default

        # Figure out what the absolute tolerance should be.  ``self.abs`` is
        # either None or a value specified by the user.
        absolute_tolerance = set_default(self.abs, 1e-12)

        if absolute_tolerance < 0:
            raise ValueError("absolute tolerance can't be negative: {}".format(absolute_tolerance))
        if math.isnan(absolute_tolerance):
            raise ValueError("absolute tolerance can't be NaN.")

        # If the user specified an absolute tolerance but not a relative one,
        # just return the absolute tolerance.
        if self.rel is None:
            if self.abs is not None:
                return absolute_tolerance

        # Figure out what the relative tolerance should be.  ``self.rel`` is
        # either None or a value specified by the user.  This is done after
        # we've made sure the user didn't ask for an absolute tolerance only,
        # because we don't want to raise errors about the relative tolerance if
        # we aren't even going to use it.
        relative_tolerance = set_default(self.rel, 1e-6) * abs(self.expected)

        if relative_tolerance < 0:
            raise ValueError("relative tolerance can't be negative: {}".format(relative_tolerance))
        if math.isnan(relative_tolerance):
            raise ValueError("relative tolerance can't be NaN.")

        # Return the larger of the relative and absolute tolerances.
        return max(relative_tolerance, absolute_tolerance)
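
To make the ``tolerance`` computation above concrete, a worked example under the default settings (plain arithmetic, not pytest API):

expected = 0.3
relative = 1e-6 * abs(expected)   # about 3e-07: the default rel of 1e-6, scaled
absolute = 1e-12                  # the default abs
tolerance = max(relative, absolute)
assert tolerance == relative      # away from zero the relative term dominates
# At expected = 0.0 the relative candidate collapses to 0.0, so the
# absolute default of 1e-12 is what keeps comparisons against 0.0 usable.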

#
# the basic pytest Function item

@@ -1081,6 +1515,7 @@ class Function(FunctionMixin, main.Item, fixtures.FuncargnamesCompatAttr):
    Python test function.
    """
    _genid = None

    def __init__(self, name, parent, args=None, config=None,
                 callspec=None, callobj=NOTSET, keywords=None, session=None,
                 fixtureinfo=None, originalname=None):
@@ -6,12 +6,14 @@ from __future__ import absolute_import, division, print_function
import py
import os


def pytest_addoption(parser):
    group = parser.getgroup("terminal reporting", "resultlog plugin options")
    group.addoption('--resultlog', '--result-log', action="store",
                    metavar="path", default=None,
                    help="DEPRECATED path for machine-readable result log.")


def pytest_configure(config):
    resultlog = config.option.resultlog
    # prevent opening resultlog on slave nodes (xdist)

@@ -26,6 +28,7 @@ def pytest_configure(config):
        from _pytest.deprecated import RESULT_LOG
        config.warn('C1', RESULT_LOG)


def pytest_unconfigure(config):
    resultlog = getattr(config, '_resultlog', None)
    if resultlog:

@@ -33,6 +36,7 @@ def pytest_unconfigure(config):
        del config._resultlog
        config.pluginmanager.unregister(resultlog)


def generic_path(item):
    chain = item.listchain()
    gpath = [chain[0].name]

@@ -56,6 +60,7 @@ def generic_path(item):
        fspath = newfspath
    return ''.join(gpath)


class ResultLog(object):
    def __init__(self, config, logfile):
        self.config = config
@@ -9,7 +9,6 @@ import py
from _pytest._code.code import TerminalRepr, ExceptionInfo


#
# pytest plugin hooks

@@ -19,6 +18,7 @@ def pytest_addoption(parser):
                    action="store", type=int, default=None, metavar="N",
                    help="show N slowest setup/test durations (N=0 for all)."),


def pytest_terminal_summary(terminalreporter):
    durations = terminalreporter.config.option.durations
    if durations is None:

@@ -44,15 +44,20 @@ def pytest_terminal_summary(terminalreporter):
        tr.write_line("%02.2fs %-8s %s" %
                      (rep.duration, rep.when, nodeid))


def pytest_sessionstart(session):
    session._setupstate = SetupState()


def pytest_sessionfinish(session):
    session._setupstate.teardown_all()


class NodeInfo:
    def __init__(self, location):
        self.location = location


def pytest_runtest_protocol(item, nextitem):
    item.ihook.pytest_runtest_logstart(
        nodeid=item.nodeid, location=item.location,

@@ -60,6 +65,7 @@ def pytest_runtest_protocol(item, nextitem):
    runtestprotocol(item, nextitem=nextitem)
    return True


def runtestprotocol(item, log=True, nextitem=None):
    hasrequest = hasattr(item, "_request")
    if hasrequest and not item._request:

@@ -80,6 +86,7 @@ def runtestprotocol(item, log=True, nextitem=None):
        item.funcargs = None
    return reports


def show_test_item(item):
    """Show test function, parameters and the fixtures of the test item."""
    tw = item.config.get_terminal_writer()

@@ -90,9 +97,11 @@ def show_test_item(item):
    if used_fixtures:
        tw.write(' (fixtures used: {0})'.format(', '.join(used_fixtures)))


def pytest_runtest_setup(item):
    item.session._setupstate.prepare(item)


def pytest_runtest_call(item):
    try:
        item.runtest()

@@ -106,9 +115,11 @@ def pytest_runtest_call(item):
        del tb  # Get rid of it in this namespace
        raise


def pytest_runtest_teardown(item, nextitem):
    item.session._setupstate.teardown_exact(item, nextitem)


def pytest_report_teststatus(report):
    if report.when in ("setup", "teardown"):
        if report.failed:

@@ -133,21 +144,25 @@ def call_and_report(item, when, log=True, **kwds):
        hook.pytest_exception_interact(node=item, call=call, report=report)
    return report


def check_interactive_exception(call, report):
    return call.excinfo and not (
        hasattr(report, "wasxfail") or
        call.excinfo.errisinstance(skip.Exception) or
        call.excinfo.errisinstance(bdb.BdbQuit))


def call_runtest_hook(item, when, **kwds):
    hookname = "pytest_runtest_" + when
    ihook = getattr(item.ihook, hookname)
    return CallInfo(lambda: ihook(item=item, **kwds), when=when)


class CallInfo:
    """ Result/Exception info a function invocation. """
    #: None or ExceptionInfo object.
    excinfo = None

    def __init__(self, func, when):
        #: context of invocation: one of "setup", "call",
        #: "teardown", "memocollect"

@@ -169,6 +184,7 @@ class CallInfo:
            status = "result: %r" % (self.result,)
        return "<CallInfo when=%r %s>" % (self.when, status)


def getslaveinfoline(node):
    try:
        return node._slaveinfocache

@@ -179,6 +195,7 @@ def getslaveinfoline(node):
            d['id'], d['sysplatform'], ver, d['executable'])
        return s


class BaseReport(object):

    def __init__(self, **kw):

@@ -243,6 +260,7 @@ class BaseReport(object):
    def fspath(self):
        return self.nodeid.split("::")[0]


def pytest_runtest_makereport(item, call):
    when = call.when
    duration = call.stop - call.start

@@ -273,10 +291,12 @@ def pytest_runtest_makereport(item, call):
                      keywords, outcome, longrepr, when,
                      sections, duration)


class TestReport(BaseReport):
    """ Basic test report object (also used for setup and teardown calls if
    they fail).
    """

    def __init__(self, nodeid, location, keywords, outcome,
                 longrepr, when, sections=(), duration=0, **extra):
        #: normalized collection node id

@@ -315,14 +335,17 @@ class TestReport(BaseReport):
        return "<TestReport %r when=%r outcome=%r>" % (
            self.nodeid, self.when, self.outcome)


class TeardownErrorReport(BaseReport):
    outcome = "failed"
    when = "teardown"

    def __init__(self, longrepr, **extra):
        self.longrepr = longrepr
        self.sections = []
        self.__dict__.update(extra)


def pytest_make_collect_report(collector):
    call = CallInfo(
        lambda: list(collector.collect()),

@@ -367,14 +390,18 @@ class CollectReport(BaseReport):
        return "<CollectReport %r lenresult=%s outcome=%r>" % (
            self.nodeid, len(self.result), self.outcome)


class CollectErrorRepr(TerminalRepr):
    def __init__(self, msg):
        self.longrepr = msg

    def toterminal(self, out):
        out.line(self.longrepr, red=True)


class SetupState(object):
    """ shared state for setting up/tearing down test items or collectors. """

    def __init__(self):
        self.stack = []
        self._finalizers = {}

@@ -451,6 +478,7 @@ class SetupState(object):
            col._prepare_exc = sys.exc_info()
            raise


def collect_one_node(collector):
    ihook = collector.ihook
    ihook.pytest_collectstart(collector=collector)

@@ -469,6 +497,7 @@ class OutcomeException(Exception):
    """ OutcomeException and its subclass instances indicate and
        contain info about test and collection outcomes.
    """

    def __init__(self, msg=None, pytrace=True):
        Exception.__init__(self, msg)
        self.msg = msg

@@ -483,6 +512,7 @@ class OutcomeException(Exception):
        return "<%s instance>" % (self.__class__.__name__,)
    __str__ = __repr__


class Skipped(OutcomeException):
    # XXX hackish: on 3k we fake to live in the builtins
    # in order to have Skipped exception printing shorter/nicer

@@ -500,12 +530,14 @@ class Failed(OutcomeException):

class Exit(KeyboardInterrupt):
    """ raised for immediate program exits (no tracebacks/summaries)"""

    def __init__(self, msg="unknown reason"):
        self.msg = msg
        KeyboardInterrupt.__init__(self, msg)

# exposed helper methods


def exit(msg):
    """ exit testing process as if KeyboardInterrupt was triggered. """
    __tracebackhide__ = True
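
The ``CallInfo`` class above records either the result or the exception of one protocol phase; a minimal self-contained sketch of that pattern (``MiniCallInfo`` is a hypothetical stand-in, not pytest's implementation, which stores an ``ExceptionInfo`` and treats ``KeyboardInterrupt`` specially):

import sys

class MiniCallInfo(object):
    excinfo = None

    def __init__(self, func, when):
        self.when = when
        try:
            self.result = func()
        except Exception:
            # pytest keeps an ExceptionInfo here; plain exc_info suffices
            # for the sketch.
            self.excinfo = sys.exc_info()

# A failing "call" phase is captured instead of propagating.
info = MiniCallInfo(lambda: 1 / 0, when="call")
assert info.excinfo is not None and info.when == "call"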
@@ -10,6 +10,7 @@ from _pytest.config import hookimpl
from _pytest.mark import MarkInfo, MarkDecorator
from _pytest.runner import fail, skip


def pytest_addoption(parser):
    group = parser.getgroup("general")
    group.addoption('--runxfail',

@@ -269,6 +270,8 @@ def pytest_runtest_makereport(item, call):
            rep.longrepr = filename, line, reason

# called by terminalreporter progress reporting


def pytest_report_teststatus(report):
    if hasattr(report, "wasxfail"):
        if report.skipped:

@@ -277,6 +280,8 @@ def pytest_report_teststatus(report):
        return "xpassed", "X", ("XPASS", {'yellow': True})

# called by the terminalreporter instance/plugin


def pytest_terminal_summary(terminalreporter):
    tr = terminalreporter
    if not tr.reportchars:
@@ -47,6 +47,7 @@ def pytest_addoption(parser):
                    choices=['yes', 'no', 'auto'],
                    help="color terminal output (yes/no/auto).")


def pytest_configure(config):
    config.option.verbose -= config.option.quiet
    reporter = TerminalReporter(config, sys.stdout)

@@ -57,6 +58,7 @@ def pytest_configure(config):
            reporter.write_line("[traceconfig] " + msg)
        config.trace.root.setprocessor("pytest:config", mywriter)


def getreportopt(config):
    reportopts = ""
    reportchars = config.option.reportchars

@@ -72,6 +74,7 @@ def getreportopt(config):
            reportopts = 'fEsxXw'
    return reportopts


def pytest_report_teststatus(report):
    if report.passed:
        letter = "."

@@ -88,6 +91,7 @@ class WarningReport:
    """
    Simple structure to hold warnings information captured by ``pytest_logwarning``.
    """

    def __init__(self, code, message, nodeid=None, fslocation=None):
        """
        :param code: unused

@@ -290,6 +294,9 @@ class TerminalReporter:
        if self.isatty:
            if final:
                line += " \n"
            # Rewrite with empty line so we will not see the artifact of
            # previous write
            self.rewrite('')
            self.rewrite(line, bold=True)
        else:
            self.write_line(line)

@@ -502,7 +509,6 @@ class TerminalReporter:
                content = content[:-1]
            self._tw.line(content)

    def summary_failures(self):
        if self.config.option.tbstyle != "no":
            reports = self.getreports('failed')

@@ -564,6 +570,7 @@ class TerminalReporter:
            self.write_sep("=", "%d tests deselected" % (
                len(self.stats['deselected'])), bold=True)


def repr_pythonversion(v=None):
    if v is None:
        v = sys.version_info

@@ -572,6 +579,7 @@ def repr_pythonversion(v=None):
    except (TypeError, ValueError):
        return str(v)


def flatten(l):
    for x in l:
        if isinstance(x, (list, tuple)):

@@ -580,6 +588,7 @@ def flatten(l):
        else:
            yield x


def build_summary_stats_line(stats):
    keys = ("failed passed skipped deselected "
            "xfailed xpassed warnings error").split()
@@ -0,0 +1 @@
Emit yield test warning only once per generator

@@ -0,0 +1 @@
Ensure final collected line doesn't include artifacts of previous write.

@@ -0,0 +1 @@
Fixed all flake8 errors and warnings
@@ -9,6 +9,7 @@
  <li><a href="{{ pathto('contact') }}">Contact</a></li>
  <li><a href="{{ pathto('talks') }}">Talks/Posts</a></li>
  <li><a href="{{ pathto('changelog') }}">Changelog</a></li>
  <li><a href="{{ pathto('backwards-compatibility') }}">Backwards Compatibility</a></li>
  <li><a href="{{ pathto('license') }}">License</a></li>
</ul>
@@ -81,7 +81,6 @@ class TestGeneralUsage(object):
            "*---unconfigure",
        ])

    def test_config_preparse_plugin_option(self, testdir):
        testdir.makepyfile(pytest_xyz="""
            def pytest_addoption(parser):

@@ -147,7 +146,6 @@ class TestGeneralUsage(object):
            *ERROR*could not load*conftest.py*
        """)

    def test_early_skip(self, testdir):
        testdir.mkdir("xyz")
        testdir.makeconftest("""

@@ -676,7 +674,6 @@ class TestInvocationVariants(object):
        import _pytest.config
        assert type(_pytest.config.get_plugin_manager()) is _pytest.config.PytestPluginManager

    def test_has_plugin(self, request):
        """Test hasplugin function of the plugin manager (#932)."""
        assert request.config.pluginmanager.hasplugin('python')

@@ -764,6 +761,7 @@ class TestDurationWithFixture(object):
        def test_2():
            time.sleep(frag)
    """

    def test_setup_function(self, testdir):
        testdir.makepyfile(self.source)
        result = testdir.runpytest("--durations=10")
@@ -12,6 +12,7 @@ def test_ne():
    code2 = _pytest._code.Code(compile('foo = "baz"', '', 'exec'))
    assert code2 != code1


def test_code_gives_back_name_for_not_existing_file():
    name = 'abc-123'
    co_code = compile("pass\n", name, 'exec')

@@ -20,6 +21,7 @@ def test_code_gives_back_name_for_not_existing_file():
    assert str(code.path) == name
    assert code.fullsource is None


def test_code_with_class():
    class A(object):
        pass

@@ -30,11 +32,13 @@ if True:
    def x():
        pass


def test_code_fullsource():
    code = _pytest._code.Code(x)
    full = code.fullsource
    assert 'test_code_fullsource()' in str(full)


def test_code_source():
    code = _pytest._code.Code(x)
    src = code.source()

@@ -42,6 +46,7 @@ def test_code_source():
        pass"""
    assert str(src) == expected


def test_frame_getsourcelineno_myself():
    def func():
        return sys._getframe(0)

@@ -50,6 +55,7 @@ def test_frame_getsourcelineno_myself():
    source, lineno = f.code.fullsource, f.lineno
    assert source[lineno].startswith(" return sys._getframe(0)")


def test_getstatement_empty_fullsource():
    def func():
        return sys._getframe(0)

@@ -62,6 +68,7 @@ def test_getstatement_empty_fullsource():
    finally:
        f.code.__class__.fullsource = prop


def test_code_from_func():
    co = _pytest._code.Code(test_frame_getsourcelineno_myself)
    assert co.firstlineno

@@ -92,6 +99,7 @@ def test_unicode_handling_syntax_error():
    if sys.version_info[0] < 3:
        unicode(excinfo)


def test_code_getargs():
    def f1(x):
        pass

@@ -141,8 +149,10 @@ class TestExceptionInfo(object):

    def test_bad_getsource(self):
        try:
            if False: pass
            else: assert False
            if False:
                pass
            else:
                assert False
        except AssertionError:
            exci = _pytest._code.ExceptionInfo()
        assert exci.getrepr()

@@ -152,11 +162,13 @@ class TestTracebackEntry(object):

    def test_getsource(self):
        try:
            if False: pass
            else: assert False
            if False:
                pass
            else:
                assert False
        except AssertionError:
            exci = _pytest._code.ExceptionInfo()
        entry = exci.traceback[0]
        source = entry.getsource()
        assert len(source) == 4
        assert 'else: assert False' in source[3]
        assert len(source) == 6
        assert 'assert False' in source[5]
@@ -12,9 +12,6 @@ from _pytest._code.code import (
    ReprExceptionInfo,
    ExceptionChainRepr)

queue = py.builtin._tryimport('queue', 'Queue')

failsonjython = pytest.mark.xfail("sys.platform.startswith('java')")
from test_source import astonly

try:

@@ -24,23 +21,32 @@ except ImportError:
else:
    invalidate_import_caches = getattr(importlib, "invalidate_caches", None)

import pytest
queue = py.builtin._tryimport('queue', 'Queue')

failsonjython = pytest.mark.xfail("sys.platform.startswith('java')")

pytest_version_info = tuple(map(int, pytest.__version__.split(".")[:3]))


class TWMock(object):
    WRITE = object()

    def __init__(self):
        self.lines = []
        self.is_writing = False

    def sep(self, sep, line=None):
        self.lines.append((sep, line))

    def write(self, msg, **kw):
        self.lines.append((TWMock.WRITE, msg))

    def line(self, line, **kw):
        self.lines.append(line)

    def markup(self, text, **kw):
        return text

    def get_write_msg(self, idx):
        flag, msg = self.lines[idx]
        assert flag == TWMock.WRITE

@@ -48,6 +54,7 @@ class TWMock(object):

    fullwidth = 80


def test_excinfo_simple():
    try:
        raise ValueError

@@ -55,6 +62,7 @@ def test_excinfo_simple():
    info = _pytest._code.ExceptionInfo()
    assert info.type == ValueError


def test_excinfo_getstatement():
    def g():
        raise ValueError

@@ -77,20 +85,27 @@ def test_excinfo_getstatement():
    # xxx

# testchain for getentries test below


def f():
    #
    raise ValueError
    #


def g():
    #
    __tracebackhide__ = True
    f()
    #


def h():
    #
    g()
    #


class TestTraceback_f_g_h(object):
    def setup_method(self, method):
        try:

@@ -294,6 +309,7 @@ class TestTraceback_f_g_h(object):
        assert entry.lineno == co.firstlineno + 2
        assert entry.frame.code.name == 'g'


def test_excinfo_exconly():
    excinfo = pytest.raises(ValueError, h)
    assert excinfo.exconly().startswith('ValueError')

@@ -303,11 +319,13 @@ def test_excinfo_exconly():
    assert msg.startswith('ValueError')
    assert msg.endswith("world")


def test_excinfo_repr():
    excinfo = pytest.raises(ValueError, h)
    s = repr(excinfo)
    assert s == "<ExceptionInfo ValueError tblen=4>"


def test_excinfo_str():
    excinfo = pytest.raises(ValueError, h)
    s = str(excinfo)

@@ -315,10 +333,12 @@ def test_excinfo_str():
    assert s.endswith("ValueError")
    assert len(s.split(":")) >= 3  # on windows it's 4


def test_excinfo_errisinstance():
    excinfo = pytest.raises(ValueError, h)
    assert excinfo.errisinstance(ValueError)


def test_excinfo_no_sourcecode():
    try:
        exec ("raise ValueError()")

@@ -330,6 +350,7 @@ def test_excinfo_no_sourcecode():
    else:
        assert s == " File '<string>':1 in <module>\n ???\n"


def test_excinfo_no_python_sourcecode(tmpdir):
    # XXX: simplified locally testable version
    tmpdir.join('test.txt').write("{{ h()}}:")

@@ -358,6 +379,7 @@ def test_entrysource_Queue_example():
    s = str(source).strip()
    assert s.startswith("def get")


def test_codepath_Queue_example():
    try:
        queue.Queue().get(timeout=0.001)

@@ -369,11 +391,13 @@ def test_codepath_Queue_example():
    assert path.basename.lower() == "queue.py"
    assert path.check()


def test_match_succeeds():
    with pytest.raises(ZeroDivisionError) as excinfo:
        0 // 0
    excinfo.match(r'.*zero.*')


def test_match_raises_error(testdir):
    testdir.makepyfile("""
        import pytest

@@ -388,6 +412,7 @@ def test_match_raises_error(testdir):
        "*AssertionError*Pattern*[123]*not found*",
    ])


class TestFormattedExcinfo(object):

    @pytest.fixture

@@ -442,7 +467,6 @@ class TestFormattedExcinfo(object):
            'E AssertionError'
        ]

    def test_repr_source_not_existing(self):
        pr = FormattedExcinfo()
        co = compile("raise ValueError()", "", "exec")

@@ -528,14 +552,12 @@ raise ValueError()
        if py.std.sys.version_info[0] >= 3:
            assert repr.chain[0][0].reprentries[0].lines[0] == "> ???"

        fail = py.error.ENOENT  # noqa
        repr = pr.repr_excinfo(excinfo)
        assert repr.reprtraceback.reprentries[0].lines[0] == "> ???"
        if py.std.sys.version_info[0] >= 3:
            assert repr.chain[0][0].reprentries[0].lines[0] == "> ???"

    def test_repr_local(self):
        p = FormattedExcinfo(showlocals=True)
        loc = {'y': 5, 'z': 7, 'x': 3, '@x': 2, '__builtins__': {}}

@@ -969,7 +991,8 @@ raise ValueError()
        r = excinfo.getrepr(style="long")
        tw = TWMock()
        r.toterminal(tw)
        for line in tw.lines: print (line)
        for line in tw.lines:
            print (line)
        assert tw.lines[0] == ""
        assert tw.lines[1] == " def f():"
        assert tw.lines[2] == "> g()"

@@ -1016,7 +1039,8 @@ raise ValueError()
        r = excinfo.getrepr(style="long")
        tw = TWMock()
        r.toterminal(tw)
        for line in tw.lines: print (line)
        for line in tw.lines:
            print (line)
        assert tw.lines[0] == ""
        assert tw.lines[1] == " def f():"
        assert tw.lines[2] == " try:"
@@ -17,6 +17,7 @@ else:

failsonjython = pytest.mark.xfail("sys.platform.startswith('java')")


def test_source_str_function():
    x = Source("3")
    assert str(x) == "3"

@@ -34,6 +35,7 @@ def test_source_str_function():
    """, rstrip=True)
    assert str(x) == "\n3"


def test_unicode():
    try:
        unicode

@@ -45,10 +47,12 @@ def test_unicode():
    val = eval(co)
    assert isinstance(val, unicode)


def test_source_from_function():
    source = _pytest._code.Source(test_source_str_function)
    assert str(source).startswith('def test_source_str_function():')


def test_source_from_method():
    class TestClass(object):
        def test_method(self):

@@ -57,11 +61,13 @@ def test_source_from_method():
    assert source.lines == ["def test_method(self):",
                            " pass"]


def test_source_from_lines():
    lines = ["a \n", "b\n", "c"]
    source = _pytest._code.Source(lines)
    assert source.lines == ['a ', 'b', 'c']


def test_source_from_inner_function():
    def f():
        pass

@@ -70,6 +76,7 @@ def test_source_from_inner_function():
    source = _pytest._code.Source(f)
    assert str(source).startswith('def f():')


def test_source_putaround_simple():
    source = Source("raise ValueError")
    source = source.putaround(

@@ -86,6 +93,7 @@ except ValueError:
else:
    x = 23"""


def test_source_putaround():
    source = Source()
    source = source.putaround("""

@@ -94,24 +102,28 @@ def test_source_putaround():
    """)
    assert str(source).strip() == "if 1:\n x=1"


def test_source_strips():
    source = Source("")
    assert source == Source()
    assert str(source) == ''
    assert source.strip() == source


def test_source_strip_multiline():
    source = Source()
    source.lines = ["", " hello", " "]
    source2 = source.strip()
    assert source2.lines == [" hello"]


def test_syntaxerror_rerepresentation():
    ex = pytest.raises(SyntaxError, _pytest._code.compile, 'xyz xyz')
    assert ex.value.lineno == 1
    assert ex.value.offset in (4, 7)  # XXX pypy/jython versus cpython?
    assert ex.value.text.strip(), 'x x'


def test_isparseable():
    assert Source("hello").isparseable()
    assert Source("if 1:\n pass").isparseable()

@@ -120,6 +132,7 @@ def test_isparseable():
    assert not Source(" \nif 1:\npass").isparseable()
    assert not Source(chr(0)).isparseable()


class TestAccesses(object):
    source = Source("""\
        def f(x):

@@ -127,6 +140,7 @@ class TestAccesses(object):
        def g(x):
            pass
    """)

    def test_getrange(self):
        x = self.source[0:2]
        assert x.isparseable()

@@ -144,6 +158,7 @@ class TestAccesses(object):
        l = [x for x in self.source]
        assert len(l) == 4


class TestSourceParsingAndCompiling(object):
    source = Source("""\
        def f(x):

@@ -307,6 +322,7 @@ class TestSourceParsingAndCompiling(object):
    def test_offsetless_synerr(self):
        pytest.raises(SyntaxError, _pytest._code.compile, "lambda a,a: 0", mode='eval')


def test_getstartingblock_singleline():
    class A(object):
        def __init__(self, *args):

@@ -318,6 +334,7 @@ def test_getstartingblock_singleline():
    l = [i for i in x.source.lines if i.strip()]
    assert len(l) == 1


def test_getstartingblock_multiline():
    class A(object):
        def __init__(self, *args):

@@ -325,13 +342,14 @@ def test_getstartingblock_multiline():
            self.source = _pytest._code.Frame(frame).statement

    x = A('x',
          'y' \
          'y'
          ,
          'z')

    l = [i for i in x.source.lines if i.strip()]
    assert len(l) == 4


def test_getline_finally():
    def c(): pass
    excinfo = pytest.raises(TypeError, """

@@ -345,6 +363,7 @@ def test_getline_finally():
    source = excinfo.traceback[-1].statement
    assert str(source).strip() == 'c(1)'


def test_getfuncsource_dynamic():
    source = """
        def f():

@@ -386,6 +405,7 @@ def test_deindent():
    lines = deindent(source.splitlines())
    assert lines == ['', 'def f():', ' def g():', ' pass', ' ']


@pytest.mark.xfail("sys.version_info[:3] < (2,7,0)")
def test_source_of_class_at_eof_without_newline(tmpdir):
    # this test fails because the implicit inspect.getsource(A) below

@@ -400,10 +420,12 @@ def test_source_of_class_at_eof_without_newline(tmpdir):
    s2 = _pytest._code.Source(tmpdir.join("a.py").pyimport().A)
    assert str(source).strip() == str(s2).strip()


if True:
    def x():
        pass


def test_getsource_fallback():
    from _pytest._code.source import getsource
    expected = """def x():

@@ -411,6 +433,7 @@ def test_getsource_fallback():
    src = getsource(x)
    assert src == expected


def test_idem_compile_and_getsource():
    from _pytest._code.source import getsource
    expected = "def x(): pass"

@@ -418,12 +441,14 @@ def test_idem_compile_and_getsource():
    src = getsource(co)
    assert src == expected


def test_findsource_fallback():
    from _pytest._code.source import findsource
    src, lineno = findsource(x)
    assert 'test_findsource_simple' in str(src)
    assert src[lineno] == ' def x():'


def test_findsource():
    from _pytest._code.source import findsource
    co = _pytest._code.compile("""if 1:

@@ -462,15 +487,18 @@ def test_getfslineno():
    assert lineno == A_lineno

    assert getfslineno(3) == ("", -1)

    class B(object):
        pass
    B.__name__ = "B2"
    assert getfslineno(B)[1] == -1


def test_code_of_object_instance_with_call():
    class A(object):
        pass
    pytest.raises(TypeError, lambda: _pytest._code.Source(A()))

    class WithCall(object):
        def __call__(self):
            pass

@@ -490,10 +518,12 @@ def getstatement(lineno, source):
    ast, start, end = getstatementrange_ast(lineno, source)
    return source[start:end]


def test_oneline():
    source = getstatement(0, "raise ValueError")
    assert str(source) == "raise ValueError"


def test_comment_and_no_newline_at_end():
    from _pytest._code.source import getstatementrange_ast
    source = Source(['def test_basic_complex():',

@@ -502,10 +532,12 @@ def test_comment_and_no_newline_at_end():
    ast, start, end = getstatementrange_ast(1, source)
    assert end == 2


def test_oneline_and_comment():
    source = getstatement(0, "raise ValueError\n#hello")
    assert str(source) == "raise ValueError"


@pytest.mark.xfail(hasattr(sys, "pypy_version_info"),
                   reason='does not work on pypy')
def test_comments():

@@ -527,6 +559,7 @@ comment 4
        assert str(getstatement(line, source)) == ' assert False'
    assert str(getstatement(10, source)) == '"""'


def test_comment_in_statement():
    source = '''test(foo=1,
        # comment 1

@@ -536,14 +569,17 @@ def test_comment_in_statement():
        assert str(getstatement(line, source)) == \
            'test(foo=1,\n # comment 1\n bar=2)'


def test_single_line_else():
    source = getstatement(1, "if False: 2\nelse: 3")
    assert str(source) == "else: 3"


def test_single_line_finally():
    source = getstatement(1, "try: 1\nfinally: 3")
    assert str(source) == "finally: 3"


def test_issue55():
    source = ('def round_trip(dinp):\n assert 1 == dinp\n'
              'def test_rt():\n round_trip("""\n""")\n')

@@ -560,6 +596,7 @@ x = 3
    """)
    assert str(source) == "raise ValueError(\n 23\n)"


class TestTry(object):
    pytestmark = astonly
    source = """\

@@ -587,6 +624,7 @@ else:
        source = getstatement(5, self.source)
        assert str(source) == " raise KeyError()"


class TestTryFinally(object):
    source = """\
        try:

@@ -604,7 +642,6 @@ finally:
        assert str(source) == " raise IndexError(1)"


class TestIf(object):
    pytestmark = astonly
    source = """\

@@ -632,6 +669,7 @@ else:
        source = getstatement(5, self.source)
        assert str(source) == " y = 7"


def test_semicolon():
    s = """\
    hello ; pytest.skip()

@@ -639,6 +677,7 @@ hello ; pytest.skip()
    source = getstatement(0, s)
    assert str(source) == s.strip()


def test_def_online():
    s = """\
    def func(): raise ValueError(42)

@@ -649,6 +688,7 @@ def something():
    source = getstatement(0, s)
    assert str(source) == "def func(): raise ValueError(42)"


def XXX_test_expression_multiline():
    source = """\
    something
@@ -9,12 +9,16 @@ def test_yield_tests_deprecation(testdir):
        def test_gen():
            yield "m1", func1, 15, 3*5
            yield "m2", func1, 42, 6*7
        def test_gen2():
            for k in range(10):
                yield func1, 1, 1
    """)
    result = testdir.runpytest('-ra')
    result.stdout.fnmatch_lines([
        '*yield tests are deprecated, and scheduled to be removed in pytest 4.0*',
        '*2 passed*',
    ])
    assert result.stdout.str().count('yield tests are deprecated') == 2


def test_funcarg_prefix_deprecation(testdir):
@@ -10,4 +10,3 @@ if __name__ == '__main__':
        hidden.extend(['--hidden-import', x])
    args = ['pyinstaller', '--noconfirm'] + hidden + ['runtests_script.py']
    subprocess.check_call(' '.join(args), shell=True)
@@ -2,5 +2,6 @@
def test_upper():
    assert 'foo'.upper() == 'FOO'


def test_lower():
    assert 'FOO'.lower() == 'foo'
@@ -28,8 +28,8 @@ class TestApprox(object):
        if sys.version_info[:2] == (2, 6):
            tol1, tol2, infr = '???', '???', '???'
        assert repr(approx(1.0)) == '1.0 {pm} {tol1}'.format(pm=plus_minus, tol1=tol1)
        assert repr(approx([1.0, 2.0])) == 'approx([1.0 {pm} {tol1}, 2.0 {pm} {tol2}])'.format(pm=plus_minus, tol1=tol1, tol2=tol2)
        assert repr(approx((1.0, 2.0))) == 'approx((1.0 {pm} {tol1}, 2.0 {pm} {tol2}))'.format(pm=plus_minus, tol1=tol1, tol2=tol2)
        assert repr(approx([1.0, 2.0])) == '1.0 {pm} {tol1}, 2.0 {pm} {tol2}'.format(
            pm=plus_minus, tol1=tol1, tol2=tol2)
        assert repr(approx(inf)) == 'inf'
        assert repr(approx(1.0, rel=nan)) == '1.0 {pm} ???'.format(pm=plus_minus)
        assert repr(approx(1.0, rel=inf)) == '1.0 {pm} {infr}'.format(pm=plus_minus, infr=infr)

@@ -379,4 +379,3 @@ class TestApprox(object):
            '*At index 0 diff: 3 != 4 * {0}'.format(expected),
            '=* 1 failed in *=',
        ])
@@ -496,7 +496,6 @@ class TestFunction(object):
        rec = testdir.inline_run()
        rec.assertoutcome(passed=2)

    def test_parametrize_with_non_hashable_values_indirect(self, testdir):
        """Test parametrization with non-hashable values with indirect parametrization."""
        testdir.makepyfile("""

@@ -524,7 +523,6 @@ class TestFunction(object):
        rec = testdir.inline_run()
        rec.assertoutcome(passed=2)

    def test_parametrize_overrides_fixture(self, testdir):
        """Test parametrization when parameter overrides existing fixture with same name."""
        testdir.makepyfile("""

@@ -552,7 +550,6 @@ class TestFunction(object):
        rec = testdir.inline_run()
        rec.assertoutcome(passed=3)

    def test_parametrize_overrides_parametrized_fixture(self, testdir):
        """Test parametrization when parameter overrides existing parametrized fixture with same name."""
        testdir.makepyfile("""

@@ -890,6 +887,7 @@ class TestConftestCustomization(object):
        result = testdir.runpytest_subprocess()
        result.stdout.fnmatch_lines('*1 passed*')


def test_setup_only_available_in_subdir(testdir):
    sub1 = testdir.mkpydir("sub1")
    sub2 = testdir.mkpydir("sub2")

@@ -916,6 +914,7 @@ def test_setup_only_available_in_subdir(testdir):
    result = testdir.runpytest("-v", "-s")
    result.assert_outcomes(passed=2)


def test_modulecol_roundtrip(testdir):
    modcol = testdir.getmodulecol("pass", withinit=True)
    trail = modcol.nodeid

@@ -1200,6 +1199,7 @@ def test_collector_attributes(testdir):
        "*1 passed*",
    ])


def test_customize_through_attributes(testdir):
    testdir.makeconftest("""
        import pytest

@@ -1369,7 +1369,6 @@ def test_skip_duplicates_by_default(testdir):
    ])


def test_keep_duplicates(testdir):
    """Test for issue https://github.com/pytest-dev/pytest/issues/1609 (#1609)
@@ -7,17 +7,22 @@ from _pytest.pytester import get_public_names
from _pytest.fixtures import FixtureLookupError
from _pytest import fixtures


def test_getfuncargnames():
    def f(): pass
    def f():
        pass
    assert not fixtures.getfuncargnames(f)

    def g(arg): pass
    def g(arg):
        pass
    assert fixtures.getfuncargnames(g) == ('arg',)

    def h(arg1, arg2="hello"): pass
    def h(arg1, arg2="hello"):
        pass
    assert fixtures.getfuncargnames(h) == ('arg1',)

    def h(arg1, arg2, arg3="hello"): pass
    def h(arg1, arg2, arg3="hello"):
        pass
    assert fixtures.getfuncargnames(h) == ('arg1', 'arg2')

    class A(object):

@@ -28,6 +33,7 @@ def test_getfuncargnames():
    if sys.version_info < (3, 0):
        assert fixtures.getfuncargnames(A.f) == ('arg1',)


class TestFillFixtures(object):
    def test_fillfuncargs_exposed(self):
        # used by oejskit, kept for compatibility

@@ -439,7 +445,6 @@ class TestFillFixtures(object):
        ])
        assert "INTERNAL" not in result.stdout.str()

    def test_fixture_excinfo_leak(self, testdir):
        # on python2 sys.excinfo would leak into fixture executions
        testdir.makepyfile("""

@@ -551,7 +556,8 @@ class TestRequestBasic(object):
        else:
            # see #1830 for a cleaner way to accomplish this
            @contextlib.contextmanager
            def expecting_no_warning(): yield
            def expecting_no_warning():
                yield

            warning_expectation = expecting_no_warning()

@@ -639,6 +645,5 @@ class TestRequestBasic(object):
        mod = reprec.getcalls("pytest_runtest_setup")[0].item.module
        assert not mod.l

    def test_request_addfinalizer_partial_setup_failure(self, testdir):
        p = testdir.makepyfile("""
            import pytest

@@ -815,6 +820,7 @@ class TestRequestBasic(object):
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=2)


class TestRequestMarking(object):
    def test_applymarker(self, testdir):
        item1, item2 = testdir.getitems("""

@@ -875,6 +881,7 @@ class TestRequestMarking(object):
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=2)


class TestRequestCachedSetup(object):
    def test_request_cachedsetup_defaultmodule(self, testdir):
        reprec = testdir.inline_runsource("""

@@ -1040,6 +1047,7 @@ class TestRequestCachedSetup(object):
            "*ZeroDivisionError*",
        ])


class TestFixtureUsages(object):
    def test_noargfixturedec(self, testdir):
        testdir.makepyfile("""

@@ -1598,8 +1606,6 @@ class TestAutouseManagement(object):
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=2)

    def test_funcarg_and_setup(self, testdir):
        testdir.makepyfile("""
            import pytest

@@ -2401,7 +2407,6 @@ class TestFixtureMarker(object):
        reprec = testdir.inline_run("-v")
        reprec.assertoutcome(passed=5)

    @pytest.mark.issue246
    @pytest.mark.parametrize("scope", ["session", "function", "module"])
    def test_finalizer_order_on_parametrization(self, scope, testdir):

@@ -2587,6 +2592,7 @@ class TestRequestScopeAccess(object):
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=1)


class TestErrors(object):
    def test_subfactory_missing_funcarg(self, testdir):
        testdir.makepyfile("""

@@ -2632,8 +2638,6 @@ class TestErrors(object):
            *3 pass*2 error*
        """)

    def test_setupfunc_missing_funcarg(self, testdir):
        testdir.makepyfile("""
            import pytest

@@ -2651,6 +2655,7 @@ class TestErrors(object):
            "*1 error*",
        ])


class TestShowFixtures(object):
    def test_funcarg_compat(self, testdir):
        config = testdir.parseconfigure("--funcargs")

@@ -2661,16 +2666,14 @@ class TestShowFixtures(object):
        result.stdout.fnmatch_lines([
            "*tmpdir*",
            "*temporary directory*",
        ]
        )
        ])

    def test_show_fixtures_verbose(self, testdir):
        result = testdir.runpytest("--fixtures", "-v")
        result.stdout.fnmatch_lines([
            "*tmpdir*--*tmpdir.py*",
            "*temporary directory*",
        ]
        )
        ])

    def test_show_fixtures_testmodule(self, testdir):
        p = testdir.makepyfile('''

@@ -2742,7 +2745,6 @@ class TestShowFixtures(object):

        """)

    def test_show_fixtures_different_files(self, testdir):
        """
        #833: --fixtures only shows fixtures from first file

@@ -2921,6 +2923,7 @@ class TestContextManagerFixtureFuncs(object):
        result = testdir.runpytest("-s")
        result.stdout.fnmatch_lines("*mew*")


class TestParameterizedSubRequest(object):
    def test_call_from_fixture(self, testdir):
        testfile = testdir.makepyfile("""

@@ -3026,6 +3029,3 @@ class TestParameterizedSubRequest(object):
            E*{1}:5
            *1 failed*
            """.format(fixfile.strpath, testfile.basename))
@@ -76,6 +76,7 @@ def test_wrapped_getfslineno():
    fs2, lineno2 = python.getfslineno(wrap)
    assert lineno > lineno2, "getfslineno does not unwrap correctly"


class TestMockDecoration(object):
    def test_wrapped_getfuncargnames(self):
        from _pytest.compat import getfuncargnames

@@ -246,6 +247,7 @@ class TestReRunTests(object):
            *2 passed*
        """)


def test_pytestconfig_is_session_scoped():
    from _pytest.fixtures import pytestconfig
    assert pytestconfig._pytestfixturefunction.scope == "session"
@ -29,13 +29,15 @@ class TestMetafunc(object):
|
|||
return python.Metafunc(func, fixtureinfo, None)
|
||||
|
||||
def test_no_funcargs(self, testdir):
|
||||
def function(): pass
|
||||
def function():
|
||||
pass
|
||||
metafunc = self.Metafunc(function)
|
||||
assert not metafunc.fixturenames
|
||||
repr(metafunc._calls)
|
||||
|
||||
def test_function_basic(self):
|
||||
def func(arg1, arg2="qwe"): pass
|
||||
def func(arg1, arg2="qwe"):
|
||||
pass
|
||||
metafunc = self.Metafunc(func)
|
||||
assert len(metafunc.fixturenames) == 1
|
||||
assert 'arg1' in metafunc.fixturenames
|
||||
|
@ -43,7 +45,8 @@ class TestMetafunc(object):
|
|||
assert metafunc.cls is None
|
||||
|
||||
def test_addcall_no_args(self):
|
||||
def func(arg1): pass
|
||||
def func(arg1):
|
||||
pass
|
||||
metafunc = self.Metafunc(func)
|
||||
metafunc.addcall()
|
||||
assert len(metafunc._calls) == 1
|
||||
|
@ -52,7 +55,8 @@ class TestMetafunc(object):
|
|||
assert not hasattr(call, 'param')
|
||||
|
||||
def test_addcall_id(self):
|
||||
def func(arg1): pass
|
||||
def func(arg1):
|
||||
pass
|
||||
metafunc = self.Metafunc(func)
|
||||
pytest.raises(ValueError, "metafunc.addcall(id=None)")
|
||||
|
||||
|
@ -65,10 +69,12 @@ class TestMetafunc(object):
|
|||
assert metafunc._calls[1].id == "2"
|
||||
|
||||
def test_addcall_param(self):
|
||||
def func(arg1): pass
|
||||
def func(arg1):
|
||||
pass
|
||||
metafunc = self.Metafunc(func)
|
||||
|
||||
class obj(object): pass
|
||||
class obj(object):
|
||||
pass
|
||||
|
||||
metafunc.addcall(param=obj)
|
||||
metafunc.addcall(param=obj)
|
||||
|
@ -79,11 +85,13 @@ class TestMetafunc(object):
|
|||
assert metafunc._calls[2].getparam("arg1") == 1
|
||||
|
||||
def test_addcall_funcargs(self):
|
||||
def func(x): pass
|
||||
def func(x):
|
||||
pass
|
||||
|
||||
metafunc = self.Metafunc(func)
|
||||
|
||||
class obj(object): pass
|
||||
class obj(object):
|
||||
pass
|
||||
|
||||
metafunc.addcall(funcargs={"x": 2})
|
||||
metafunc.addcall(funcargs={"x": 3})
|
||||
|
@ -94,7 +102,8 @@ class TestMetafunc(object):
|
|||
assert not hasattr(metafunc._calls[1], 'param')
|
||||
|
||||
def test_parametrize_error(self):
|
||||
def func(x, y): pass
|
||||
def func(x, y):
|
||||
pass
|
||||
metafunc = self.Metafunc(func)
|
||||
metafunc.parametrize("x", [1, 2])
|
||||
pytest.raises(ValueError, lambda: metafunc.parametrize("x", [5, 6]))
|
||||
|
@ -104,7 +113,8 @@ class TestMetafunc(object):
|
|||
pytest.raises(ValueError, lambda: metafunc.parametrize("y", [5, 6]))
|
||||
|
||||
def test_parametrize_bad_scope(self, testdir):
|
||||
def func(x): pass
|
||||
def func(x):
|
||||
pass
|
||||
metafunc = self.Metafunc(func)
|
||||
try:
|
||||
metafunc.parametrize("x", [1], scope='doggy')
|
||||
|
@ -112,7 +122,8 @@ class TestMetafunc(object):
|
|||
assert "has an unsupported scope value 'doggy'" in str(ve)
|
||||
|
||||
def test_parametrize_and_id(self):
|
||||
def func(x, y): pass
|
||||
def func(x, y):
|
||||
pass
|
||||
metafunc = self.Metafunc(func)
|
||||
|
||||
metafunc.parametrize("x", [1, 2], ids=['basic', 'advanced'])
|
||||
|
@ -122,14 +133,16 @@ class TestMetafunc(object):
|
|||
|
||||
def test_parametrize_and_id_unicode(self):
|
||||
"""Allow unicode strings for "ids" parameter in Python 2 (##1905)"""
|
||||
def func(x): pass
|
||||
def func(x):
|
||||
pass
|
||||
        metafunc = self.Metafunc(func)
        metafunc.parametrize("x", [1, 2], ids=[u'basic', u'advanced'])
        ids = [x.id for x in metafunc._calls]
        assert ids == [u"basic", u"advanced"]

    def test_parametrize_with_wrong_number_of_ids(self, testdir):
        def func(x, y): pass
        def func(x, y):
            pass
        metafunc = self.Metafunc(func)

        pytest.raises(ValueError, lambda:

@ -141,13 +154,15 @@ class TestMetafunc(object):
    @pytest.mark.issue510
    def test_parametrize_empty_list(self):
        def func( y): pass
        def func(y):
            pass
        metafunc = self.Metafunc(func)
        metafunc.parametrize("y", [])
        assert 'skip' in metafunc._calls[0].keywords

    def test_parametrize_with_userobjects(self):
        def func(x, y): pass
        def func(x, y):
            pass
        metafunc = self.Metafunc(func)

        class A(object):

@ -178,11 +193,27 @@ class TestMetafunc(object):
        """
        from _pytest.python import _idval
        values = [
            (u'', ''),
            (u'ascii', 'ascii'),
            (u'ação', 'a\\xe7\\xe3o'),
            (u'josé@blah.com', 'jos\\xe9@blah.com'),
            (u'δοκ.ιμή@παράδειγμα.δοκιμή', '\\u03b4\\u03bf\\u03ba.\\u03b9\\u03bc\\u03ae@\\u03c0\\u03b1\\u03c1\\u03ac\\u03b4\\u03b5\\u03b9\\u03b3\\u03bc\\u03b1.\\u03b4\\u03bf\\u03ba\\u03b9\\u03bc\\u03ae'),
            (
                u'',
                ''
            ),
            (
                u'ascii',
                'ascii'
            ),
            (
                u'ação',
                'a\\xe7\\xe3o'
            ),
            (
                u'josé@blah.com',
                'jos\\xe9@blah.com'
            ),
            (
                u'δοκ.ιμή@παράδειγμα.δοκιμή',
                '\\u03b4\\u03bf\\u03ba.\\u03b9\\u03bc\\u03ae@\\u03c0\\u03b1\\u03c1\\u03ac\\u03b4\\u03b5\\u03b9\\u03b3'
                '\\u03bc\\u03b1.\\u03b4\\u03bf\\u03ba\\u03b9\\u03bc\\u03ae'
            ),
        ]
        for val, expected in values:
            assert _idval(val, 'a', 6, None) == expected

@ -331,7 +362,6 @@ class TestMetafunc(object):
            "\nUpdate your code as this will raise an error in pytest-4.0.",
        ]


    def test_parametrize_ids_exception(self, testdir):
        """
        :param testdir: the instance of Testdir class, a temporary

@ -376,7 +406,8 @@ class TestMetafunc(object):
        assert result == ["a0", "a1", "b0", "c", "b1"]

    def test_addcall_and_parametrize(self):
        def func(x, y): pass
        def func(x, y):
            pass
        metafunc = self.Metafunc(func)
        metafunc.addcall({'x': 1})
        metafunc.parametrize('y', [2, 3])

@ -388,7 +419,8 @@ class TestMetafunc(object):
    @pytest.mark.issue714
    def test_parametrize_indirect(self):
        def func(x, y): pass
        def func(x, y):
            pass
        metafunc = self.Metafunc(func)
        metafunc.parametrize('x', [1], indirect=True)
        metafunc.parametrize('y', [2, 3], indirect=True)

@ -400,7 +432,8 @@ class TestMetafunc(object):
    @pytest.mark.issue714
    def test_parametrize_indirect_list(self):
        def func(x, y): pass
        def func(x, y):
            pass
        metafunc = self.Metafunc(func)
        metafunc.parametrize('x, y', [('a', 'b')], indirect=['x'])
        assert metafunc._calls[0].funcargs == dict(y='b')

@ -408,7 +441,8 @@ class TestMetafunc(object):
    @pytest.mark.issue714
    def test_parametrize_indirect_list_all(self):
        def func(x, y): pass
        def func(x, y):
            pass
        metafunc = self.Metafunc(func)
        metafunc.parametrize('x, y', [('a', 'b')], indirect=['x', 'y'])
        assert metafunc._calls[0].funcargs == {}

@ -416,7 +450,8 @@ class TestMetafunc(object):
    @pytest.mark.issue714
    def test_parametrize_indirect_list_empty(self):
        def func(x, y): pass
        def func(x, y):
            pass
        metafunc = self.Metafunc(func)
        metafunc.parametrize('x, y', [('a', 'b')], indirect=[])
        assert metafunc._calls[0].funcargs == dict(x='a', y='b')

@ -454,7 +489,8 @@ class TestMetafunc(object):
    @pytest.mark.issue714
    def test_parametrize_indirect_list_error(self, testdir):
        def func(x, y): pass
        def func(x, y):
            pass
        metafunc = self.Metafunc(func)
        with pytest.raises(ValueError):
            metafunc.parametrize('x, y', [('a', 'b')], indirect=['x', 'z'])

@ -550,7 +586,8 @@ class TestMetafunc(object):
        ])

    def test_addcalls_and_parametrize_indirect(self):
        def func(x, y): pass
        def func(x, y):
            pass
        metafunc = self.Metafunc(func)
        metafunc.addcall(param="123")
        metafunc.parametrize('x', [1], indirect=True)

@ -672,16 +709,20 @@ class TestMetafunc(object):
        """)

    def test_format_args(self):
        def function1(): pass
        def function1():
            pass
        assert fixtures._format_args(function1) == '()'

        def function2(arg1): pass
        def function2(arg1):
            pass
        assert fixtures._format_args(function2) == "(arg1)"

        def function3(arg1, arg2="qwe"): pass
        def function3(arg1, arg2="qwe"):
            pass
        assert fixtures._format_args(function3) == "(arg1, arg2='qwe')"

        def function4(arg1, *args, **kwargs): pass
        def function4(arg1, *args, **kwargs):
            pass
        assert fixtures._format_args(function4) == "(arg1, *args, **kwargs)"


@ -776,7 +817,6 @@ class TestMetafuncFunctional(object):
        result = testdir.runpytest(p)
        result.assert_outcomes(passed=1)


    def test_generate_plugin_and_module(self, testdir):
        testdir.makeconftest("""
            def pytest_generate_tests(metafunc):

@ -1251,6 +1291,7 @@ class TestMetafuncFunctionalAuto(object):
class TestMarkersWithParametrization(object):
    pytestmark = pytest.mark.issue308

    def test_simple_mark(self, testdir):
        s = """
            import pytest

@ -1434,7 +1475,6 @@ class TestMarkersWithParametrization(object):
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=2, skipped=2)


    @pytest.mark.issue290
    def test_parametrize_ID_generation_string_int_works(self, testdir):
        testdir.makepyfile("""

@ -1451,7 +1491,6 @@ class TestMarkersWithParametrization(object):
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=2)


    @pytest.mark.parametrize('strict', [True, False])
    def test_parametrize_marked_value(self, testdir, strict):
        s = """

@ -1475,7 +1514,6 @@ class TestMarkersWithParametrization(object):
        passed, failed = (0, 2) if strict else (2, 0)
        reprec.assertoutcome(passed=passed, failed=failed)


    def test_pytest_make_parametrize_id(self, testdir):
        testdir.makeconftest("""
            def pytest_make_parametrize_id(config, val):

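The paired one-line/expanded definitions above appear to address flake8 check E704 (statement on the same line as a def). A minimal sketch of the pattern, with a hypothetical helper function:

    def helper(x, y): pass      # flagged by flake8 as E704

    def helper(x, y):
        pass                    # compliant: the body gets its own line
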
@ -118,7 +118,6 @@ class TestRaises(object):
        for o in gc.get_objects():
            assert type(o) is not T


    def test_raises_match(self):
        msg = r"with base \d+"
        with pytest.raises(ValueError, match=msg):

@ -1,8 +1,10 @@
from __future__ import absolute_import, division, print_function
import py, pytest
import py
import pytest

# test for _argcomplete but not specific for any application


def equal_with_bash(prefix, ffc, fc, out=None):
    res = ffc(prefix)
    res_bash = set(fc(prefix))

@ -17,6 +19,8 @@ def equal_with_bash(prefix, ffc, fc, out=None):
# copied from argcomplete.completers as import from there
# also pulls in argcomplete.__init__ which opens filedescriptor 9
# this gives an IOError at the end of testrun


def _wrapcall(*args, **kargs):
    try:
        if py.std.sys.version_info > (2, 7):

@ -36,8 +40,10 @@ def _wrapcall(*args, **kargs):
    except py.std.subprocess.CalledProcessError:
        return []


class FilesCompleter(object):
    'File completer class, optionally takes a list of allowed extensions'

    def __init__(self, allowednames=(), directories=True):
        # Fix if someone passes in a string instead of a list
        if type(allowednames) is str:

@ -69,6 +75,7 @@ class FilesCompleter(object):
        completion += [f + '/' for f in anticomp]
        return completion


class TestArgComplete(object):
    @pytest.mark.skipif("sys.platform in ('win32', 'darwin')")
    def test_compare_with_compgen(self):

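Splitting "import py, pytest" into separate statements matches flake8 check E401 (multiple imports on one line). A minimal sketch, assuming only that both modules are installed:

    import py, pytest   # flagged by flake8 as E401

    import py           # compliant: one module per import statement
    import pytest
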
@ -283,6 +283,7 @@ class TestBinReprIntegration(object):
        "*test_check*PASS*",
    ])


def callequal(left, right, verbose=False):
    config = mock_config()
    config.verbose = verbose

@ -712,6 +713,7 @@ def test_python25_compile_issue257(testdir):
        *1 failed*
    """)


def test_rewritten(testdir):
    testdir.makepyfile("""
        def test_rewritten():

@ -719,11 +721,13 @@ def test_rewritten(testdir):
    """)
    assert testdir.runpytest().ret == 0


def test_reprcompare_notin(mock_config):
    detail = plugin.pytest_assertrepr_compare(
        mock_config, 'not in', 'foo', 'aaafoobbb')[1:]
    assert detail == ["'foo' is contained here:", ' aaafoobbb', '? +++']


def test_pytest_assertrepr_compare_integration(testdir):
    testdir.makepyfile("""
        def test_hello():

@ -740,6 +744,7 @@ def test_pytest_assertrepr_compare_integration(testdir):
        "*E*50*",
    ])


def test_sequence_comparison_uses_repr(testdir):
    testdir.makepyfile("""
        def test_hello():

@ -791,6 +796,7 @@ def test_assertion_options(testdir):
    result = testdir.runpytest_subprocess("--assert=plain")
    assert "3 == 4" not in result.stdout.str()


def test_triple_quoted_string_issue113(testdir):
    testdir.makepyfile("""
        def test_hello():

@ -802,6 +808,7 @@ def test_triple_quoted_string_issue113(testdir):
    ])
    assert 'SyntaxError' not in result.stdout.str()


def test_traceback_failure(testdir):
    p1 = testdir.makepyfile("""
        def g():

@ -893,6 +900,7 @@ def test_warn_missing(testdir):
        "*WARNING*assert statements are not executed*",
    ])


def test_recursion_source_decode(testdir):
    testdir.makepyfile("""
        def test_something():

@ -907,6 +915,7 @@ def test_recursion_source_decode(testdir):
        <Module*>
    """)


def test_AssertionError_message(testdir):
    testdir.makepyfile("""
        def test_hello():

@ -920,6 +929,7 @@ def test_AssertionError_message(testdir):
        *AssertionError: (1, 2)*
    """)


@pytest.mark.skipif(PY3, reason='This bug does not exist on PY3')
def test_set_with_unsortable_elements():
    # issue #718

@ -956,6 +966,7 @@ def test_set_with_unsortable_elements():
    """).strip()
    assert '\n'.join(expl) == dedent


def test_diff_newline_at_end(monkeypatch, testdir):
    testdir.makepyfile(r"""
        def test_diff():

@ -970,6 +981,7 @@ def test_diff_newline_at_end(monkeypatch, testdir):
        * ? +
    """)


def test_assert_tuple_warning(testdir):
    testdir.makepyfile("""
        def test_tuple():

@ -981,6 +993,7 @@ def test_assert_tuple_warning(testdir):
        '*assertion is always true*',
    ])


def test_assert_indirect_tuple_no_warning(testdir):
    testdir.makepyfile("""
        def test_tuple():

@ -991,6 +1004,7 @@ def test_assert_indirect_tuple_no_warning(testdir):
    output = '\n'.join(result.stdout.lines)
    assert 'WR1' not in output


def test_assert_with_unicode(monkeypatch, testdir):
    testdir.makepyfile(u"""
        # -*- coding: utf-8 -*-

@ -1000,6 +1014,7 @@ def test_assert_with_unicode(monkeypatch, testdir):
    result = testdir.runpytest()
    result.stdout.fnmatch_lines(['*AssertionError*'])


def test_raise_unprintable_assertion_error(testdir):
    testdir.makepyfile(r"""
        def test_raise_assertion_error():

@ -1008,6 +1023,7 @@ def test_raise_unprintable_assertion_error(testdir):
    result = testdir.runpytest()
    result.stdout.fnmatch_lines([r"> raise AssertionError('\xff')", 'E AssertionError: *'])


def test_raise_assertion_error_raisin_repr(testdir):
    testdir.makepyfile(u"""
        class RaisingRepr(object):

@ -1019,6 +1035,7 @@ def test_raise_assertion_error_raisin_repr(testdir):
    result = testdir.runpytest()
    result.stdout.fnmatch_lines(['E AssertionError: <unprintable AssertionError object>'])


def test_issue_1944(testdir):
    testdir.makepyfile("""
        def f():

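Most hunks in this file only insert blank lines; flake8 checks E301/E302/E305 expect two blank lines around top-level definitions and one between methods. A minimal sketch with hypothetical names:

    import os


    def helper():              # two blank lines before a top-level def (E302)
        return os.getcwd()


    class Example(object):     # and before a top-level class

        def method(self):      # one blank line between methods (E301)
            return helper()
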
@ -1,29 +1,30 @@
from __future__ import absolute_import, division, print_function

import glob
import os
import py_compile
import stat
import sys
import zipfile

import py
import pytest

ast = pytest.importorskip("ast")
if sys.platform.startswith("java"):
    # XXX should be xfail
    pytest.skip("assert rewrite does currently not work on jython")

import _pytest._code
from _pytest.assertion import util
from _pytest.assertion.rewrite import rewrite_asserts, PYTEST_TAG, AssertionRewritingHook
from _pytest.main import EXIT_NOTESTSCOLLECTED

ast = pytest.importorskip("ast")
if sys.platform.startswith("java"):
    # XXX should be xfail
    pytest.skip("assert rewrite does currently not work on jython")


def setup_module(mod):
    mod._old_reprcompare = util._reprcompare
    _pytest._code._reprcompare = None


def teardown_module(mod):
    util._reprcompare = mod._old_reprcompare
    del mod._old_reprcompare

@ -34,6 +35,7 @@ def rewrite(src):
    rewrite_asserts(tree)
    return tree


def getmsg(f, extra_ns=None, must_pass=False):
    """Rewrite the assertions in f, run it, and get the failure message."""
    src = '\n'.join(_pytest._code.Code(f).source().lines)

@ -8,6 +8,7 @@ import shutil
pytest_plugins = "pytester",


class TestNewAPI(object):
    def test_config_cache_makedir(self, testdir):
        testdir.makeini("[pytest]")

@ -367,7 +368,6 @@ class TestLastFailed(object):
        lastfailed = rlf(fail_import=0, fail_run=1)
        assert list(lastfailed) == ['test_maybe.py::test_hello']


    def test_lastfailed_failure_subset(self, testdir, monkeypatch):

        testdir.makepyfile(test_maybe="""

@ -409,12 +409,10 @@ class TestLastFailed(object):
        result, lastfailed = rlf(fail_import=1, fail_run=0)
        assert sorted(list(lastfailed)) == ['test_maybe.py', 'test_maybe2.py']


        result, lastfailed = rlf(fail_import=0, fail_run=0,
                                 args=('test_maybe2.py',))
        assert list(lastfailed) == ['test_maybe.py']


        # edge case of test selection - even if we remember failures
        # from other tests we still need to run all tests if no test
        # matches the failures

@ -49,10 +49,10 @@ def oswritebytes(fd, obj):
    os.write(fd, tobytes(obj))




def StdCaptureFD(out=True, err=True, in_=True):
    return capture.MultiCapture(out, err, in_, Capture=capture.FDCapture)


def StdCapture(out=True, err=True, in_=True):
    return capture.MultiCapture(out, err, in_, Capture=capture.SysCapture)

@ -705,6 +705,7 @@ def tmpfile(testdir):
    if not f.closed:
        f.close()


@needsosdup
def test_dupfile(tmpfile):
    flist = []

@ -723,12 +724,14 @@ def test_dupfile(tmpfile):
    assert "01234" in repr(s)
    tmpfile.close()


def test_dupfile_on_bytesio():
    io = py.io.BytesIO()
    f = capture.safe_text_dupfile(io, "wb")
    f.write("hello")
    assert io.getvalue() == b"hello"


def test_dupfile_on_textio():
    io = py.io.TextIO()
    f = capture.safe_text_dupfile(io, "wb")

@ -1052,6 +1055,7 @@ def test_fdcapture_tmpfile_remains_the_same(tmpfile, use):
    capfile2 = cap.err.tmpfile
    assert capfile2 == capfile


@needsosdup
def test_close_and_capture_again(testdir):
    testdir.makepyfile("""

@ -1071,7 +1075,6 @@ def test_close_and_capture_again(testdir):
    """)



@pytest.mark.parametrize('method', ['SysCapture', 'FDCapture'])
def test_capturing_and_logging_fundamentals(testdir, method):
    if method == "StdCaptureFD" and not hasattr(os, 'dup'):

@ -1,8 +1,10 @@
from __future__ import absolute_import, division, print_function
import pytest, py
import pytest
import py

from _pytest.main import Session, EXIT_NOTESTSCOLLECTED


class TestCollector(object):
    def test_collect_versus_item(self):
        from pytest import Collector, Item

@ -68,7 +70,6 @@ class TestCollector(object):
        parent = fn.getparent(pytest.Class)
        assert parent is cls


    def test_getcustomfile_roundtrip(self, testdir):
        hello = testdir.makefile(".xxx", hello="world")
        testdir.makepyfile(conftest="""

@ -102,6 +103,7 @@ class TestCollector(object):
            '*no tests ran in*',
        ])


class TestCollectFS(object):
    def test_ignored_certain_directories(self, testdir):
        tmpdir = testdir.tmpdir

@ -334,6 +336,7 @@ class TestCustomConftests(object):
            "*test_x*"
        ])


class TestSession(object):
    def test_parsearg(self, testdir):
        p = testdir.makepyfile("def test_func(): pass")

@ -510,6 +513,7 @@ class TestSession(object):
        # ensure we are reporting the collection of the single test item (#2464)
        assert [x.name for x in self.get_reported_items(hookrec)] == ['test_method']


class Test_getinitialnodes(object):
    def test_global_file(self, testdir, tmpdir):
        x = tmpdir.ensure("x.py")

@ -537,6 +541,7 @@ class Test_getinitialnodes(object):
        for col in col.listchain():
            assert col.config is config


class Test_genitems(object):
    def test_check_collect_hashes(self, testdir):
        p = testdir.makepyfile("""

@ -689,6 +694,7 @@ COLLECTION_ERROR_PY_FILES = dict(
    """,
)


def test_exit_on_collection_error(testdir):
    """Verify that all collection errors are collected and no tests executed"""
    testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)

@ -1,10 +1,12 @@
from __future__ import absolute_import, division, print_function
import py, pytest
import py
import pytest

import _pytest._code
from _pytest.config import getcfg, get_common_ancestor, determine_setup
from _pytest.main import EXIT_NOTESTSCOLLECTED


class TestParseIni(object):

    @pytest.mark.parametrize('section, filename',

@ -85,6 +87,7 @@ class TestParseIni(object):
        result = testdir.inline_run("--confcutdir=.")
        assert result.ret == 0


class TestConfigCmdlineParsing(object):
    def test_parsing_again_fails(self, testdir):
        config = testdir.parseconfig()

@ -116,6 +119,7 @@ class TestConfigCmdlineParsing(object):
        ret = pytest.main("-c " + temp_cfg_file)
        assert ret == _pytest.main.EXIT_OK


class TestConfigAPI(object):
    def test_config_trace(self, testdir):
        config = testdir.parseconfig()

@ -472,6 +476,7 @@ def test_plugin_preparse_prevents_setuptools_loading(testdir, monkeypatch):
    plugin = config.pluginmanager.getplugin("mytestplugin")
    assert plugin is None


def test_cmdline_processargs_simple(testdir):
    testdir.makeconftest("""
        def pytest_cmdline_preparse(args):

@ -483,6 +488,7 @@ def test_cmdline_processargs_simple(testdir):
        "*-h*",
    ])


def test_invalid_options_show_extra_information(testdir):
    """display extra information when pytest exits due to unrecognized
    options in the command-line"""

@ -528,6 +534,7 @@ def test_toolongargs_issue224(testdir):
    result = testdir.runpytest("-m", "hello" * 500)
    assert result.ret == EXIT_NOTESTSCOLLECTED


def test_config_in_subdirectory_colon_command_line_issue2148(testdir):
    conftest_source = '''
        def pytest_addoption(parser):

@ -643,6 +650,7 @@ class TestWarning(object):
            *hello*
        """)


class TestRootdir(object):
    def test_simple_noini(self, tmpdir):
        assert get_common_ancestor([tmpdir]) == tmpdir

@ -724,7 +732,6 @@ class TestOverrideIniArgs(object):
        assert result.ret == 0
        result.stdout.fnmatch_lines(["custom_option:3.0"])


    def test_override_ini_pathlist(self, testdir):
        testdir.makeconftest("""
            def pytest_addoption(parser):

@ -826,4 +833,3 @@ class TestOverrideIniArgs(object):
        rootdir, inifile, inicfg = determine_setup(None, ['a/exist'])
        assert rootdir == tmpdir
        assert inifile is None

@ -19,11 +19,13 @@ def basedir(request, tmpdir_factory):
    tmpdir.ensure("adir/b/__init__.py")
    return tmpdir


def ConftestWithSetinitial(path):
    conftest = PytestPluginManager()
    conftest_setinitial(conftest, [path])
    return conftest


def conftest_setinitial(conftest, args, confcutdir=None):
    class Namespace(object):
        def __init__(self):

@ -32,6 +34,7 @@ def conftest_setinitial(conftest, args, confcutdir=None):
            self.noconftest = False
    conftest._set_initial_conftests(Namespace())


class TestConftestValueAccessGlobal(object):
    def test_basic_init(self, basedir):
        conftest = PytestPluginManager()

@ -70,6 +73,7 @@ class TestConftestValueAccessGlobal(object):
        assert path.dirpath() == basedir.join("adir", "b")
        assert path.purebasename.startswith("conftest")


def test_conftest_in_nonpkg_with_init(tmpdir):
    tmpdir.ensure("adir-1.0/conftest.py").write("a=1 ; Directory = 3")
    tmpdir.ensure("adir-1.0/b/conftest.py").write("b=2 ; a = 1.5")

@ -77,6 +81,7 @@ def test_conftest_in_nonpkg_with_init(tmpdir):
    tmpdir.ensure("adir-1.0/__init__.py")
    ConftestWithSetinitial(tmpdir.join("adir-1.0", "b"))


def test_doubledash_considered(testdir):
    conf = testdir.mkdir("--option")
    conf.join("conftest.py").ensure()

@ -85,6 +90,7 @@ def test_doubledash_considered(testdir):
    l = conftest._getconftestmodules(conf)
    assert len(l) == 1


def test_issue151_load_all_conftests(testdir):
    names = "code proj src".split()
    for name in names:

@ -96,6 +102,7 @@ def test_issue151_load_all_conftests(testdir):
    d = list(conftest._conftestpath2mod.values())
    assert len(d) == len(names)


def test_conftest_global_import(testdir):
    testdir.makeconftest("x=3")
    p = testdir.makepyfile("""

@ -117,6 +124,7 @@ def test_conftest_global_import(testdir):
    res = testdir.runpython(p)
    assert res.ret == 0


def test_conftestcutdir(testdir):
    conf = testdir.makeconftest("")
    p = testdir.mkdir("x")

@ -136,6 +144,7 @@ def test_conftestcutdir(testdir):
    assert len(l) == 1
    assert l[0].__file__.startswith(str(conf))


def test_conftestcutdir_inplace_considered(testdir):
    conf = testdir.makeconftest("")
    conftest = PytestPluginManager()

@ -144,6 +153,7 @@ def test_conftestcutdir_inplace_considered(testdir):
    assert len(l) == 1
    assert l[0].__file__.startswith(str(conf))


@pytest.mark.parametrize("name", 'test tests whatever .dotdir'.split())
def test_setinitial_conftest_subdirs(testdir, name):
    sub = testdir.mkdir(name)

@ -157,6 +167,7 @@ def test_setinitial_conftest_subdirs(testdir, name):
    assert subconftest not in conftest._conftestpath2mod
    assert len(conftest._conftestpath2mod) == 0


def test_conftest_confcutdir(testdir):
    testdir.makeconftest("assert 0")
    x = testdir.mkdir("x")

@ -168,6 +179,7 @@ def test_conftest_confcutdir(testdir):
    result.stdout.fnmatch_lines(["*--xyz*"])
    assert 'warning: could not load initial' not in result.stdout.str()


def test_no_conftest(testdir):
    testdir.makeconftest("assert 0")
    result = testdir.runpytest("--noconftest")

@ -176,6 +188,7 @@ def test_no_conftest(testdir):
    result = testdir.runpytest()
    assert result.ret == EXIT_USAGEERROR


def test_conftest_existing_resultlog(testdir):
    x = testdir.mkdir("tests")
    x.join("conftest.py").write(_pytest._code.Source("""

@ -186,6 +199,7 @@ def test_conftest_existing_resultlog(testdir):
    result = testdir.runpytest("-h", "--resultlog", "result.log")
    result.stdout.fnmatch_lines(["*--xyz*"])


def test_conftest_existing_junitxml(testdir):
    x = testdir.mkdir("tests")
    x.join("conftest.py").write(_pytest._code.Source("""

@ -196,6 +210,7 @@ def test_conftest_existing_junitxml(testdir):
    result = testdir.runpytest("-h", "--junitxml", "junit.xml")
    result.stdout.fnmatch_lines(["*--xyz*"])


def test_conftest_import_order(testdir, monkeypatch):
    ct1 = testdir.makeconftest("")
    sub = testdir.mkdir("sub")

@ -294,7 +294,6 @@ class TestDoctests(object):
            "*:5: DocTestFailure"
        ])


    def test_txtfile_failing(self, testdir):
        p = testdir.maketxtfile("""
            >>> i = 0

@ -932,4 +931,3 @@ class TestDoctestReportingOption(object):
        result.stderr.fnmatch_lines([
            "*error: argument --doctest-report: invalid choice: 'obviously_invalid_format' (choose from*"
        ])

@ -2,6 +2,7 @@ from __future__ import absolute_import, division, print_function
from _pytest.main import EXIT_NOTESTSCOLLECTED
import pytest


def test_version(testdir, pytestconfig):
    result = testdir.runpytest("--version")
    assert result.ret == 0

@ -15,6 +16,7 @@ def test_version(testdir, pytestconfig):
        "*at*",
    ])


def test_help(testdir):
    result = testdir.runpytest("--help")
    assert result.ret == 0

@ -26,6 +28,7 @@ def test_help(testdir):
        *to see*fixtures*pytest --fixtures*
    """)


def test_hookvalidation_unknown(testdir):
    testdir.makeconftest("""
        def pytest_hello(xyz):

@ -37,6 +40,7 @@ def test_hookvalidation_unknown(testdir):
        '*unknown hook*pytest_hello*'
    ])


def test_hookvalidation_optional(testdir):
    testdir.makeconftest("""
        import pytest

@ -47,6 +51,7 @@ def test_hookvalidation_optional(testdir):
    result = testdir.runpytest()
    assert result.ret == EXIT_NOTESTSCOLLECTED


def test_traceconfig(testdir):
    result = testdir.runpytest("--traceconfig")
    result.stdout.fnmatch_lines([

@ -54,12 +59,14 @@ def test_traceconfig(testdir):
        "*active plugins*",
    ])


def test_debug(testdir, monkeypatch):
    result = testdir.runpytest_subprocess("--debug")
    assert result.ret == EXIT_NOTESTSCOLLECTED
    p = testdir.tmpdir.join("pytestdebug.log")
    assert "pytest_sessionstart" in p.read()


def test_PYTEST_DEBUG(testdir, monkeypatch):
    monkeypatch.setenv("PYTEST_DEBUG", "1")
    result = testdir.runpytest_subprocess()

@ -600,6 +600,7 @@ class TestPython(object):
        assert "hello-stdout call" in systemout.toxml()
        assert "hello-stdout teardown" in systemout.toxml()


def test_mangle_test_address():
    from _pytest.junitxml import mangle_test_address
    address = '::'.join(

@ -760,11 +761,13 @@ def test_logxml_makedir(testdir):
    assert result.ret == 0
    assert testdir.tmpdir.join("path/to/results.xml").check()


def test_logxml_check_isdir(testdir):
    """Give an error if --junit-xml is a directory (#2089)"""
    result = testdir.runpytest("--junit-xml=.")
    result.stderr.fnmatch_lines(["*--junitxml must be a filename*"])


def test_escaped_parametrized_names_xml(testdir):
    testdir.makepyfile("""
        import pytest

@ -1057,4 +1060,3 @@ def test_set_suite_name(testdir, suite_name):
    assert result.ret == 0
    node = dom.find_first_by_tag("testsuite")
    node.assert_attr(name=expected)

@ -5,6 +5,7 @@ import sys
import pytest
from _pytest.mark import MarkGenerator as Mark, ParameterSet, transfer_markers


class TestMark(object):
    def test_markinfo_repr(self):
        from _pytest.mark import MarkInfo, Mark

@ -140,6 +141,7 @@ def test_ini_markers(testdir):
    rec = testdir.inline_run()
    rec.assertoutcome(passed=1)


def test_markers_option(testdir):
    testdir.makeini("""
        [pytest]

@ -153,6 +155,7 @@ def test_markers_option(testdir):
        "*a1some*another marker",
    ])


def test_markers_option_with_plugin_in_current_dir(testdir):
    testdir.makeconftest('pytest_plugins = "flip_flop"')
    testdir.makepyfile(flip_flop="""\

@ -186,6 +189,7 @@ def test_mark_on_pseudo_function(testdir):
    reprec = testdir.inline_run()
    reprec.assertoutcome(passed=1)


def test_strict_prohibits_unregistered_markers(testdir):
    testdir.makepyfile("""
        import pytest

@ -199,6 +203,7 @@ def test_strict_prohibits_unregistered_markers(testdir):
        "*unregisteredmark*not*registered*",
    ])


@pytest.mark.parametrize("spec", [
    ("xyz", ("test_one",)),
    ("xyz and xyz2", ()),

@ -222,6 +227,7 @@ def test_mark_option(spec, testdir):
    assert len(passed) == len(passed_result)
    assert list(passed) == list(passed_result)


@pytest.mark.parametrize("spec", [
    ("interface", ("test_interface",)),
    ("not interface", ("test_nointer",)),

@ -247,6 +253,7 @@ def test_mark_option_custom(spec, testdir):
    assert len(passed) == len(passed_result)
    assert list(passed) == list(passed_result)


@pytest.mark.parametrize("spec", [
    ("interface", ("test_interface",)),
    ("not interface", ("test_nointer", "test_pass")),

@ -455,7 +462,6 @@ class TestFunctional(object):
        items, rec = testdir.inline_genitems(p)
        self.assert_markers(items, test_foo=('a', 'b'), test_bar=('a',))


    @pytest.mark.issue568
    @pytest.mark.xfail(reason="markers smear on methods of base classes")
    def test_mark_should_not_pass_to_siebling_class(self, testdir):

@ -480,7 +486,6 @@ class TestFunctional(object):
        assert not hasattr(base_item.obj, 'b')
        assert not hasattr(sub_item_other.obj, 'b')


    def test_mark_decorator_baseclasses_merged(self, testdir):
        p = testdir.makepyfile("""
            import pytest

@ -319,6 +319,7 @@ def test_issue156_undo_staticmethod(Sample):
    monkeypatch.undo()
    assert Sample.hello()


def test_issue1338_name_resolving():
    pytest.importorskip('requests')
    monkeypatch = MonkeyPatch()

@ -1,9 +1,11 @@
from __future__ import absolute_import, division, print_function
import pytest


def setup_module(mod):
    mod.nose = pytest.importorskip("nose")


def test_nose_setup(testdir):
    p = testdir.makepyfile("""
        l = []

@ -44,6 +46,7 @@ def test_setup_func_not_callable():
    call_optional(A(), "f")


def test_nose_setup_func(testdir):
    p = testdir.makepyfile("""
        from nose.tools import with_setup

@ -112,6 +115,7 @@ def test_nose_setup_func_failure_2(testdir):
    reprec = testdir.inline_run()
    reprec.assertoutcome(passed=1)


def test_nose_setup_partial(testdir):
    pytest.importorskip("functools")
    p = testdir.makepyfile("""

@ -266,6 +270,7 @@ def test_nose_style_setup_teardown(testdir):
        "*2 passed*",
    ])


def test_nose_setup_ordering(testdir):
    testdir.makepyfile("""
        def setup_module(mod):

@ -305,6 +310,7 @@ def test_apiwrapper_problem_issue260(testdir):
    result = testdir.runpytest()
    result.assert_outcomes(passed=1)


def test_setup_teardown_linking_issue265(testdir):
    # we accidentally didnt integrate nose setupstate with normal setupstate
    # this test ensures that won't happen again

@ -352,6 +358,7 @@ def test_SkipTest_in_test(testdir):
    reprec = testdir.inline_run()
    reprec.assertoutcome(skipped=1)


def test_istest_function_decorator(testdir):
    p = testdir.makepyfile("""
        import nose.tools

@ -362,6 +369,7 @@ def test_istest_function_decorator(testdir):
    result = testdir.runpytest(p)
    result.assert_outcomes(passed=1)


def test_nottest_function_decorator(testdir):
    testdir.makepyfile("""
        import nose.tools

@ -374,6 +382,7 @@ def test_nottest_function_decorator(testdir):
    calls = reprec.getreports("pytest_runtest_logreport")
    assert not calls


def test_istest_class_decorator(testdir):
    p = testdir.makepyfile("""
        import nose.tools

@ -385,6 +394,7 @@ def test_istest_class_decorator(testdir):
    result = testdir.runpytest(p)
    result.assert_outcomes(passed=1)


def test_nottest_class_decorator(testdir):
    testdir.makepyfile("""
        import nose.tools

@ -1,13 +1,16 @@
from __future__ import absolute_import, division, print_function
import sys
import os
import py, pytest
import py
import pytest
from _pytest import config as parseopt


@pytest.fixture
def parser():
    return parseopt.Parser()


class TestParser(object):
    def test_no_help_by_default(self, capsys):
        parser = parseopt.Parser(usage="xyz")

@ -161,12 +164,12 @@ class TestParser(object):
        assert getattr(args, parseopt.FILE_OR_DIR) == ['4', '2']
        args = parser.parse(['-R', '-S', '4', '2', '-R'])
        assert getattr(args, parseopt.FILE_OR_DIR) == ['4', '2']
        assert args.R == True
        assert args.S == False
        assert args.R is True
        assert args.S is False
        args = parser.parse(['-R', '4', '-S', '2'])
        assert getattr(args, parseopt.FILE_OR_DIR) == ['4', '2']
        assert args.R == True
        assert args.S == False
        assert args.R is True
        assert args.S is False

    def test_parse_defaultgetter(self):
        def defaultget(option):

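The == True / == False comparisons above are rewritten as identity tests, which silences flake8 check E712 (comparison to True/False). The identity test is safe here because argparse stores the actual True/False singletons; a minimal runnable sketch with a hypothetical flag:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('-R', action='store_true', default=False)
    args = parser.parse_args(['-R'])

    assert args.R == True   # works, but flake8 reports E712
    assert args.R is True   # preferred: identity check against the singleton
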
@ -3,6 +3,7 @@ from __future__ import absolute_import, division, print_function
import sys
import pytest


class TestPasteCapture(object):

    @pytest.fixture

@ -114,5 +115,3 @@ class TestPaste(object):
        assert 'lexer=%s' % lexer in data.decode()
        assert 'code=full-paste-contents' in data.decode()
        assert 'expiry=1week' in data.decode()

@ -33,7 +33,6 @@ def custom_pdb_calls():
    return called




class TestPDB(object):

    @pytest.fixture

@ -377,7 +376,6 @@ class TestPDB(object):
        ])
        assert custom_pdb_calls == ["init", "reset", "interaction"]


    def test_pdb_custom_cls_without_pdb(self, testdir, custom_pdb_calls):
        p1 = testdir.makepyfile("""xxx """)
        result = testdir.runpytest_inprocess(

@ -12,6 +12,7 @@ from _pytest.main import EXIT_NOTESTSCOLLECTED, Session
def pytestpm():
    return PytestPluginManager()


class TestPytestPluginInteractions(object):
    def test_addhooks_conftestplugin(self, testdir):
        testdir.makepyfile(newhooks="""

@ -197,6 +198,7 @@ def test_namespace_has_default_and_env_plugins(testdir):
    result = testdir.runpython(p)
    assert result.ret == 0


def test_default_markers(testdir):
    result = testdir.runpytest("--markers")
    result.stdout.fnmatch_lines([

@ -64,6 +64,7 @@ def test_parseconfig(testdir):
    assert config2 != config1
    assert config1 != pytest.config


def test_testdir_runs_with_plugin(testdir):
    testdir.makepyfile("""
        pytest_plugins = "pytester"

@ -78,6 +79,7 @@ def make_holder():
    class apiclass(object):
        def pytest_xyz(self, arg):
            "x"

        def pytest_xyz_noarg(self):
            "x"

@ -117,6 +119,7 @@ def test_makepyfile_unicode(testdir):
    unichr = chr
    testdir.makepyfile(unichr(0xfffd))


def test_inline_run_clean_modules(testdir):
    test_mod = testdir.makepyfile("def test_foo(): assert True")
    result = testdir.inline_run(str(test_mod))

@ -126,6 +129,7 @@ def test_inline_run_clean_modules(testdir):
    result2 = testdir.inline_run(str(test_mod))
    assert result2.ret == EXIT_TESTSFAILED


def test_assert_outcomes_after_pytest_erro(testdir):
    testdir.makepyfile("def test_foo(): assert True")

@ -283,9 +283,11 @@ class TestWarns(object):
        assert str(record[0].message) == "user"
        assert str(record[1].message) == "runtime"

        class MyUserWarning(UserWarning): pass
        class MyUserWarning(UserWarning):
            pass

        class MyRuntimeWarning(RuntimeWarning): pass
        class MyRuntimeWarning(RuntimeWarning):
            pass

        with pytest.warns((UserWarning, RuntimeWarning)) as record:
            warnings.warn("user", MyUserWarning)

@ -295,7 +297,6 @@ class TestWarns(object):
        assert str(record[0].message) == "user"
        assert str(record[1].message) == "runtime"


    def test_double_test(self, testdir):
        """If a test is run again, the warning should still be raised"""
        testdir.makepyfile('''

@ -32,6 +32,7 @@ def test_generic_path(testdir):
    res = generic_path(item)
    assert res == 'test/a:B().c[1]'


def test_write_log_entry():
    reslog = ResultLog(None, None)
    reslog.logfile = py.io.TextIO()

@ -176,6 +177,7 @@ def test_generic(testdir, LineMatcher):
        "x *:test_xfail_norun",
    ])


def test_makedir_for_resultlog(testdir, LineMatcher):
    """--resultlog should automatically create directories for the log file"""
    testdir.plugins.append("resultlog")

@ -224,5 +226,3 @@ def test_failure_issue380(testdir):
    """)
    result = testdir.runpytest("--resultlog=log")
    assert result.ret == 2

@ -8,6 +8,7 @@ import pytest
import sys
from _pytest import runner, main


class TestSetupState(object):
    def test_setup(self, testdir):
        ss = runner.SetupState()

@ -39,11 +40,14 @@ class TestSetupState(object):
    def test_teardown_multiple_one_fails(self, testdir):
        r = []

        def fin1(): r.append('fin1')
        def fin1():
            r.append('fin1')

        def fin2(): raise Exception('oops')
        def fin2():
            raise Exception('oops')

        def fin3(): r.append('fin3')
        def fin3():
            r.append('fin3')

        item = testdir.getitem("def test_func(): pass")
        ss = runner.SetupState()

@ -58,9 +62,11 @@ class TestSetupState(object):
    def test_teardown_multiple_fail(self, testdir):
        # Ensure the first exception is the one which is re-raised.
        # Ideally both would be reported however.
        def fin1(): raise Exception('oops1')
        def fin1():
            raise Exception('oops1')

        def fin2(): raise Exception('oops2')
        def fin2():
            raise Exception('oops2')

        item = testdir.getitem("def test_func(): pass")
        ss = runner.SetupState()

@ -316,6 +322,7 @@ class BaseFunctionalTests(object):
        else:
            pytest.fail("did not raise")


class TestExecutionNonForked(BaseFunctionalTests):
    def getrunner(self):
        def f(item):

@ -333,6 +340,7 @@ class TestExecutionNonForked(BaseFunctionalTests):
        else:
            pytest.fail("did not raise")


class TestExecutionForked(BaseFunctionalTests):
    pytestmark = pytest.mark.skipif("not hasattr(os, 'fork')")

@ -351,6 +359,7 @@ class TestExecutionForked(BaseFunctionalTests):
        assert rep.failed
        assert rep.when == "???"


class TestSessionReports(object):
    def test_collect_result(self, testdir):
        col = testdir.getmodulecol("""

@ -380,6 +389,7 @@ reporttypes = [
    runner.CollectReport,
]


@pytest.mark.parametrize('reporttype', reporttypes, ids=[x.__name__ for x in reporttypes])
def test_report_extra_parameters(reporttype):
    if hasattr(py.std.inspect, 'signature'):

@ -390,6 +400,7 @@ def test_report_extra_parameters(reporttype):
    report = reporttype(newthing=1, **basekw)
    assert report.newthing == 1


def test_callinfo():
    ci = runner.CallInfo(lambda: 0, '123')
    assert ci.when == "123"

@ -403,6 +414,8 @@ def test_callinfo():
# design question: do we want general hooks in python files?
# then something like the following functional tests makes sense


@pytest.mark.xfail
def test_runtest_in_module_ordering(testdir):
    p1 = testdir.makepyfile("""

@ -439,6 +452,7 @@ def test_outcomeexception_exceptionattributes():
    outcome = runner.OutcomeException('test')
    assert outcome.args[0] == outcome.msg


def test_pytest_exit():
    try:
        pytest.exit("hello")

@ -446,6 +460,7 @@ def test_pytest_exit():
    excinfo = _pytest._code.ExceptionInfo()
    assert excinfo.errisinstance(KeyboardInterrupt)


def test_pytest_fail():
    try:
        pytest.fail("hello")

@ -454,6 +469,7 @@ def test_pytest_fail():
    s = excinfo.exconly(tryshort=True)
    assert s.startswith("Failed")


def test_pytest_exit_msg(testdir):
    testdir.makeconftest("""
        import pytest

@ -466,6 +482,7 @@ def test_pytest_exit_msg(testdir):
        "Exit: oh noes",
    ])


def test_pytest_fail_notrace(testdir):
    testdir.makepyfile("""
        import pytest

@ -531,6 +548,7 @@ def test_exception_printing_skip():
    s = excinfo.exconly(tryshort=True)
    assert s.startswith("Skipped")


def test_importorskip(monkeypatch):
    importorskip = pytest.importorskip

@ -561,10 +579,12 @@ def test_importorskip(monkeypatch):
        print(_pytest._code.ExceptionInfo())
        pytest.fail("spurious skip")


def test_importorskip_imports_last_module_part():
    ospath = pytest.importorskip("os.path")
    assert os.path == ospath


def test_importorskip_dev_module(monkeypatch):
    try:
        mod = py.std.types.ModuleType("mockmodule")

@ -754,5 +774,3 @@ class TestReportContents(object):
        rep = reports[1]
        assert rep.capstdout == ''
        assert rep.capstderr == ''

@ -36,6 +36,7 @@ def test_module_and_function_setup(testdir):
    rep = reprec.matchreport("test_module")
    assert rep.passed


def test_module_setup_failure_no_teardown(testdir):
    reprec = testdir.inline_runsource("""
        l = []

@ -53,6 +54,7 @@ def test_module_setup_failure_no_teardown(testdir):
    calls = reprec.getcalls("pytest_runtest_setup")
    assert calls[0].item.module.l == [1]


def test_setup_function_failure_no_teardown(testdir):
    reprec = testdir.inline_runsource("""
        modlevel = []

@ -69,6 +71,7 @@ def test_setup_function_failure_no_teardown(testdir):
    calls = reprec.getcalls("pytest_runtest_setup")
    assert calls[0].item.module.modlevel == [1]


def test_class_setup(testdir):
    reprec = testdir.inline_runsource("""
        class TestSimpleClassSetup(object):

@ -92,6 +95,7 @@ def test_class_setup(testdir):
    """)
    reprec.assertoutcome(passed=1 + 2 + 1)


def test_class_setup_failure_no_teardown(testdir):
    reprec = testdir.inline_runsource("""
        class TestSimpleClassSetup(object):

@ -110,6 +114,7 @@ def test_class_setup_failure_no_teardown(testdir):
    """)
    reprec.assertoutcome(failed=1, passed=1)


def test_method_setup(testdir):
    reprec = testdir.inline_runsource("""
        class TestSetupMethod(object):

@ -126,6 +131,7 @@ def test_method_setup(testdir):
    """)
    reprec.assertoutcome(passed=2)


def test_method_setup_failure_no_teardown(testdir):
    reprec = testdir.inline_runsource("""
        class TestMethodSetup(object):

@ -145,6 +151,7 @@ def test_method_setup_failure_no_teardown(testdir):
    """)
    reprec.assertoutcome(failed=1, passed=1)


def test_method_generator_setup(testdir):
    reprec = testdir.inline_runsource("""
        class TestSetupTeardownOnInstance(object):

@ -167,6 +174,7 @@ def test_method_generator_setup(testdir):
    """)
    reprec.assertoutcome(passed=1, failed=1)


def test_func_generator_setup(testdir):
    reprec = testdir.inline_runsource("""
        import sys

@ -195,6 +203,7 @@ def test_func_generator_setup(testdir):
    rep = reprec.matchreport("test_one", names="pytest_runtest_logreport")
    assert rep.passed


def test_method_setup_uses_fresh_instances(testdir):
    reprec = testdir.inline_runsource("""
        class TestSelfState1(object):

@ -207,6 +216,7 @@ def test_method_setup_uses_fresh_instances(testdir):
    """)
    reprec.assertoutcome(passed=2, failed=0)


def test_setup_that_skips_calledagain(testdir):
    p = testdir.makepyfile("""
        import pytest

@ -220,6 +230,7 @@ def test_setup_that_skips_calledagain(testdir):
    reprec = testdir.inline_run(p)
    reprec.assertoutcome(skipped=2)


def test_setup_fails_again_on_all_tests(testdir):
    p = testdir.makepyfile("""
        import pytest

@ -233,6 +244,7 @@ def test_setup_fails_again_on_all_tests(testdir):
    reprec = testdir.inline_run(p)
    reprec.assertoutcome(failed=2)


def test_setup_funcarg_setup_when_outer_scope_fails(testdir):
    p = testdir.makepyfile("""
        import pytest

@ -3,6 +3,7 @@ import pytest
from _pytest.main import EXIT_NOTESTSCOLLECTED


class SessionTests(object):
    def test_basic_testitem_events(self, testdir):
        tfile = testdir.makepyfile("""

@ -21,7 +22,10 @@ class SessionTests(object):
        assert len(skipped) == 0
        assert len(passed) == 1
        assert len(failed) == 3
        end = lambda x: x.nodeid.split("::")[-1]

        def end(x):
            return x.nodeid.split("::")[-1]

        assert end(failed[0]) == "test_one_one"
        assert end(failed[1]) == "test_other"
        itemstarted = reprec.getcalls("pytest_itemcollected")

@ -135,6 +139,7 @@ class SessionTests(object):
        assert len(reports) == 1
        assert reports[0].skipped


class TestNewSession(SessionTests):

    def test_order_of_execution(self, testdir):

@ -215,12 +220,14 @@ def test_plugin_specify(testdir):
    #        "config.do_configure(config)"
    # )


def test_plugin_already_exists(testdir):
    config = testdir.parseconfig("-p", "terminal")
    assert config.option.plugins == ['terminal']
    config._do_configure()
    config._ensure_unconfigure()


def test_exclude(testdir):
    hellodir = testdir.mkdir("hello")
    hellodir.join("test_hello.py").write("x y syntaxerror")

@ -231,6 +238,7 @@ def test_exclude(testdir):
    assert result.ret == 0
    result.stdout.fnmatch_lines(["*1 passed*"])


def test_sessionfinish_with_start(testdir):
    testdir.makeconftest("""
        import os

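Replacing the assigned lambda with a named def follows flake8 check E731 (do not assign a lambda expression, use a def); a def also carries a real name in tracebacks. A minimal sketch:

    end = lambda x: x.nodeid.split("::")[-1]   # flagged by flake8 as E731

    def end(x):                                # equivalent, flake8-clean
        return x.nodeid.split("::")[-1]
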
@ -350,7 +350,6 @@ class TestXFail(object):
            "*1 xfailed*",
        ])


    @pytest.mark.parametrize('expected, actual, matchline',
                             [('TypeError', 'TypeError', "*1 xfailed*"),
                              ('(AttributeError, TypeError)', 'TypeError', "*1 xfailed*"),

@ -582,6 +581,7 @@ class TestSkip(object):
            "*1 skipped*",
        ])


class TestSkipif(object):
    def test_skipif_conditional(self, testdir):
        item = testdir.getitem("""

@ -687,6 +687,7 @@ def test_skip_reasons_folding():
    assert lineno == lineno
    assert reason == message


def test_skipped_reasons_functional(testdir):
    testdir.makepyfile(
        test_one="""

@ -711,6 +712,7 @@ def test_skipped_reasons_functional(testdir):
    ])
    assert result.ret == 0


def test_reportchars(testdir):
    testdir.makepyfile("""
        import pytest

@ -733,6 +735,7 @@ def test_reportchars(testdir):
        "SKIP*four*",
    ])


def test_reportchars_error(testdir):
    testdir.makepyfile(
        conftest="""

@ -748,6 +751,7 @@ def test_reportchars_error(testdir):
        'ERROR*test_foo*',
    ])


def test_reportchars_all(testdir):
    testdir.makepyfile("""
        import pytest

@ -770,6 +774,7 @@ def test_reportchars_all(testdir):
        "XPASS*test_3*",
    ])


def test_reportchars_all_error(testdir):
    testdir.makepyfile(
        conftest="""

@ -785,6 +790,7 @@ def test_reportchars_all_error(testdir):
        'ERROR*test_foo*',
    ])


@pytest.mark.xfail("hasattr(sys, 'pypy_version_info')")
def test_errors_in_xfail_skip_expressions(testdir):
    testdir.makepyfile("""

@ -816,6 +822,7 @@ def test_errors_in_xfail_skip_expressions(testdir):
        "*1 pass*2 error*",
    ])


def test_xfail_skipif_with_globals(testdir):
    testdir.makepyfile("""
        import pytest

@ -834,6 +841,7 @@ def test_xfail_skipif_with_globals(testdir):
        "*x == 3*",
    ])


def test_direct_gives_error(testdir):
    testdir.makepyfile("""
        import pytest

@ -854,6 +862,7 @@ def test_default_markers(testdir):
        "*xfail(*condition, reason=None, run=True, raises=None, strict=False)*expected failure*",
    ])


def test_xfail_test_setup_exception(testdir):
    testdir.makeconftest("""
        def pytest_runtest_setup():

@ -870,6 +879,7 @@ def test_xfail_test_setup_exception(testdir):
    assert 'xfailed' in result.stdout.str()
    assert 'xpassed' not in result.stdout.str()


def test_imperativeskip_on_xfail_test(testdir):
    testdir.makepyfile("""
        import pytest

@ -893,6 +903,7 @@ def test_imperativeskip_on_xfail_test(testdir):
        *2 skipped*
    """)


class TestBooleanCondition(object):
    def test_skipif(self, testdir):
        testdir.makepyfile("""

@ -31,6 +31,7 @@ class Option(object):
            l.append('--fulltrace')
        return l


def pytest_generate_tests(metafunc):
    if "option" in metafunc.fixturenames:
        metafunc.addcall(id="default",

@ -320,6 +321,7 @@ def test_repr_python_version(monkeypatch):
    finally:
        monkeypatch.undo()  # do this early as pytest can get confused


class TestFixtureReporting(object):
    def test_setup_fixture_error(self, testdir):
        testdir.makepyfile("""

@ -405,6 +407,7 @@ class TestFixtureReporting(object):
            "*1 failed*",
        ])


class TestTerminalFunctional(object):
    def test_deselected(self, testdir):
        testpath = testdir.makepyfile("""

@ -552,11 +555,13 @@ def test_fail_extra_reporting(testdir):
        "FAIL*test_fail_extra_reporting*",
    ])


def test_fail_reporting_on_pass(testdir):
    testdir.makepyfile("def test_this(): assert 1")
    result = testdir.runpytest('-rf')
    assert 'short test summary' not in result.stdout.str()


def test_pass_extra_reporting(testdir):
    testdir.makepyfile("def test_this(): assert 1")
    result = testdir.runpytest()

@ -567,11 +572,13 @@ def test_pass_extra_reporting(testdir):
        "PASS*test_pass_extra_reporting*",
    ])


def test_pass_reporting_on_fail(testdir):
    testdir.makepyfile("def test_this(): assert 0")
    result = testdir.runpytest('-rp')
    assert 'short test summary' not in result.stdout.str()


def test_pass_output_reporting(testdir):
    testdir.makepyfile("""
        def test_pass_output():

@ -584,6 +591,7 @@ def test_pass_output_reporting(testdir):
        "Four score and seven years ago...",
    ])


def test_color_yes(testdir):
    testdir.makepyfile("def test_this(): assert 1")
    result = testdir.runpytest('--color=yes')

@ -660,6 +668,7 @@ def test_terminalreporter_reportopt_addopts(testdir):
        "*1 passed*"
    ])


def test_tbstyle_short(testdir):
    p = testdir.makepyfile("""
        import pytest

@ -685,6 +694,7 @@ def test_tbstyle_short(testdir):
    assert 'x = 0' in s
    assert 'assert x' in s


def test_traceconfig(testdir, monkeypatch):
    result = testdir.runpytest("--traceconfig")
    result.stdout.fnmatch_lines([

@ -697,6 +707,7 @@ class TestGenericReporting(object):
    """ this test class can be subclassed with a different option
        provider to run e.g. distributed tests.
    """

    def test_collect_fail(self, testdir, option):
        testdir.makepyfile("import xyz\n")
        result = testdir.runpytest(*option.args)

@ -723,7 +734,6 @@ class TestGenericReporting(object):
            "*2 failed*",
        ])


    def test_tb_option(self, testdir, option):
        testdir.makepyfile("""
            import pytest

@ -787,6 +797,7 @@ def pytest_report_header(config, startdir):
        str(testdir.tmpdir),
    ])


@pytest.mark.xfail("not hasattr(os, 'dup')")
def test_fdopen_kept_alive_issue124(testdir):
    testdir.makepyfile("""

@ -805,6 +816,7 @@ def test_fdopen_kept_alive_issue124(testdir):
        "*2 passed*"
    ])


def test_tbstyle_native_setup_error(testdir):
    testdir.makepyfile("""
        import pytest

@ -820,6 +832,7 @@ def test_tbstyle_native_setup_error(testdir):
        '*File *test_tbstyle_native_setup_error.py", line *, in setup_error_fixture*'
    ])


def test_terminal_summary(testdir):
    testdir.makeconftest("""
        def pytest_terminal_summary(terminalreporter, exitstatus):

@ -5,6 +5,7 @@ import pytest
from _pytest.tmpdir import tmpdir


def test_funcarg(testdir):
    testdir.makepyfile("""
        def pytest_generate_tests(metafunc):

@ -29,12 +30,14 @@ def test_funcarg(testdir):
    bn = p.basename.strip("0123456789")
    assert bn == "qwe__abc"


def test_ensuretemp(recwarn):
    d1 = pytest.ensuretemp('hello')
    d2 = pytest.ensuretemp('hello')
    assert d1 == d2
    assert d1.check(dir=1)


class TestTempdirHandler(object):
    def test_mktemp(self, testdir):
        from _pytest.tmpdir import TempdirFactory

@ -49,6 +52,7 @@ class TestTempdirHandler(object):
        assert tmp2.relto(t.getbasetemp()).startswith("this")
        assert tmp2 != tmp


class TestConfigTmpdir(object):
    def test_getbasetemp_custom_removes_old(self, testdir):
        mytemp = testdir.tmpdir.join("xyz")

@ -76,6 +80,7 @@ def test_basetemp(testdir):
    assert result.ret == 0
    assert mytemp.join('hello').check()


@pytest.mark.skipif(not hasattr(py.path.local, 'mksymlinkto'),
                    reason="symlink not available on this platform")
def test_tmpdir_always_is_realpath(testdir):

@ -3,6 +3,7 @@ from _pytest.main import EXIT_NOTESTSCOLLECTED
import pytest
import gc


def test_simple_unittest(testdir):
    testpath = testdir.makepyfile("""
        import unittest

@ -16,6 +17,7 @@ def test_simple_unittest(testdir):
    assert reprec.matchreport("testpassing").passed
    assert reprec.matchreport("test_failing").failed


def test_runTest_method(testdir):
    testdir.makepyfile("""
        import unittest

@ -35,6 +37,7 @@ def test_runTest_method(testdir):
        *2 passed*
    """)


def test_isclasscheck_issue53(testdir):
    testpath = testdir.makepyfile("""
        import unittest

@ -46,6 +49,7 @@ def test_isclasscheck_issue53(testdir):
    result = testdir.runpytest(testpath)
    assert result.ret == EXIT_NOTESTSCOLLECTED


def test_setup(testdir):
    testpath = testdir.makepyfile("""
        import unittest

@ -66,6 +70,7 @@ def test_setup(testdir):
    rep = reprec.matchreport("test_both", when="teardown")
    assert rep.failed and '42' in str(rep.longrepr)


def test_setUpModule(testdir):
    testpath = testdir.makepyfile("""
        l = []

@ -87,6 +92,7 @@ def test_setUpModule(testdir):
        "*2 passed*",
    ])


def test_setUpModule_failing_no_teardown(testdir):
    testpath = testdir.makepyfile("""
        l = []

@ -105,6 +111,7 @@ def test_setUpModule_failing_no_teardown(testdir):
    call = reprec.getcalls("pytest_runtest_setup")[0]
    assert not call.item.module.l


def test_new_instances(testdir):
    testpath = testdir.makepyfile("""
        import unittest

@ -117,6 +124,7 @@ def test_new_instances(testdir):
    reprec = testdir.inline_run(testpath)
    reprec.assertoutcome(passed=2)


def test_teardown(testdir):
    testpath = testdir.makepyfile("""
        import unittest

@ -136,6 +144,7 @@ def test_teardown(testdir):
    assert passed == 2
    assert passed + skipped + failed == 2


def test_teardown_issue1649(testdir):
    """
    Are TestCase objects cleaned up? Often unittest TestCase objects set

@ -158,6 +167,7 @@ def test_teardown_issue1649(testdir):
    for obj in gc.get_objects():
        assert type(obj).__name__ != 'TestCaseObjectsShouldBeCleanedUp'


@pytest.mark.skipif("sys.version_info < (2,7)")
def test_unittest_skip_issue148(testdir):
    testpath = testdir.makepyfile("""

@ -177,6 +187,7 @@ def test_unittest_skip_issue148(testdir):
    reprec = testdir.inline_run(testpath)
    reprec.assertoutcome(skipped=1)


def test_method_and_teardown_failing_reporting(testdir):
    testdir.makepyfile("""
        import unittest, pytest

@ -196,6 +207,7 @@ def test_method_and_teardown_failing_reporting(testdir):
        "*1 failed*1 error*",
    ])


def test_setup_failure_is_shown(testdir):
    testdir.makepyfile("""
        import unittest

@ -216,6 +228,7 @@ def test_setup_failure_is_shown(testdir):
    ])
    assert 'never42' not in result.stdout.str()


def test_setup_setUpClass(testdir):
    testpath = testdir.makepyfile("""
        import unittest

@ -238,6 +251,7 @@ def test_setup_setUpClass(testdir):
    reprec = testdir.inline_run(testpath)
    reprec.assertoutcome(passed=3)


def test_setup_class(testdir):
    testpath = testdir.makepyfile("""
        import unittest

@ -279,6 +293,7 @@ def test_testcase_adderrorandfailure_defers(testdir, type):
    result = testdir.runpytest()
    assert 'should not raise' not in result.stdout.str()


@pytest.mark.parametrize("type", ['Error', 'Failure'])
def test_testcase_custom_exception_info(testdir, type):
    testdir.makepyfile("""

@ -310,6 +325,7 @@ def test_testcase_custom_exception_info(testdir, type):
        "*1 failed*",
    ])


def test_testcase_totally_incompatible_exception_info(testdir):
    item, = testdir.getitems("""
        from unittest import TestCase

@ -321,6 +337,7 @@ def test_testcase_totally_incompatible_exception_info(testdir):
    excinfo = item._excinfo.pop(0)
    assert 'ERROR: Unknown Incompatible' in str(excinfo.getrepr())


def test_module_level_pytestmark(testdir):
    testpath = testdir.makepyfile("""
        import unittest

@ -520,6 +537,7 @@ class TestTrialUnittest(object):
        child.expect("hellopdb")
        child.sendeof()


def test_djangolike_testcase(testdir):
    # contributed from Morten Breekevold
    testdir.makepyfile("""

@ -585,6 +603,7 @@ def test_unittest_not_shown_in_traceback(testdir):
    res = testdir.runpytest()
    assert "failUnlessEqual" not in res.stdout.str()


def test_unorderable_types(testdir):
    testdir.makepyfile("""
        import unittest

@ -602,6 +621,7 @@ def test_unorderable_types(testdir):
    assert "TypeError" not in result.stdout.str()
    assert result.ret == EXIT_NOTESTSCOLLECTED


def test_unittest_typerror_traceback(testdir):
    testdir.makepyfile("""
        import unittest

@ -769,6 +789,7 @@ def test_issue333_result_clearing(testdir):
    reprec = testdir.inline_run()
    reprec.assertoutcome(failed=1)


@pytest.mark.skipif("sys.version_info < (2,7)")
def test_unittest_raise_skip_issue748(testdir):
    testdir.makepyfile(test_foo="""

@ -784,6 +805,7 @@ def test_unittest_raise_skip_issue748(testdir):
|
|||
*1 skipped*
|
||||
""")
|
||||
|
||||
|
||||
@pytest.mark.skipif("sys.version_info < (2,7)")
|
||||
def test_unittest_skip_issue1169(testdir):
|
||||
testdir.makepyfile(test_foo="""
|
||||
|
@ -800,6 +822,7 @@ def test_unittest_skip_issue1169(testdir):
|
|||
*1 skipped*
|
||||
""")
|
||||
|
||||
|
||||
def test_class_method_containing_test_issue1558(testdir):
|
||||
testdir.makepyfile(test_foo="""
|
||||
import unittest
|
||||
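For readers unfamiliar with this test suite, every test touched above follows the same pattern: pytest's testdir fixture (from the pytester plugin) writes a throwaway test module into a temporary directory, runs it in-process, and asserts on the collected outcomes. A minimal sketch of the pattern — the test body here is invented for illustration, not part of the commit:

pytest_plugins = "pytester"  # enables the testdir fixture (place in conftest.py)


def test_example(testdir):
    # Write a temporary test module into the test's tmpdir.
    testdir.makepyfile("""
        import unittest

        class MyTest(unittest.TestCase):
            def test_passing(self):
                self.assertTrue(True)
    """)
    # Run pytest in-process and assert on the run's report counts.
    reprec = testdir.inline_run()
    reprec.assertoutcome(passed=1)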

@@ -8,6 +8,7 @@ import pytest

WARNINGS_SUMMARY_HEADER = 'warnings summary'


@pytest.fixture
def pyfile_with_warnings(testdir, request):
    """

@@ -112,7 +113,6 @@ def test_ignore(testdir, pyfile_with_warnings, method):
    assert WARNINGS_SUMMARY_HEADER not in result.stdout.str()



@pytest.mark.skipif(sys.version_info < (3, 0),
                    reason='warnings message is unicode is ok in python3')
def test_unicode(testdir, pyfile_with_warnings):

13 tox.ini
@@ -12,7 +12,7 @@ envlist=
    py36
    py37
    pypy
    {py27,py35}-{pexpect,xdist,trial,numpy}
    {py27,py35}-{pexpect,xdist,trial}
    py27-nobyte
    doctesting
    freeze

@@ -38,7 +38,8 @@ deps=
[testenv:py27-subprocess]
changedir = .
basepython = python2.7
deps=pytest-xdist>=1.13
deps =
    pytest-xdist>=1.13
    mock
    nose
commands =

@@ -62,7 +63,8 @@ commands =
    {envpython} scripts/check-rst.py

[testenv:py27-xdist]
deps=pytest-xdist>=1.13
deps =
    pytest-xdist>=1.13
    mock
    nose
    hypothesis>=3.5.2

@@ -146,7 +148,8 @@ commands=
changedir = doc/en
skipsdist = True
basepython = python3.5
deps=sphinx
deps =
    sphinx
    PyYAML
    regendoc>=0.6.1
whitelist_externals =

@@ -203,5 +206,5 @@ filterwarnings=
    ignore:.*inspect.getargspec.*deprecated, use inspect.signature.*:DeprecationWarning

[flake8]
ignore =E401,E225,E261,E128,E124,E301,E302,E121,E303,W391,E501,E231,E126,E701,E265,E241,E251,E226,E101,W191,E131,E203,E122,E123,E271,E712,E222,E127,E125,E221,W292,E111,E113,E293,E262,W293,E129,E702,E201,E272,E202,E704,E731,E402
max-line-length = 120
exclude = _pytest/vendored_packages/pluggy.py
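flake8 reads this [flake8] section straight from tox.ini, so no separate configuration file is needed, and every code in the ignore list is suppressed project-wide. An illustrative snippet, not from the commit: assuming the list shown above is in effect, both violations below would pass silently.

import os, sys   # E401 multiple imports on one line -- in the ignore list
x=1              # E225 missing whitespace around operator -- also ignored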