Fixed E128 flake8 errors
continuation line under-indented for visual indent
parent 57438f3efe
commit cf97159009
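For reference, flake8's E128 check flags a continuation line that is under-indented relative to the visual indent set by the opening bracket. A minimal sketch of the pattern fixed throughout this diff (illustrative lines only, not taken from the commit itself):

    # E128: continuation line under-indented for visual indent
    total = sum([1, 2, 3],
        0)

    # fix applied throughout: align the continuation with the opening bracket
    total = sum([1, 2, 3],
                0)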
@@ -339,8 +339,8 @@ class Traceback(list):
                 loc = f.f_locals
                 for otherloc in l:
                     if f.is_true(f.eval(co_equal,
-                        __recursioncache_locals_1=loc,
-                        __recursioncache_locals_2=otherloc)):
+                                        __recursioncache_locals_1=loc,
+                                        __recursioncache_locals_2=otherloc)):
                         return i
             l.append(entry.frame.f_locals)
         return None
@@ -408,7 +408,7 @@ class ExceptionInfo(object):
         return ReprFileLocation(path, lineno+1, exconly)

     def getrepr(self, showlocals=False, style="long",
-            abspath=False, tbfilter=True, funcargs=False):
+                abspath=False, tbfilter=True, funcargs=False):
         """ return str()able representation of this exception info.
             showlocals: show locals per traceback entry
             style: long|short|no|native traceback style
@@ -425,7 +425,7 @@ class ExceptionInfo(object):
             )), self._getreprcrash())

         fmt = FormattedExcinfo(showlocals=showlocals, style=style,
-            abspath=abspath, tbfilter=tbfilter, funcargs=funcargs)
+                               abspath=abspath, tbfilter=tbfilter, funcargs=funcargs)
         return fmt.repr_excinfo(self)

     def __str__(self):
@@ -199,7 +199,7 @@ class Source(object):
 #

 def compile_(source, filename=None, mode='exec', flags=
-        generators.compiler_flag, dont_inherit=0):
+             generators.compiler_flag, dont_inherit=0):
     """ compile the given source to a raw code object,
         and maintain an internal cache which allows later
         retrieval of the source code for the code object
@@ -224,7 +224,7 @@ def cacheshow(config, session):
         val = config.cache.get(key, dummy)
         if val is dummy:
             tw.line("%s contains unreadable content, "
-                "will be ignored" % key)
+                    "will be ignored" % key)
         else:
             tw.line("%s contains:" % key)
             stream = py.io.TextIO()
@@ -59,7 +59,7 @@ def iscoroutinefunction(func):
     which in turns also initializes the "logging" module as side-effect (see issue #8).
     """
     return (getattr(func, '_is_coroutine', False) or
-        (hasattr(inspect, 'iscoroutinefunction') and inspect.iscoroutinefunction(func)))
+            (hasattr(inspect, 'iscoroutinefunction') and inspect.iscoroutinefunction(func)))


 def getlocation(function, curdir):
@@ -276,11 +276,11 @@ class PytestPluginManager(PluginManager):
         # XXX now that the pluginmanager exposes hookimpl(tryfirst...)
         # we should remove tryfirst/trylast as markers
         config.addinivalue_line("markers",
-            "tryfirst: mark a hook implementation function such that the "
-            "plugin machinery will try to call it first/as early as possible.")
+                                "tryfirst: mark a hook implementation function such that the "
+                                "plugin machinery will try to call it first/as early as possible.")
         config.addinivalue_line("markers",
-            "trylast: mark a hook implementation function such that the "
-            "plugin machinery will try to call it last/as late as possible.")
+                                "trylast: mark a hook implementation function such that the "
+                                "plugin machinery will try to call it last/as late as possible.")

     def _warn(self, message):
         kwargs = message if isinstance(message, dict) else {
@@ -777,7 +777,7 @@ class MyOptionParser(argparse.ArgumentParser):
             extra_info = {}
         self._parser = parser
         argparse.ArgumentParser.__init__(self, usage=parser._usage,
-            add_help=False, formatter_class=DropShorterLongHelpFormatter)
+                                         add_help=False, formatter_class=DropShorterLongHelpFormatter)
         # extra_info is a dict of (param -> value) to display if there's
         # an usage error to provide more contextual information to the user
         self.extra_info = extra_info
@@ -940,9 +940,9 @@ class Config(object):
             else:
                 style = "native"
             excrepr = excinfo.getrepr(funcargs=True,
-                showlocals=getattr(option, 'showlocals', False),
-                style=style,
-            )
+                                      showlocals=getattr(option, 'showlocals', False),
+                                      style=style,
+                                      )
             res = self.hook.pytest_internalerror(excrepr=excrepr,
                                                  excinfo=excinfo)
             if not py.builtin.any(res):
@@ -1074,7 +1074,7 @@ class Config(object):
             self.known_args_namespace.confcutdir = confcutdir
         try:
             self.hook.pytest_load_initial_conftests(early_config=self,
-                args=args, parser=self._parser)
+                                                    args=args, parser=self._parser)
         except ConftestImportFailure:
             e = sys.exc_info()[1]
             if ns.help or ns.version:
@@ -24,26 +24,26 @@ DOCTEST_REPORT_CHOICES = (

 def pytest_addoption(parser):
     parser.addini('doctest_optionflags', 'option flags for doctests',
-        type="args", default=["ELLIPSIS"])
+                  type="args", default=["ELLIPSIS"])
     parser.addini("doctest_encoding", 'encoding used for doctest files', default="utf-8")
     group = parser.getgroup("collect")
     group.addoption("--doctest-modules",
-        action="store_true", default=False,
-        help="run doctests in all .py modules",
-        dest="doctestmodules")
+                    action="store_true", default=False,
+                    help="run doctests in all .py modules",
+                    dest="doctestmodules")
     group.addoption("--doctest-report",
-        type=str.lower, default="udiff",
-        help="choose another output format for diffs on doctest failure",
-        choices=DOCTEST_REPORT_CHOICES,
-        dest="doctestreport")
+                    type=str.lower, default="udiff",
+                    help="choose another output format for diffs on doctest failure",
+                    choices=DOCTEST_REPORT_CHOICES,
+                    dest="doctestreport")
     group.addoption("--doctest-glob",
-        action="append", default=[], metavar="pat",
-        help="doctests file matching pattern, default: test*.txt",
-        dest="doctestglob")
+                    action="append", default=[], metavar="pat",
+                    help="doctests file matching pattern, default: test*.txt",
+                    dest="doctestglob")
     group.addoption("--doctest-ignore-import-errors",
-        action="store_true", default=False,
-        help="ignore doctest ImportErrors",
-        dest="doctest_ignore_import_errors")
+                    action="store_true", default=False,
+                    help="ignore doctest ImportErrors",
+                    dest="doctest_ignore_import_errors")


 def pytest_collect_file(path, parent):
@@ -128,7 +128,7 @@ class DoctestItem(pytest.Item):
                 indent = '...'
         if excinfo.errisinstance(doctest.DocTestFailure):
             lines += checker.output_difference(example,
-                doctestfailure.got, report_choice).split("\n")
+                                               doctestfailure.got, report_choice).split("\n")
         else:
             inner_excinfo = ExceptionInfo(excinfo.value.exc_info)
             lines += ["UNEXPECTED EXCEPTION: %s" %
@@ -106,9 +106,9 @@ def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
                 arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
             else:
                 fixturedef = FixtureDef(fixturemanager, '', argname,
-                    get_direct_param_fixture_func,
-                    arg2scope[argname],
-                    valuelist, False, False)
+                                        get_direct_param_fixture_func,
+                                        arg2scope[argname],
+                                        valuelist, False, False)
                 arg2fixturedefs[argname] = [fixturedef]
                 if node is not None:
                     node._name2pseudofixturedef[argname] = fixturedef
@@ -528,7 +528,7 @@ class FixtureRequest(FuncargnamesCompatAttr):
                  "fixture %r with a %r scoped request object, "
                  "involved factories\n%s" % (
                  (requested_scope, argname, invoking_scope, "\n".join(lines))),
-                pytrace=False)
+                 pytrace=False)

     def _factorytraceback(self):
         lines = []
@@ -699,7 +699,7 @@ def call_fixture_func(fixturefunc, request, kwargs):
                 pass
             else:
                 fail_fixturefunc(fixturefunc,
-                    "yield_fixture function has more than one 'yield'")
+                                 "yield_fixture function has more than one 'yield'")

         request.addfinalizer(teardown)
     else:
@@ -41,20 +41,20 @@ class HelpAction(Action):
 def pytest_addoption(parser):
     group = parser.getgroup('debugconfig')
     group.addoption('--version', action="store_true",
-        help="display pytest lib version and import information.")
+                    help="display pytest lib version and import information.")
     group._addoption("-h", "--help", action=HelpAction, dest="help",
-        help="show help message and configuration info")
+                     help="show help message and configuration info")
     group._addoption('-p', action="append", dest="plugins", default = [],
-        metavar="name",
-        help="early-load given plugin (multi-allowed). "
-        "To avoid loading of plugins, use the `no:` prefix, e.g. "
-        "`no:doctest`.")
+                     metavar="name",
+                     help="early-load given plugin (multi-allowed). "
+                          "To avoid loading of plugins, use the `no:` prefix, e.g. "
+                          "`no:doctest`.")
     group.addoption('--traceconfig', '--trace-config',
-        action="store_true", default=False,
-        help="trace considerations of conftest.py files."),
+                    action="store_true", default=False,
+                    help="trace considerations of conftest.py files."),
     group.addoption('--debug',
-        action="store_true", dest="debug", default=False,
-        help="store internal tracing debug information in 'pytestdebug.log'.")
+                    action="store_true", dest="debug", default=False,
+                    help="store internal tracing debug information in 'pytestdebug.log'.")
     group._addoption(
         '-o', '--override-ini', nargs='*', dest="override_ini",
         action="append",
@@ -69,10 +69,10 @@ def pytest_cmdline_parse():
         path = os.path.abspath("pytestdebug.log")
         debugfile = open(path, 'w')
         debugfile.write("versions pytest-%s, py-%s, "
-            "python-%s\ncwd=%s\nargs=%s\n\n" %(
-            pytest.__version__, py.__version__,
-            ".".join(map(str, sys.version_info)),
-            os.getcwd(), config._origargs))
+                        "python-%s\ncwd=%s\nargs=%s\n\n" %(
+                            pytest.__version__, py.__version__,
+                            ".".join(map(str, sys.version_info)),
+                            os.getcwd(), config._origargs))
         config.trace.root.setwriter(debugfile.write)
         undo_tracing = config.pluginmanager.enable_tracing()
         sys.stderr.write("writing pytestdebug information to %s\n" % path)
@@ -90,7 +90,7 @@ def pytest_cmdline_main(config):
     if config.option.version:
         p = py.path.local(pytest.__file__)
         sys.stderr.write("This is pytest version %s, imported from %s\n" %
-            (pytest.__version__, p))
+                         (pytest.__version__, p))
         plugininfo = getpluginversioninfo(config)
         if plugininfo:
             for line in plugininfo:
@@ -161,7 +161,7 @@ def pytest_report_header(config):
     lines = []
     if config.option.debug or config.option.traceconfig:
         lines.append("using: pytest-%s pylib-%s" %
-            (pytest.__version__,py.__version__))
+                     (pytest.__version__,py.__version__))

         verinfo = getpluginversioninfo(config)
         if verinfo:
@@ -29,9 +29,9 @@ EXIT_NOTESTSCOLLECTED = 5

 def pytest_addoption(parser):
     parser.addini("norecursedirs", "directory patterns to avoid for recursion",
-        type="args", default=['.*', 'build', 'dist', 'CVS', '_darcs', '{arch}', '*.egg', 'venv'])
+                  type="args", default=['.*', 'build', 'dist', 'CVS', '_darcs', '{arch}', '*.egg', 'venv'])
     parser.addini("testpaths", "directories to search for tests when no files or directories are given in the command line.",
-        type="args", default=[])
+                  type="args", default=[])
     #parser.addini("dirpatterns",
     #    "patterns specifying possible locations of test files",
     #    type="linelist", default=["**/test_*.txt",
@@ -39,42 +39,42 @@ def pytest_addoption(parser):
     #)
     group = parser.getgroup("general", "running and selection options")
     group._addoption('-x', '--exitfirst', action="store_const",
-        dest="maxfail", const=1,
-        help="exit instantly on first error or failed test."),
+                     dest="maxfail", const=1,
+                     help="exit instantly on first error or failed test."),
     group._addoption('--maxfail', metavar="num",
-        action="store", type=int, dest="maxfail", default=0,
-        help="exit after first num failures or errors.")
+                     action="store", type=int, dest="maxfail", default=0,
+                     help="exit after first num failures or errors.")
     group._addoption('--strict', action="store_true",
-        help="marks not registered in configuration file raise errors.")
+                     help="marks not registered in configuration file raise errors.")
     group._addoption("-c", metavar="file", type=str, dest="inifilename",
-        help="load configuration from `file` instead of trying to locate one of the implicit configuration files.")
+                     help="load configuration from `file` instead of trying to locate one of the implicit configuration files.")
     group._addoption("--continue-on-collection-errors", action="store_true",
-        default=False, dest="continue_on_collection_errors",
-        help="Force test execution even if collection errors occur.")
+                     default=False, dest="continue_on_collection_errors",
+                     help="Force test execution even if collection errors occur.")

     group = parser.getgroup("collect", "collection")
     group.addoption('--collectonly', '--collect-only', action="store_true",
-        help="only collect tests, don't execute them."),
+                    help="only collect tests, don't execute them."),
     group.addoption('--pyargs', action="store_true",
-        help="try to interpret all arguments as python packages.")
+                    help="try to interpret all arguments as python packages.")
     group.addoption("--ignore", action="append", metavar="path",
-        help="ignore path during collection (multi-allowed).")
+                    help="ignore path during collection (multi-allowed).")
     # when changing this to --conf-cut-dir, config.py Conftest.setinitial
     # needs upgrading as well
     group.addoption('--confcutdir', dest="confcutdir", default=None,
-        metavar="dir", type=functools.partial(directory_arg, optname="--confcutdir"),
-        help="only load conftest.py's relative to specified dir.")
+                    metavar="dir", type=functools.partial(directory_arg, optname="--confcutdir"),
+                    help="only load conftest.py's relative to specified dir.")
     group.addoption('--noconftest', action="store_true",
-        dest="noconftest", default=False,
-        help="Don't load any conftest.py files.")
+                    dest="noconftest", default=False,
+                    help="Don't load any conftest.py files.")
     group.addoption('--keepduplicates', '--keep-duplicates', action="store_true",
-        dest="keepduplicates", default=False,
-        help="Keep duplicate tests.")
+                    dest="keepduplicates", default=False,
+                    help="Keep duplicate tests.")

     group = parser.getgroup("debugconfig",
-        "test session debugging and configuration")
+                            "test session debugging and configuration")
     group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir",
-        help="base temporary directory for this test run.")
+                    help="base temporary directory for this test run.")


@@ -618,7 +618,7 @@ class Session(FSCollector):
             items = self._perform_collect(args, genitems)
             self.config.pluginmanager.check_pending()
             hook.pytest_collection_modifyitems(session=self,
-                config=self.config, items=items)
+                                               config=self.config, items=items)
         finally:
             hook.pytest_collection_finish(session=self)
         self.testscollected = len(items)
@@ -9,9 +9,9 @@ import tempfile
 def pytest_addoption(parser):
     group = parser.getgroup("terminal reporting")
     group._addoption('--pastebin', metavar="mode",
-        action='store', dest="pastebin", default=None,
-        choices=['failed', 'all'],
-        help="send failed|all info to bpaste.net pastebin service.")
+                     action='store', dest="pastebin", default=None,
+                     choices=['failed', 'all'],
+                     help="send failed|all info to bpaste.net pastebin service.")


 @pytest.hookimpl(trylast=True)
@@ -25,13 +25,13 @@ from _pytest.assertion.rewrite import AssertionRewritingHook
 def pytest_addoption(parser):
     # group = parser.getgroup("pytester", "pytester (self-tests) options")
     parser.addoption('--lsof',
-        action="store_true", dest="lsof", default=False,
-        help=("run FD checks if lsof is available"))
+                     action="store_true", dest="lsof", default=False,
+                     help=("run FD checks if lsof is available"))

     parser.addoption('--runpytest', default="inprocess", dest="runpytest",
-        choices=("inprocess", "subprocess", ),
-        help=("run pytest sub runs in tests using an 'inprocess' "
-        "or 'subprocess' (python -m main) method"))
+                     choices=("inprocess", "subprocess", ),
+                     help=("run pytest sub runs in tests using an 'inprocess' "
+                           "or 'subprocess' (python -m main) method"))


 def pytest_configure(config):
@@ -62,7 +62,7 @@ class LsofFdLeakChecker(object):
     def _parse_lsof_output(self, out):
         def isopen(line):
             return line.startswith('f') and ("deleted" not in line and
-                'mem' not in line and "txt" not in line and 'cwd' not in line)
+                                             'mem' not in line and "txt" not in line and 'cwd' not in line)

         open_files = []

@@ -130,7 +130,7 @@ def getexecutable(name, cache={}):
         if executable:
             import subprocess
             popen = subprocess.Popen([str(executable), "--version"],
-                universal_newlines=True, stderr=subprocess.PIPE)
+                                     universal_newlines=True, stderr=subprocess.PIPE)
             out, err = popen.communicate()
             if name == "jython":
                 if not err or "2.5" not in err:
@@ -264,7 +264,7 @@ class HookRecorder:
         return [x.report for x in self.getcalls(names)]

     def matchreport(self, inamepart="",
-            names="pytest_runtest_logreport pytest_collectreport", when=None):
+                    names="pytest_runtest_logreport pytest_collectreport", when=None):
         """ return a testreport whose dotted import path matches """
         l = []
         for rep in self.getreports(names=names):
@@ -933,7 +933,7 @@ class Testdir:
         try:
             now = time.time()
             popen = self.popen(cmdargs, stdout=f1, stderr=f2,
-                close_fds=(sys.platform != "win32"))
+                               close_fds=(sys.platform != "win32"))
             ret = popen.wait()
         finally:
             f1.close()
@@ -987,7 +987,7 @@ class Testdir:

         """
         p = py.path.local.make_numbered_dir(prefix="runpytest-",
-            keep=None, rootdir=self.tmpdir)
+                                            keep=None, rootdir=self.tmpdir)
         args = ('--basetemp=%s' % p, ) + args
         #for x in args:
         #    if '--confcutdir' in str(x):
@@ -62,8 +62,8 @@ def pyobj_property(name):
 def pytest_addoption(parser):
     group = parser.getgroup("general")
     group.addoption('--fixtures', '--funcargs',
-        action="store_true", dest="showfixtures", default=False,
-        help="show available fixtures, sorted by plugin appearance")
+                    action="store_true", dest="showfixtures", default=False,
+                    help="show available fixtures, sorted by plugin appearance")
     group.addoption(
         '--fixtures-per-test',
         action="store_true",
@@ -72,20 +72,20 @@ def pytest_addoption(parser):
         help="show fixtures per test",
     )
     parser.addini("usefixtures", type="args", default=[],
-        help="list of default fixtures to be used with this project")
+                  help="list of default fixtures to be used with this project")
     parser.addini("python_files", type="args",
-        default=['test_*.py', '*_test.py'],
-        help="glob-style file patterns for Python test module discovery")
+                  default=['test_*.py', '*_test.py'],
+                  help="glob-style file patterns for Python test module discovery")
     parser.addini("python_classes", type="args", default=["Test",],
-        help="prefixes or glob names for Python test class discovery")
+                  help="prefixes or glob names for Python test class discovery")
     parser.addini("python_functions", type="args", default=["test",],
-        help="prefixes or glob names for Python test function and "
-        "method discovery")
+                  help="prefixes or glob names for Python test function and "
+                       "method discovery")

     group.addoption("--import-mode", default="prepend",
-        choices=["prepend", "append"], dest="importmode",
-        help="prepend/append to sys.path when importing test modules, "
-        "default is to prepend.")
+                    choices=["prepend", "append"], dest="importmode",
+                    help="prepend/append to sys.path when importing test modules, "
+                         "default is to prepend.")


 def pytest_cmdline_main(config):
@@ -114,18 +114,18 @@ def pytest_generate_tests(metafunc):

 def pytest_configure(config):
     config.addinivalue_line("markers",
-        "parametrize(argnames, argvalues): call a test function multiple "
-        "times passing in different arguments in turn. argvalues generally "
-        "needs to be a list of values if argnames specifies only one name "
-        "or a list of tuples of values if argnames specifies multiple names. "
-        "Example: @parametrize('arg1', [1,2]) would lead to two calls of the "
-        "decorated test function, one with arg1=1 and another with arg1=2."
-        "see http://pytest.org/latest/parametrize.html for more info and "
-        "examples."
+                            "parametrize(argnames, argvalues): call a test function multiple "
+                            "times passing in different arguments in turn. argvalues generally "
+                            "needs to be a list of values if argnames specifies only one name "
+                            "or a list of tuples of values if argnames specifies multiple names. "
+                            "Example: @parametrize('arg1', [1,2]) would lead to two calls of the "
+                            "decorated test function, one with arg1=1 and another with arg1=2."
+                            "see http://pytest.org/latest/parametrize.html for more info and "
+                            "examples."
     )
     config.addinivalue_line("markers",
-        "usefixtures(fixturename1, fixturename2, ...): mark tests as needing "
-        "all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures "
+                            "usefixtures(fixturename1, fixturename2, ...): mark tests as needing "
+                            "all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures "
     )


@@ -177,8 +177,8 @@ def pytest_pycollect_makeitem(collector, name, obj):
         # We musn't if it's been wrapped with mock.patch (python 2 only)
         if not (isfunction(obj) or isfunction(get_real_func(obj))):
             collector.warn(code="C2", message=
-                "cannot collect %r because it is not a function."
-                % name, )
+                           "cannot collect %r because it is not a function."
+                           % name, )
         elif getattr(obj, "__test__", True):
             if is_generator(obj):
                 res = Generator(name, parent=collector)
@@ -496,7 +496,7 @@ class Class(PyCollector):
             return []
         if hasinit(self.obj):
             self.warn("C1", "cannot collect test class %r because it has a "
-                "__init__ constructor" % self.obj.__name__)
+                      "__init__ constructor" % self.obj.__name__)
             return []
         elif hasnew(self.obj):
             self.warn("C1", "cannot collect test class %r because it has a "
@@ -582,7 +582,7 @@ class FunctionMixin(PyobjMixin):
             if not excinfo.value.pytrace:
                 return py._builtin._totext(excinfo.value)
         return super(FunctionMixin, self)._repr_failure_py(excinfo,
-            style=style)
+                                                           style=style)

     def repr_failure(self, excinfo, outerr=None):
         assert outerr is None, "XXX outerr usage is deprecated"
@@ -737,7 +737,7 @@ class Metafunc(fixtures.FuncargnamesCompatAttr):
         self._arg2fixturedefs = fixtureinfo.name2fixturedefs

     def parametrize(self, argnames, argvalues, indirect=False, ids=None,
-            scope=None):
+                    scope=None):
         """ Add new invocations to the underlying test function using the list
         of argvalues for the given argnames. Parametrization is performed
         during the collection phase. If you need to setup expensive resources
@@ -1088,7 +1088,7 @@ def _showfixtures_main(config, session):
                 tw.line(" " + line.strip())
         else:
             tw.line(" %s: no docstring available" %(loc,),
-                red=True)
+                    red=True)


 # builtin pytest.raises helper
@@ -9,8 +9,8 @@ import os
 def pytest_addoption(parser):
     group = parser.getgroup("terminal reporting", "resultlog plugin options")
     group.addoption('--resultlog', '--result-log', action="store",
-        metavar="path", default=None,
-        help="DEPRECATED path for machine-readable result log.")
+                    metavar="path", default=None,
+                    help="DEPRECATED path for machine-readable result log.")

 def pytest_configure(config):
     resultlog = config.option.resultlog
@@ -16,8 +16,8 @@ from _pytest._code.code import TerminalRepr, ExceptionInfo
 def pytest_addoption(parser):
     group = parser.getgroup("terminal reporting", "reporting", after="general")
     group.addoption('--durations',
-        action="store", type=int, default=None, metavar="N",
-        help="show N slowest setup/test durations (N=0 for all)."),
+                    action="store", type=int, default=None, metavar="N",
+                    help="show N slowest setup/test durations (N=0 for all)."),

 def pytest_terminal_summary(terminalreporter):
     durations = terminalreporter.config.option.durations
@@ -42,7 +42,7 @@ def pytest_terminal_summary(terminalreporter):
     for rep in dlist:
         nodeid = rep.nodeid.replace("::()::", "::")
         tr.write_line("%02.2fs %-8s %s" %
-            (rep.duration, rep.when, nodeid))
+                      (rep.duration, rep.when, nodeid))

 def pytest_sessionstart(session):
     session._setupstate = SetupState()
@@ -72,7 +72,7 @@ def runtestprotocol(item, log=True, nextitem=None):
         if not item.config.option.setuponly:
             reports.append(call_and_report(item, "call", log))
         reports.append(call_and_report(item, "teardown", log,
-            nextitem=nextitem))
+                                       nextitem=nextitem))
     # after all teardown hooks have been called
     # want funcargs and request info to go away
     if hasrequest:
@@ -266,7 +266,7 @@ def pytest_runtest_makereport(item, call):
             longrepr = item.repr_failure(excinfo)
         else: # exception in setup or teardown
             longrepr = item._repr_failure_py(excinfo,
-                style=item.config.option.tbstyle)
+                                             style=item.config.option.tbstyle)
     for rwhen, key, content in item._report_sections:
         sections.append(("Captured %s %s" %(key, rwhen), content))
     return TestReport(item.nodeid, item.location,
@@ -344,7 +344,7 @@ def pytest_make_collect_report(collector):
             errorinfo = CollectErrorRepr(errorinfo)
         longrepr = errorinfo
     rep = CollectReport(collector.nodeid, outcome, longrepr,
-        getattr(call, 'result', None))
+                        getattr(call, 'result', None))
     rep.call = call # see collect_one_node
     return rep

@@ -13,8 +13,8 @@ from _pytest.runner import fail, skip
 def pytest_addoption(parser):
     group = parser.getgroup("general")
     group.addoption('--runxfail',
-        action="store_true", dest="runxfail", default=False,
-        help="run tests even if they are marked xfail")
+                    action="store_true", dest="runxfail", default=False,
+                    help="run tests even if they are marked xfail")

     parser.addini("xfail_strict", "default for the strict parameter of xfail "
                   "markers when not given explicitly (default: "
@@ -37,25 +37,25 @@ def pytest_configure(config):
         setattr(pytest, "xfail", nop)

     config.addinivalue_line("markers",
-        "skip(reason=None): skip the given test function with an optional reason. "
-        "Example: skip(reason=\"no way of currently testing this\") skips the "
-        "test."
+                            "skip(reason=None): skip the given test function with an optional reason. "
+                            "Example: skip(reason=\"no way of currently testing this\") skips the "
+                            "test."
     )
     config.addinivalue_line("markers",
-        "skipif(condition): skip the given test function if eval(condition) "
-        "results in a True value. Evaluation happens within the "
-        "module global context. Example: skipif('sys.platform == \"win32\"') "
-        "skips the test if we are on the win32 platform. see "
-        "http://pytest.org/latest/skipping.html"
+                            "skipif(condition): skip the given test function if eval(condition) "
+                            "results in a True value. Evaluation happens within the "
+                            "module global context. Example: skipif('sys.platform == \"win32\"') "
+                            "skips the test if we are on the win32 platform. see "
+                            "http://pytest.org/latest/skipping.html"
     )
     config.addinivalue_line("markers",
-        "xfail(condition, reason=None, run=True, raises=None, strict=False): "
-        "mark the test function as an expected failure if eval(condition) "
-        "has a True value. Optionally specify a reason for better reporting "
-        "and run=False if you don't even want to execute the test function. "
-        "If only specific exception(s) are expected, you can list them in "
-        "raises, and if the test fails in other ways, it will be reported as "
-        "a true failure. See http://pytest.org/latest/skipping.html"
+                            "xfail(condition, reason=None, run=True, raises=None, strict=False): "
+                            "mark the test function as an expected failure if eval(condition) "
+                            "has a True value. Optionally specify a reason for better reporting "
+                            "and run=False if you don't even want to execute the test function. "
+                            "If only specific exception(s) are expected, you can list them in "
+                            "raises, and if the test fails in other ways, it will be reported as "
+                            "a true failure. See http://pytest.org/latest/skipping.html"
     )


@@ -19,33 +19,33 @@ import _pytest._pluggy as pluggy
 def pytest_addoption(parser):
     group = parser.getgroup("terminal reporting", "reporting", after="general")
     group._addoption('-v', '--verbose', action="count",
-        dest="verbose", default=0, help="increase verbosity."),
+                     dest="verbose", default=0, help="increase verbosity."),
     group._addoption('-q', '--quiet', action="count",
-        dest="quiet", default=0, help="decrease verbosity."),
+                     dest="quiet", default=0, help="decrease verbosity."),
     group._addoption('-r',
-        action="store", dest="reportchars", default='', metavar="chars",
-        help="show extra test summary info as specified by chars (f)ailed, "
-        "(E)error, (s)skipped, (x)failed, (X)passed, "
-        "(p)passed, (P)passed with output, (a)all except pP. "
-        "Warnings are displayed at all times except when "
-        "--disable-warnings is set")
+                     action="store", dest="reportchars", default='', metavar="chars",
+                     help="show extra test summary info as specified by chars (f)ailed, "
+                          "(E)error, (s)skipped, (x)failed, (X)passed, "
+                          "(p)passed, (P)passed with output, (a)all except pP. "
+                          "Warnings are displayed at all times except when "
+                          "--disable-warnings is set")
     group._addoption('--disable-warnings', '--disable-pytest-warnings', default=False,
                      dest='disable_warnings', action='store_true',
                      help='disable warnings summary')
     group._addoption('-l', '--showlocals',
-        action="store_true", dest="showlocals", default=False,
-        help="show locals in tracebacks (disabled by default).")
+                     action="store_true", dest="showlocals", default=False,
+                     help="show locals in tracebacks (disabled by default).")
     group._addoption('--tb', metavar="style",
-        action="store", dest="tbstyle", default='auto',
-        choices=['auto', 'long', 'short', 'no', 'line', 'native'],
-        help="traceback print mode (auto/long/short/line/native/no).")
+                     action="store", dest="tbstyle", default='auto',
+                     choices=['auto', 'long', 'short', 'no', 'line', 'native'],
+                     help="traceback print mode (auto/long/short/line/native/no).")
     group._addoption('--fulltrace', '--full-trace',
-        action="store_true", default=False,
-        help="don't cut any tracebacks (default is to cut).")
+                     action="store_true", default=False,
+                     help="don't cut any tracebacks (default is to cut).")
     group._addoption('--color', metavar="color",
-        action="store", dest="color", default='auto',
-        choices=['yes', 'no', 'auto'],
-        help="color terminal output (yes/no/auto).")
+                     action="store", dest="color", default='auto',
+                     choices=['yes', 'no', 'auto'],
+                     help="color terminal output (yes/no/auto).")

 def pytest_configure(config):
     config.option.verbose -= config.option.quiet
@@ -38,7 +38,7 @@ class TempdirFactory:
             p = basetemp.mkdir(basename)
         else:
             p = py.path.local.make_numbered_dir(prefix=basename,
-                keep=0, rootdir=basetemp, lock_timeout=None)
+                                                keep=0, rootdir=basetemp, lock_timeout=None)
         self.trace("mktemp", p)
         return p

@@ -210,7 +210,7 @@ def pytest_runtest_protocol(item):
     check_testcase_implements_trial_reporter()

     def excstore(self, exc_value=None, exc_type=None, exc_tb=None,
-            captureVars=None):
+                 captureVars=None):
         if exc_value is None:
             self._rawexcinfo = sys.exc_info()
         else:
@@ -219,7 +219,7 @@ def pytest_runtest_protocol(item):
             self._rawexcinfo = (exc_type, exc_value, exc_tb)
         try:
             Failure__init__(self, exc_value, exc_type, exc_tb,
-                captureVars=captureVars)
+                            captureVars=captureVars)
         except TypeError:
             Failure__init__(self, exc_value, exc_type, exc_tb)

@@ -298,7 +298,7 @@ def test_excinfo_exconly():
     excinfo = pytest.raises(ValueError, h)
     assert excinfo.exconly().startswith('ValueError')
     excinfo = pytest.raises(ValueError,
-        "raise ValueError('hello\\nworld')")
+                            "raise ValueError('hello\\nworld')")
     msg = excinfo.exconly(tryshort=True)
     assert msg.startswith('ValueError')
     assert msg.endswith("world")
@@ -419,10 +419,10 @@ class TestFunction(object):
             pass

         f1 = pytest.Function(name="name", parent=session, config=config,
-            args=(1,), callobj=func1)
+                             args=(1,), callobj=func1)
         assert f1 == f1
         f2 = pytest.Function(name="name",config=config,
-            callobj=func2, parent=session)
+                             callobj=func2, parent=session)
         assert f1 != f2

     def test_issue197_parametrize_emptyset(self, testdir):
@@ -838,7 +838,7 @@ class TestConftestCustomization(object):
         modcol = testdir.getmodulecol("def _hello(): pass")
         l = []
         monkeypatch.setattr(pytest.Module, 'makeitem',
-            lambda self, name, obj: l.append(name))
+                            lambda self, name, obj: l.append(name))
         l = modcol.collect()
         assert '_hello' not in l

@@ -133,11 +133,11 @@ class TestMetafunc(object):
         metafunc = self.Metafunc(func)

         pytest.raises(ValueError, lambda:
-            metafunc.parametrize("x", [1,2], ids=['basic']))
+                      metafunc.parametrize("x", [1,2], ids=['basic']))

         pytest.raises(ValueError, lambda:
-            metafunc.parametrize(("x","y"), [("abc", "def"),
-            ("ghi", "jkl")], ids=["one"]))
+                      metafunc.parametrize(("x","y"), [("abc", "def"),
+                                                       ("ghi", "jkl")], ids=["one"]))

     @pytest.mark.issue510
     def test_parametrize_empty_list(self):
@@ -1114,7 +1114,7 @@ class TestMetafuncFunctional(object):

     @pytest.mark.issue463
     @pytest.mark.parametrize('attr', ['parametrise', 'parameterize',
-        'parameterise'])
+                                      'parameterise'])
     def test_parametrize_misspelling(self, testdir, attr):
         testdir.makepyfile("""
             import pytest
@@ -51,17 +51,17 @@ class FilesCompleter(object):
         if self.allowednames:
             if self.directories:
                 files = _wrapcall(['bash','-c',
-                    "compgen -A directory -- '{p}'".format(p=prefix)])
+                                   "compgen -A directory -- '{p}'".format(p=prefix)])
                 completion += [ f + '/' for f in files]
             for x in self.allowednames:
                 completion += _wrapcall(['bash', '-c',
-                    "compgen -A file -X '!*.{0}' -- '{p}'".format(x,p=prefix)])
+                                         "compgen -A file -X '!*.{0}' -- '{p}'".format(x,p=prefix)])
         else:
             completion += _wrapcall(['bash', '-c',
-                "compgen -A file -- '{p}'".format(p=prefix)])
+                                     "compgen -A file -- '{p}'".format(p=prefix)])

             anticomp = _wrapcall(['bash', '-c',
-                "compgen -A directory -- '{p}'".format(p=prefix)])
+                                  "compgen -A directory -- '{p}'".format(p=prefix)])

             completion = list( set(completion) - set(anticomp))

@@ -639,7 +639,7 @@ def test_rewritten():
         assert testdir.runpytest().ret == 0

     @pytest.mark.skipif(sys.version_info < (3,3),
-        reason='packages without __init__.py not supported on python 2')
+                        reason='packages without __init__.py not supported on python 2')
     def test_package_without__init__py(self, testdir):
         pkg = testdir.mkdir('a_package_without_init_py')
         pkg.join('module.py').ensure()
@@ -72,7 +72,7 @@ class TestCaptureManager(object):

     @needsosdup
     @pytest.mark.parametrize("method",
-        ['no', 'sys', pytest.mark.skipif('not hasattr(os, "dup")', 'fd')])
+                             ['no', 'sys', pytest.mark.skipif('not hasattr(os, "dup")', 'fd')])
     def test_capturing_basic_api(self, method):
         capouter = StdCaptureFD()
         old = sys.stdout, sys.stderr, sys.stdin
@@ -149,7 +149,7 @@ class TestConfigAPI(object):
     def test_config_getvalueorskip(self, testdir):
         config = testdir.parseconfig()
         pytest.raises(pytest.skip.Exception,
-            "config.getvalueorskip('hello')")
+                      "config.getvalueorskip('hello')")
         verbose = config.getvalueorskip("verbose")
         assert verbose == config.option.verbose

@@ -32,14 +32,14 @@ class TestDoctests(object):
         path = testdir.makepyfile(whatever="#")
         for p in (path, testdir.tmpdir):
             items, reprec = testdir.inline_genitems(p,
-                '--doctest-modules')
+                                                    '--doctest-modules')
             assert len(items) == 0

     def test_collect_module_single_modulelevel_doctest(self, testdir):
         path = testdir.makepyfile(whatever='""">>> pass"""')
         for p in (path, testdir.tmpdir):
             items, reprec = testdir.inline_genitems(p,
-                '--doctest-modules')
+                                                    '--doctest-modules')
             assert len(items) == 1
             assert isinstance(items[0], DoctestItem)
             assert isinstance(items[0].parent, DoctestModule)
@@ -52,7 +52,7 @@ class TestDoctests(object):
         """)
         for p in (path, testdir.tmpdir):
             items, reprec = testdir.inline_genitems(p,
-                '--doctest-modules')
+                                                    '--doctest-modules')
             assert len(items) == 2
             assert isinstance(items[0], DoctestItem)
             assert isinstance(items[1], DoctestItem)
@@ -77,7 +77,7 @@ class TestDoctests(object):
         """)
         for p in (path, testdir.tmpdir):
             items, reprec = testdir.inline_genitems(p,
-                '--doctest-modules')
+                                                    '--doctest-modules')
             assert len(items) == 2
             assert isinstance(items[0], DoctestItem)
             assert isinstance(items[1], DoctestItem)
@@ -249,7 +249,7 @@ class TestParser(object):
         group = parser.getgroup("general")
         group.addoption('--doit', '--func-args', action='store_true', help='foo')
         group._addoption("-h", "--help", action="store_true", dest="help",
-            help="show help message and configuration info")
+                         help="show help message and configuration info")
         parser.parse(['-h'])
         help = parser.optparser.format_help()
         assert '-doit, --func-args foo' in help
@@ -352,7 +352,7 @@ class TestPytestPluginManager(object):
 class TestPytestPluginManagerBootstrapming(object):
     def test_preparse_args(self, pytestpm):
         pytest.raises(ImportError, lambda:
-            pytestpm.consider_preparse(["xyz", "-p", "hello123"]))
+                      pytestpm.consider_preparse(["xyz", "-p", "hello123"]))

     def test_plugin_prevent_register(self, pytestpm):
         pytestpm.consider_preparse(["xyz", "-p", "no:abc"])
@@ -110,7 +110,7 @@ class TestTerminal(object):
         item.config.pluginmanager.register(tr)
         location = item.reportinfo()
         tr.config.hook.pytest_runtest_logstart(nodeid=item.nodeid,
-            location=location, fspath=str(item.fspath))
+                                               location=location, fspath=str(item.fspath))
         linecomp.assert_contains_lines([
             "*test_show_runtest_logstart.py*"
         ])
@@ -905,9 +905,9 @@ def test_terminal_summary_warnings_are_displayed(testdir):

     ("green", "1 passed, 2 skipped, 3 deselected, 2 xfailed",
      {"passed": (1,),
-     "skipped": (1,2),
-     "deselected": (1,2,3),
-     "xfailed": (1,2)}),
+      "skipped": (1,2),
+      "deselected": (1,2,3),
+      "xfailed": (1,2)}),
 ])
 def test_summary_stats(exp_line, exp_color, stats_arg):
     print("Based on stats: %s" % stats_arg)
tox.ini
@@ -196,6 +196,6 @@ filterwarnings =
     ignore:.*inspect.getargspec.*deprecated, use inspect.signature.*:DeprecationWarning

 [flake8]
-ignore = E128,E129,E131,E201,E202,E203,E221,E222,E225,E226,E231,E241,E251,E261,E262,E265,E271,E272,E293,E301,E302,E303,E401,E402,E501,E701,E702,E704,E712,E731
+ignore = E129,E131,E201,E202,E203,E221,E222,E225,E226,E231,E241,E251,E261,E262,E265,E271,E272,E293,E301,E302,E303,E401,E402,E501,E701,E702,E704,E712,E731
 max-line-length = 120
 exclude = _pytest/vendored_packages/pluggy.py