diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 000000000..bb45976f1 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,29 @@ +exclude: doc/en/example/py2py3/test_py2.py +repos: +- repo: https://github.com/ambv/black + rev: 18.4a4 + hooks: + - id: black + args: [--safe, --quiet] + python_version: python3.6 +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v1.2.3 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-yaml + - id: debug-statements + exclude: _pytest/debugging.py + - id: flake8 +- repo: https://github.com/asottile/pyupgrade + rev: v1.2.0 + hooks: + - id: pyupgrade +- repo: local + hooks: + - id: rst + name: rst + entry: python scripts/check-rst.py + language: python + additional_dependencies: [pygments, restructuredtext_lint] + python_version: python3.6 diff --git a/.travis.yml b/.travis.yml index b0ed7bf29..22e840ed9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,5 +1,9 @@ sudo: false language: python +stages: +- linting +- test +- deploy python: - '3.6' install: @@ -9,7 +13,7 @@ env: # coveralls is not listed in tox's envlist, but should run in travis - TOXENV=coveralls # note: please use "tox --listenvs" to populate the build matrix below - - TOXENV=linting + # please remove the linting env in all cases - TOXENV=py27 - TOXENV=py34 - TOXENV=py36 @@ -53,6 +57,14 @@ jobs: on: tags: true repo: pytest-dev/pytest + - stage: linting + python: '3.6' + env: + install: + - pip install pre-commit + - pre-commit install-hooks + script: + - pre-commit run --all-files script: tox --recreate diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index da73acda2..adecd91ea 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -139,7 +139,7 @@ Here's a rundown of how a repository transfer usually proceeds * ``joedoe`` transfers repository ownership to ``pytest-dev`` administrator ``calvin``. * ``calvin`` creates ``pytest-xyz-admin`` and ``pytest-xyz-developers`` teams, inviting ``joedoe`` to both as **maintainer**. * ``calvin`` transfers repository to ``pytest-dev`` and configures team access: - + - ``pytest-xyz-admin`` **admin** access; - ``pytest-xyz-developers`` **write** access; @@ -163,6 +163,7 @@ Short version ~~~~~~~~~~~~~ #. Fork the repository; +#. Enable and install pre-commit (https://pre-commit.com/) to ensure style guides and code checks are followed; #. Target ``master`` for bugfixes and doc changes; #. Target ``features`` for new features or functionality changes. #. Follow **PEP-8**. There's a ``tox`` command to help fixing it: ``tox -e fix-lint``. @@ -202,20 +203,30 @@ Here is a simple overview, with pytest-specific bits: $ git clone git@github.com:YOUR_GITHUB_USERNAME/pytest.git $ cd pytest # now, to fix a bug create your own branch off "master": - + $ git checkout -b your-bugfix-branch-name master # or to instead add a feature create your own branch off "features": - + $ git checkout -b your-feature-branch-name features - Given we have "major.minor.micro" version numbers, bugfixes will usually - be released in micro releases whereas features will be released in + Given we have "major.minor.micro" version numbers, bugfixes will usually + be released in micro releases whereas features will be released in minor releases and incompatible changes in major releases. If you need some help with Git, follow this quick start guide: https://git.wiki.kernel.org/index.php/QuickStart +#. 
Install pre-commit and its hook on the pytest repo + + https://pre-commit.com/ is a framework for managing and maintaining multi-language pre-commit hooks. + pytest uses pre-commit to ensure a consistent code style and formatting across contributions. + + $ pip install --user pre-commit + $ pre-commit install + + Afterwards, pre-commit will run whenever you commit. + #. Install tox Tox is used to run all the tests and will automatically setup virtualenvs diff --git a/README.rst b/README.rst index dd8838d16..42d9caedb 100644 --- a/README.rst +++ b/README.rst @@ -23,6 +23,9 @@ .. image:: https://ci.appveyor.com/api/projects/status/mrgbjaua7t33pg6b?svg=true :target: https://ci.appveyor.com/project/pytestbot/pytest +.. image:: https://img.shields.io/badge/code%20style-black-000000.svg + :target: https://github.com/ambv/black + .. image:: https://www.codetriage.com/pytest-dev/pytest/badges/users.svg :target: https://www.codetriage.com/pytest-dev/pytest diff --git a/_pytest/__init__.py b/_pytest/__init__.py index 6e41f0504..46c7827ed 100644 --- a/_pytest/__init__.py +++ b/_pytest/__init__.py @@ -1,8 +1,8 @@ -__all__ = ['__version__'] +__all__ = ["__version__"] try: from ._version import version as __version__ except ImportError: # broken installation, we don't even try # unknown only works because we do poor mans version compare - __version__ = 'unknown' + __version__ = "unknown" diff --git a/_pytest/_argcomplete.py b/_pytest/_argcomplete.py index ea8c98c7f..8f480d71d 100644 --- a/_pytest/_argcomplete.py +++ b/_pytest/_argcomplete.py @@ -61,7 +61,7 @@ from glob import glob class FastFilesCompleter(object): - 'Fast file completer class' + "Fast file completer class" def __init__(self, directories=True): self.directories = directories @@ -74,21 +74,21 @@ class FastFilesCompleter(object): prefix_dir = 0 completion = [] globbed = [] - if '*' not in prefix and '?' not in prefix: + if "*" not in prefix and "?" not in prefix: # we are on unix, otherwise no bash if not prefix or prefix[-1] == os.path.sep: - globbed.extend(glob(prefix + '.*')) - prefix += '*' + globbed.extend(glob(prefix + ".*")) + prefix += "*" globbed.extend(glob(prefix)) for x in sorted(globbed): if os.path.isdir(x): - x += '/' + x += "/" # append stripping the prefix (like bash, not like compgen) completion.append(x[prefix_dir:]) return completion -if os.environ.get('_ARGCOMPLETE'): +if os.environ.get("_ARGCOMPLETE"): try: import argcomplete.completers except ImportError: @@ -97,7 +97,11 @@ if os.environ.get('_ARGCOMPLETE'): def try_argcomplete(parser): argcomplete.autocomplete(parser, always_complete_options=False) + + else: + def try_argcomplete(parser): pass + filescompleter = None diff --git a/_pytest/_code/_py2traceback.py b/_pytest/_code/_py2traceback.py index 5aacf0a42..2dd100c33 100644 --- a/_pytest/_code/_py2traceback.py +++ b/_pytest/_code/_py2traceback.py @@ -4,6 +4,7 @@ # from __future__ import absolute_import, division, print_function import types +from six import text_type def format_exception_only(etype, value): @@ -29,9 +30,12 @@ def format_exception_only(etype, value): # # Clear these out first because issubtype(string1, SyntaxError) # would throw another exception and mask the original problem. 
- if (isinstance(etype, BaseException) or - isinstance(etype, types.InstanceType) or - etype is None or type(etype) is str): + if ( + isinstance(etype, BaseException) + or isinstance(etype, types.InstanceType) + or etype is None + or type(etype) is str + ): return [_format_final_exc_line(etype, value)] stype = etype.__name__ @@ -50,14 +54,14 @@ def format_exception_only(etype, value): lines.append(' File "%s", line %d\n' % (filename, lineno)) if badline is not None: if isinstance(badline, bytes): # python 2 only - badline = badline.decode('utf-8', 'replace') - lines.append(u' %s\n' % badline.strip()) + badline = badline.decode("utf-8", "replace") + lines.append(u" %s\n" % badline.strip()) if offset is not None: - caretspace = badline.rstrip('\n')[:offset].lstrip() + caretspace = badline.rstrip("\n")[:offset].lstrip() # non-space whitespace (likes tabs) must be kept for alignment - caretspace = ((c.isspace() and c or ' ') for c in caretspace) + caretspace = ((c.isspace() and c or " ") for c in caretspace) # only three spaces to account for offset1 == pos 0 - lines.append(' %s^\n' % ''.join(caretspace)) + lines.append(" %s^\n" % "".join(caretspace)) value = msg lines.append(_format_final_exc_line(stype, value)) @@ -76,10 +80,10 @@ def _format_final_exc_line(etype, value): def _some_str(value): try: - return unicode(value) + return text_type(value) except Exception: try: return str(value) except Exception: pass - return '<unprintable %s object>' % type(value).__name__ + return "<unprintable %s object>" % type(value).__name__ diff --git a/_pytest/_code/code.py b/_pytest/_code/code.py index 76e143774..cb788c17f 100644 --- a/_pytest/_code/code.py +++ b/_pytest/_code/code.py @@ -8,8 +8,9 @@ import attr import re from weakref import ref from _pytest.compat import _PY2, _PY3, PY35, safe_str - + from six import text_type import py + builtin_repr = repr if _PY3: @@ -61,6 +62,7 @@ class Code(object): """ return a _pytest._code.Source object for the full source file of the code """ from _pytest._code import source + full, _ = source.findsource(self.raw) return full @@ -69,6 +71,7 @@ class Code(object): """ # return source only for that part of code import _pytest._code + return _pytest._code.Source(self.raw) def getargs(self, var=False): @@ -101,6 +104,7 @@ class Frame(object): def statement(self): """ statement this frame is at """ import _pytest._code + if self.code.fullsource is None: return _pytest._code.Source("") return self.code.fullsource.getstatement(self.lineno) @@ -144,7 +148,7 @@ class Frame(object): try: retval.append((arg, self.f_locals[arg])) except KeyError: - pass # this can occur when using Psyco + pass  # this can occur when using Psyco return retval @@ -166,6 +170,7 @@ class TracebackEntry(object): @property def frame(self): import _pytest._code + return _pytest._code.Frame(self._rawentry.tb_frame) @property @@ -188,6 +193,7 @@ class TracebackEntry(object): def getlocals(self): return self.frame.f_locals + locals = property(getlocals, None, None, "locals of underlaying frame") def getfirstlinesource(self): @@ -199,6 +205,7 @@ class TracebackEntry(object): # we use the passed in astcache to not reparse asttrees # within exception info printing from _pytest._code.source import getstatementrange_ast + source = self.frame.code.fullsource if source is None: return None @@ -209,8 +216,9 @@ class TracebackEntry(object): astnode = astcache.get(key, None) start = self.getfirstlinesource() try: - astnode, _, end = getstatementrange_ast(self.lineno, source, - astnode=astnode) + astnode, _, end = getstatementrange_ast( + self.lineno, source, 
astnode=astnode + ) except SyntaxError: end = self.lineno + 1 else: @@ -230,10 +238,10 @@ class TracebackEntry(object): mostly for internal use """ try: - tbh = self.frame.f_locals['__tracebackhide__'] + tbh = self.frame.f_locals["__tracebackhide__"] except KeyError: try: - tbh = self.frame.f_globals['__tracebackhide__'] + tbh = self.frame.f_globals["__tracebackhide__"] except KeyError: return False @@ -246,7 +254,7 @@ class TracebackEntry(object): try: fn = str(self.path) except py.error.Error: - fn = '???' + fn = "???" name = self.frame.code.name try: line = str(self.statement).lstrip() @@ -258,6 +266,7 @@ class TracebackEntry(object): def name(self): return self.frame.code.raw.co_name + name = property(name, None, None, "co_name of underlaying code") @@ -270,11 +279,13 @@ class Traceback(list): def __init__(self, tb, excinfo=None): """ initialize from given python traceback object and ExceptionInfo """ self._excinfo = excinfo - if hasattr(tb, 'tb_next'): + if hasattr(tb, "tb_next"): + def f(cur): while cur is not None: yield self.Entry(cur, excinfo=excinfo) cur = cur.tb_next + list.__init__(self, f(tb)) else: list.__init__(self, tb) @@ -292,11 +303,16 @@ class Traceback(list): for x in self: code = x.frame.code codepath = code.path - if ((path is None or codepath == path) and - (excludepath is None or not hasattr(codepath, 'relto') or - not codepath.relto(excludepath)) and - (lineno is None or x.lineno == lineno) and - (firstlineno is None or x.frame.code.firstlineno == firstlineno)): + if ( + (path is None or codepath == path) + and ( + excludepath is None + or not hasattr(codepath, "relto") + or not codepath.relto(excludepath) + ) + and (lineno is None or x.lineno == lineno) + and (firstlineno is None or x.frame.code.firstlineno == firstlineno) + ): return Traceback(x._rawentry, self._excinfo) return self @@ -345,35 +361,41 @@ class Traceback(list): f = entry.frame loc = f.f_locals for otherloc in values: - if f.is_true(f.eval(co_equal, - __recursioncache_locals_1=loc, - __recursioncache_locals_2=otherloc)): + if f.is_true( + f.eval( + co_equal, + __recursioncache_locals_1=loc, + __recursioncache_locals_2=otherloc, + ) + ): return i values.append(entry.frame.f_locals) return None -co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2', - '?', 'eval') +co_equal = compile( + "__recursioncache_locals_1 == __recursioncache_locals_2", "?", "eval" +) class ExceptionInfo(object): """ wraps sys.exc_info() objects and offers help for navigating the traceback. 
""" - _striptext = '' - _assert_start_repr = "AssertionError(u\'assert " if _PY2 else "AssertionError(\'assert " + _striptext = "" + _assert_start_repr = "AssertionError(u'assert " if _PY2 else "AssertionError('assert " def __init__(self, tup=None, exprinfo=None): import _pytest._code + if tup is None: tup = sys.exc_info() if exprinfo is None and isinstance(tup[1], AssertionError): - exprinfo = getattr(tup[1], 'msg', None) + exprinfo = getattr(tup[1], "msg", None) if exprinfo is None: exprinfo = py.io.saferepr(tup[1]) if exprinfo and exprinfo.startswith(self._assert_start_repr): - self._striptext = 'AssertionError: ' + self._striptext = "AssertionError: " self._excinfo = tup #: the exception class self.type = tup[0] @@ -398,7 +420,7 @@ class ExceptionInfo(object): removed from the beginning) """ lines = format_exception_only(self.type, self.value) - text = ''.join(lines) + text = "".join(lines) text = text.rstrip() if tryshort: if text.startswith(self._striptext): @@ -415,8 +437,14 @@ class ExceptionInfo(object): path, lineno = entry.frame.code.raw.co_filename, entry.lineno return ReprFileLocation(path, lineno + 1, exconly) - def getrepr(self, showlocals=False, style="long", - abspath=False, tbfilter=True, funcargs=False): + def getrepr( + self, + showlocals=False, + style="long", + abspath=False, + tbfilter=True, + funcargs=False, + ): """ return str()able representation of this exception info. showlocals: show locals per traceback entry style: long|short|no|native traceback style @@ -424,16 +452,23 @@ class ExceptionInfo(object): in case of style==native, tbfilter and showlocals is ignored. """ - if style == 'native': - return ReprExceptionInfo(ReprTracebackNative( - traceback.format_exception( - self.type, - self.value, - self.traceback[0]._rawentry, - )), self._getreprcrash()) + if style == "native": + return ReprExceptionInfo( + ReprTracebackNative( + traceback.format_exception( + self.type, self.value, self.traceback[0]._rawentry + ) + ), + self._getreprcrash(), + ) - fmt = FormattedExcinfo(showlocals=showlocals, style=style, - abspath=abspath, tbfilter=tbfilter, funcargs=funcargs) + fmt = FormattedExcinfo( + showlocals=showlocals, + style=style, + abspath=abspath, + tbfilter=tbfilter, + funcargs=funcargs, + ) return fmt.repr_excinfo(self) def __str__(self): @@ -444,7 +479,7 @@ class ExceptionInfo(object): def __unicode__(self): entry = self.traceback[-1] loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly()) - return unicode(loc) + return text_type(loc) def match(self, regexp): """ @@ -455,8 +490,7 @@ class ExceptionInfo(object): """ __tracebackhide__ = True if not re.search(regexp, str(self.value)): - assert 0, "Pattern '{0!s}' not found in '{1!s}'".format( - regexp, self.value) + assert 0, "Pattern '{!s}' not found in '{!s}'".format(regexp, self.value) return True @@ -508,6 +542,7 @@ class FormattedExcinfo(object): def get_source(self, source, line_index=-1, excinfo=None, short=False): """ return formatted and marked up source lines. 
""" import _pytest._code + lines = [] if source is None or line_index >= len(source.lines): source = _pytest._code.Source("???") @@ -532,7 +567,7 @@ class FormattedExcinfo(object): lines = [] indent = " " * indent # get the real exception information out - exlines = excinfo.exconly(tryshort=True).split('\n') + exlines = excinfo.exconly(tryshort=True).split("\n") failindent = self.fail_marker + indent[1:] for line in exlines: lines.append(failindent + line) @@ -547,7 +582,7 @@ class FormattedExcinfo(object): keys.sort() for name in keys: value = locals[name] - if name == '__builtins__': + if name == "__builtins__": lines.append("__builtins__ = ") else: # This formatting could all be handled by the @@ -565,6 +600,7 @@ class FormattedExcinfo(object): def repr_traceback_entry(self, entry, excinfo=None): import _pytest._code + source = self._getentrysource(entry) if source is None: source = _pytest._code.Source("???") @@ -641,11 +677,16 @@ class FormattedExcinfo(object): except Exception as e: max_frames = 10 extraline = ( - '!!! Recursion error detected, but an error occurred locating the origin of recursion.\n' - ' The following exception happened when comparing locals in the stack frame:\n' - ' {exc_type}: {exc_msg}\n' - ' Displaying first and last {max_frames} stack frames out of {total}.' - ).format(exc_type=type(e).__name__, exc_msg=safe_str(e), max_frames=max_frames, total=len(traceback)) + "!!! Recursion error detected, but an error occurred locating the origin of recursion.\n" + " The following exception happened when comparing locals in the stack frame:\n" + " {exc_type}: {exc_msg}\n" + " Displaying first and last {max_frames} stack frames out of {total}." + ).format( + exc_type=type(e).__name__, + exc_msg=safe_str(e), + max_frames=max_frames, + total=len(traceback), + ) traceback = traceback[:max_frames] + traceback[-max_frames:] else: if recursionindex is not None: @@ -673,18 +714,24 @@ class FormattedExcinfo(object): else: # fallback to native repr if the exception doesn't have a traceback: # ExceptionInfo objects require a full traceback to work - reprtraceback = ReprTracebackNative(traceback.format_exception(type(e), e, None)) + reprtraceback = ReprTracebackNative( + traceback.format_exception(type(e), e, None) + ) reprcrash = None repr_chain += [(reprtraceback, reprcrash, descr)] if e.__cause__ is not None: e = e.__cause__ - excinfo = ExceptionInfo((type(e), e, e.__traceback__)) if e.__traceback__ else None - descr = 'The above exception was the direct cause of the following exception:' + excinfo = ExceptionInfo( + (type(e), e, e.__traceback__) + ) if e.__traceback__ else None + descr = "The above exception was the direct cause of the following exception:" elif (e.__context__ is not None and not e.__suppress_context__): e = e.__context__ - excinfo = ExceptionInfo((type(e), e, e.__traceback__)) if e.__traceback__ else None - descr = 'During handling of the above exception, another exception occurred:' + excinfo = ExceptionInfo( + (type(e), e, e.__traceback__) + ) if e.__traceback__ else None + descr = "During handling of the above exception, another exception occurred:" else: e = None repr_chain.reverse() @@ -692,10 +739,11 @@ class FormattedExcinfo(object): class TerminalRepr(object): + def __str__(self): s = self.__unicode__() if _PY2: - s = s.encode('utf-8') + s = s.encode("utf-8") return s def __unicode__(self): @@ -711,6 +759,7 @@ class TerminalRepr(object): class ExceptionRepr(TerminalRepr): + def __init__(self): self.sections = [] @@ -724,6 +773,7 @@ class 
ExceptionRepr(TerminalRepr): class ExceptionChainRepr(ExceptionRepr): + def __init__(self, chain): super(ExceptionChainRepr, self).__init__() self.chain = chain @@ -742,6 +792,7 @@ class ExceptionChainRepr(ExceptionRepr): class ReprExceptionInfo(ExceptionRepr): + def __init__(self, reprtraceback, reprcrash): super(ReprExceptionInfo, self).__init__() self.reprtraceback = reprtraceback @@ -768,8 +819,11 @@ class ReprTraceback(TerminalRepr): entry.toterminal(tw) if i < len(self.reprentries) - 1: next_entry = self.reprentries[i + 1] - if entry.style == "long" or \ - entry.style == "short" and next_entry.style == "long": + if ( + entry.style == "long" + or entry.style == "short" + and next_entry.style == "long" + ): tw.sep(self.entrysep) if self.extraline: @@ -777,6 +831,7 @@ class ReprTraceback(TerminalRepr): class ReprTracebackNative(ReprTraceback): + def __init__(self, tblines): self.style = "native" self.reprentries = [ReprEntryNative(tblines)] @@ -826,12 +881,11 @@ class ReprEntry(TerminalRepr): self.reprfileloc.toterminal(tw) def __str__(self): - return "%s\n%s\n%s" % ("\n".join(self.lines), - self.reprlocals, - self.reprfileloc) + return "%s\n%s\n%s" % ("\n".join(self.lines), self.reprlocals, self.reprfileloc) class ReprFileLocation(TerminalRepr): + def __init__(self, path, lineno, message): self.path = str(path) self.lineno = lineno @@ -849,6 +903,7 @@ class ReprFileLocation(TerminalRepr): class ReprLocals(TerminalRepr): + def __init__(self, lines): self.lines = lines @@ -858,6 +913,7 @@ class ReprLocals(TerminalRepr): class ReprFuncArgs(TerminalRepr): + def __init__(self, args): self.args = args @@ -885,22 +941,26 @@ def getrawcode(obj, trycall=True): try: return obj.__code__ except AttributeError: - obj = getattr(obj, 'im_func', obj) - obj = getattr(obj, 'func_code', obj) - obj = getattr(obj, 'f_code', obj) - obj = getattr(obj, '__code__', obj) - if trycall and not hasattr(obj, 'co_firstlineno'): - if hasattr(obj, '__call__') and not inspect.isclass(obj): + obj = getattr(obj, "im_func", obj) + obj = getattr(obj, "func_code", obj) + obj = getattr(obj, "f_code", obj) + obj = getattr(obj, "__code__", obj) + if trycall and not hasattr(obj, "co_firstlineno"): + if hasattr(obj, "__call__") and not inspect.isclass(obj): x = getrawcode(obj.__call__, trycall=False) - if hasattr(x, 'co_firstlineno'): + if hasattr(x, "co_firstlineno"): return x return obj if PY35: # RecursionError introduced in 3.5 + def is_recursion_error(excinfo): return excinfo.errisinstance(RecursionError) # noqa + + else: + def is_recursion_error(excinfo): if not excinfo.errisinstance(RuntimeError): return False diff --git a/_pytest/_code/source.py b/_pytest/_code/source.py index 6c2856ea8..6b982a4ca 100644 --- a/_pytest/_code/source.py +++ b/_pytest/_code/source.py @@ -21,8 +21,8 @@ class Source(object): def __init__(self, *parts, **kwargs): self.lines = lines = [] - de = kwargs.get('deindent', True) - rstrip = kwargs.get('rstrip', True) + de = kwargs.get("deindent", True) + rstrip = kwargs.get("rstrip", True) for part in parts: if not part: partlines = [] @@ -31,7 +31,7 @@ class Source(object): elif isinstance(part, (tuple, list)): partlines = [x.rstrip("\n") for x in part] elif isinstance(part, six.string_types): - partlines = part.split('\n') + partlines = part.split("\n") if rstrip: while partlines: if partlines[-1].strip(): @@ -79,7 +79,7 @@ class Source(object): source.lines[:] = self.lines[start:end] return source - def putaround(self, before='', after='', indent=' ' * 4): + def putaround(self, before="", after="", 
indent=" " * 4): """ return a copy of the source object with 'before' and 'after' wrapped around it. """ @@ -90,7 +90,7 @@ class Source(object): newsource.lines = before.lines + lines + after.lines return newsource - def indent(self, indent=' ' * 4): + def indent(self, indent=" " * 4): """ return a copy of the source object with all lines indented by the given indent-string. """ @@ -139,7 +139,7 @@ class Source(object): source = str(self) try: # compile(source+'\n', "x", "exec") - syntax_checker(source + '\n') + syntax_checker(source + "\n") except KeyboardInterrupt: raise except Exception: @@ -150,9 +150,14 @@ class Source(object): def __str__(self): return "\n".join(self.lines) - def compile(self, filename=None, mode='exec', - flag=generators.compiler_flag, - dont_inherit=0, _genframe=None): + def compile( + self, + filename=None, + mode="exec", + flag=generators.compiler_flag, + dont_inherit=0, + _genframe=None, + ): """ return compiled code object. if filename is None invent an artificial filename which displays the source/line position of the caller frame. @@ -164,10 +169,10 @@ class Source(object): base = "<%d-codegen " % self._compilecounter self.__class__._compilecounter += 1 if not filename: - filename = base + '%s:%d>' % (fn, lineno) + filename = base + "%s:%d>" % (fn, lineno) else: - filename = base + '%r %s:%d>' % (filename, fn, lineno) - source = "\n".join(self.lines) + '\n' + filename = base + "%r %s:%d>" % (filename, fn, lineno) + source = "\n".join(self.lines) + "\n" try: co = cpy_compile(source, filename, mode, flag) except SyntaxError: @@ -175,9 +180,9 @@ class Source(object): # re-represent syntax errors from parsing python strings msglines = self.lines[:ex.lineno] if ex.offset: - msglines.append(" " * ex.offset + '^') + msglines.append(" " * ex.offset + "^") msglines.append("(code was compiled probably from here: %s)" % filename) - newex = SyntaxError('\n'.join(msglines)) + newex = SyntaxError("\n".join(msglines)) newex.offset = ex.offset newex.lineno = ex.lineno newex.text = ex.text @@ -189,12 +194,15 @@ class Source(object): linecache.cache[filename] = (1, None, lines, filename) return co + # # public API shortcut functions # -def compile_(source, filename=None, mode='exec', flags=generators.compiler_flag, dont_inherit=0): +def compile_( + source, filename=None, mode="exec", flags=generators.compiler_flag, dont_inherit=0 +): """ compile the given source to a raw code object, and maintain an internal cache which allows later retrieval of the source code for the code object @@ -214,6 +222,7 @@ def getfslineno(obj): If the source cannot be determined return ("", -1) """ from .code import Code + try: code = Code(obj) except TypeError: @@ -235,6 +244,7 @@ def getfslineno(obj): assert isinstance(lineno, int) return fspath, lineno + # # helper functions # @@ -254,11 +264,12 @@ def findsource(obj): def getsource(obj, **kwargs): from .code import getrawcode + obj = getrawcode(obj) try: strsrc = inspect.getsource(obj) except IndentationError: - strsrc = "\"Buggy python version consider upgrading, cannot get source\"" + strsrc = '"Buggy python version consider upgrading, cannot get source"' assert isinstance(strsrc, str) return Source(strsrc, **kwargs) @@ -279,12 +290,14 @@ def deindent(lines, offset=None): def readline_generator(lines): for line in lines: - yield line + '\n' + yield line + "\n" it = readline_generator(lines) try: - for _, _, (sline, _), (eline, _), _ in tokenize.generate_tokens(lambda: next(it)): + for _, _, (sline, _), (eline, _), _ in 
tokenize.generate_tokens( + lambda: next(it) + ): if sline > len(lines): break # End of input reached if sline > len(newlines): @@ -306,6 +319,7 @@ def deindent(lines, offset=None): def get_statement_startend2(lineno, node): import ast + # flatten all statements and except handlers into one lineno-list # AST's line numbers start indexing at 1 values = [] diff --git a/_pytest/assertion/__init__.py b/_pytest/assertion/__init__.py index 39c57c5f3..2c9a8890c 100644 --- a/_pytest/assertion/__init__.py +++ b/_pytest/assertion/__init__.py @@ -12,17 +12,19 @@ from _pytest.assertion import truncate def pytest_addoption(parser): group = parser.getgroup("debugconfig") - group.addoption('--assert', - action="store", - dest="assertmode", - choices=("rewrite", "plain",), - default="rewrite", - metavar="MODE", - help="""Control assertion debugging tools. 'plain' + group.addoption( + "--assert", + action="store", + dest="assertmode", + choices=("rewrite", "plain"), + default="rewrite", + metavar="MODE", + help="""Control assertion debugging tools. 'plain' performs no assertion debugging. 'rewrite' (the default) rewrites assert statements in test modules on import to provide assert - expression information.""") + expression information.""", + ) def register_assert_rewrite(*names): @@ -38,7 +40,7 @@ def register_assert_rewrite(*names): """ for name in names: if not isinstance(name, str): - msg = 'expected module names as *args, got {0} instead' + msg = "expected module names as *args, got {0} instead" raise TypeError(msg.format(repr(names))) for hook in sys.meta_path: if isinstance(hook, rewrite.AssertionRewritingHook): @@ -68,13 +70,13 @@ class AssertionState(object): def install_importhook(config): """Try to install the rewrite hook, raise SystemError if it fails.""" # Jython has an AST bug that make the assertion rewriting hook malfunction. - if (sys.platform.startswith('java')): - raise SystemError('rewrite not supported') + if sys.platform.startswith("java"): + raise SystemError("rewrite not supported") - config._assertstate = AssertionState(config, 'rewrite') + config._assertstate = AssertionState(config, "rewrite") config._assertstate.hook = hook = rewrite.AssertionRewritingHook(config) sys.meta_path.insert(0, hook) - config._assertstate.trace('installed rewrite import hook') + config._assertstate.trace("installed rewrite import hook") def undo(): hook = config._assertstate.hook @@ -89,7 +91,7 @@ def pytest_collection(session): # this hook is only called when test modules are collected # so for example not in the master process of pytest-xdist # (which does not collect test modules) - assertstate = getattr(session.config, '_assertstate', None) + assertstate = getattr(session.config, "_assertstate", None) if assertstate: if assertstate.hook is not None: assertstate.hook.set_session(session) @@ -103,6 +105,7 @@ def pytest_runtest_setup(item): pytest_assertrepr_compare hook. This sets up this custom comparison for the test. """ + def callbinrepr(op, left, right): """Call the pytest_assertrepr_compare hook and prepare the result @@ -119,7 +122,8 @@ def pytest_runtest_setup(item): pretty printing. 
""" hook_result = item.ihook.pytest_assertrepr_compare( - config=item.config, op=op, left=left, right=right) + config=item.config, op=op, left=left, right=right + ) for new_expl in hook_result: if new_expl: new_expl = truncate.truncate_if_required(new_expl, item) @@ -128,6 +132,7 @@ def pytest_runtest_setup(item): if item.config.getvalue("assertmode") == "rewrite": res = res.replace("%", "%%") return res + util._reprcompare = callbinrepr @@ -136,7 +141,7 @@ def pytest_runtest_teardown(item): def pytest_sessionfinish(session): - assertstate = getattr(session.config, '_assertstate', None) + assertstate = getattr(session.config, "_assertstate", None) if assertstate: if assertstate.hook is not None: assertstate.hook.set_session(None) diff --git a/_pytest/assertion/rewrite.py b/_pytest/assertion/rewrite.py index 0499a792f..b50df7fc2 100644 --- a/_pytest/assertion/rewrite.py +++ b/_pytest/assertion/rewrite.py @@ -40,6 +40,7 @@ ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3 if sys.version_info >= (3, 5): ast_Call = ast.Call else: + def ast_Call(a, b, c): return ast.Call(a, b, c, None, None) @@ -151,14 +152,13 @@ class AssertionRewritingHook(object): def _should_rewrite(self, name, fn_pypath, state): # always rewrite conftest files fn = str(fn_pypath) - if fn_pypath.basename == 'conftest.py': + if fn_pypath.basename == "conftest.py": state.trace("rewriting conftest file: %r" % (fn,)) return True if self.session is not None: if self.session.isinitpath(fn): - state.trace("matched test file (was specified on cmdline): %r" % - (fn,)) + state.trace("matched test file (was specified on cmdline): %r" % (fn,)) return True # modules not passed explicitly on the command line are only @@ -169,7 +169,7 @@ class AssertionRewritingHook(object): return True for marked in self._must_rewrite: - if name == marked or name.startswith(marked + '.'): + if name == marked or name.startswith(marked + "."): state.trace("matched marked file %r (from %r)" % (name, marked)) return True @@ -181,19 +181,20 @@ class AssertionRewritingHook(object): The named module or package as well as any nested modules will be rewritten on import. """ - already_imported = (set(names) - .intersection(sys.modules) - .difference(self._rewritten_names)) + already_imported = ( + set(names).intersection(sys.modules).difference(self._rewritten_names) + ) for name in already_imported: if not AssertionRewriter.is_rewrite_disabled( - sys.modules[name].__doc__ or ""): + sys.modules[name].__doc__ or "" + ): self._warn_already_imported(name) self._must_rewrite.update(names) def _warn_already_imported(self, name): self.config.warn( - 'P1', - 'Module already imported so cannot be rewritten: %s' % name) + "P1", "Module already imported so cannot be rewritten: %s" % name + ) def load_module(self, name): # If there is an existing module object named 'fullname' in @@ -237,6 +238,7 @@ class AssertionRewritingHook(object): """ try: import pkg_resources + # access an attribute in case a deferred importer is present pkg_resources.__name__ except ImportError: @@ -249,7 +251,7 @@ class AssertionRewritingHook(object): def get_data(self, pathname): """Optional PEP302 get_data API. """ - with open(pathname, 'rb') as f: + with open(pathname, "rb") as f: return f.read() @@ -282,7 +284,7 @@ RN = "\r\n".encode("utf-8") N = "\n".encode("utf-8") cookie_re = re.compile(r"^[ \t\f]*#.*coding[:=][ \t]*[-\w.]+") -BOM_UTF8 = '\xef\xbb\xbf' +BOM_UTF8 = "\xef\xbb\xbf" def _rewrite_test(config, fn): @@ -307,9 +309,11 @@ def _rewrite_test(config, fn): # gets this right. 
end1 = source.find("\n") end2 = source.find("\n", end1 + 1) - if (not source.startswith(BOM_UTF8) and - cookie_re.match(source[0:end1]) is None and - cookie_re.match(source[end1 + 1:end2]) is None): + if ( + not source.startswith(BOM_UTF8) + and cookie_re.match(source[0:end1]) is None + and cookie_re.match(source[end1 + 1:end2]) is None + ): if hasattr(state, "_indecode"): # encodings imported us again, so don't rewrite. return None, None @@ -354,20 +358,23 @@ def _read_pyc(source, pyc, trace=lambda x: None): size = source.size() data = fp.read(12) except EnvironmentError as e: - trace('_read_pyc(%s): EnvironmentError %s' % (source, e)) + trace("_read_pyc(%s): EnvironmentError %s" % (source, e)) return None # Check for invalid or out of date pyc file. - if (len(data) != 12 or data[:4] != imp.get_magic() or - struct.unpack("<ll", data[4:]) != (mtime, size)): + if ( + len(data) != 12 + or data[:4] != imp.get_magic() + or struct.unpack("<ll", data[4:]) != (mtime, size) + ): - elif (not isinstance(item, ast.ImportFrom) or item.level > 0 or - item.module != "__future__"): + elif ( + not isinstance(item, ast.ImportFrom) + or item.level > 0 + or item.module != "__future__" + ): lineno = item.lineno break pos += 1 else: lineno = item.lineno - imports = [ast.Import([alias], lineno=lineno, col_offset=0) - for alias in aliases] + imports = [ + ast.Import([alias], lineno=lineno, col_offset=0) for alias in aliases + ] mod.body[pos:pos] = imports # Collect asserts. nodes = [mod] @@ -618,10 +632,13 @@ class AssertionRewriter(ast.NodeVisitor): if isinstance(child, ast.AST): nodes.append(child) setattr(node, name, new) - elif (isinstance(field, ast.AST) and - # Don't recurse into expressions as they can't contain - # asserts. - not isinstance(field, ast.expr)): + elif ( + isinstance(field, ast.AST) + and + # Don't recurse into expressions as they can't contain + # asserts. + not isinstance(field, ast.expr) + ): nodes.append(field) @staticmethod @@ -719,8 +736,11 @@ class AssertionRewriter(ast.NodeVisitor): """ if isinstance(assert_.test, ast.Tuple) and self.config is not None: fslocation = (self.module_path, assert_.lineno) - self.config.warn('R1', 'assertion is always true, perhaps ' - 'remove parentheses?', fslocation=fslocation) + self.config.warn( + "R1", + "assertion is always true, perhaps " "remove parentheses?", + fslocation=fslocation, + ) self.statements = [] self.variables = [] self.variable_counter = itertools.count() @@ -734,7 +754,7 @@ negation = ast.UnaryOp(ast.Not(), top_condition) self.statements.append(ast.If(negation, body, [])) if assert_.msg: - assertmsg = self.helper('format_assertmsg', assert_.msg) + assertmsg = self.helper("format_assertmsg", assert_.msg) explanation = "\n>assert " + explanation else: assertmsg = ast.Str("") @@ -751,8 +771,7 @@ body.append(raise_) # Clear temporary variables by setting them to None. if self.variables: - variables = [ast.Name(name, ast.Store()) - for name in self.variables] + variables = [ast.Name(name, ast.Store()) for name in self.variables] clear = ast.Assign(variables, _NameConstant(None)) self.statements.append(clear) # Fix line numbers. 
@@ -839,7 +858,7 @@ class AssertionRewriter(ast.NodeVisitor): else: # **args have `arg` keywords with an .arg of None arg_expls.append("**" + expl) - expl = "%s(%s)" % (func_expl, ', '.join(arg_expls)) + expl = "%s(%s)" % (func_expl, ", ".join(arg_expls)) new_call = ast.Call(new_func, new_args, new_kwargs) res = self.assign(new_call) res_expl = self.explanation_param(self.display(res)) @@ -849,7 +868,7 @@ class AssertionRewriter(ast.NodeVisitor): def visit_Starred(self, starred): # From Python 3.5, a Starred node can appear in a function call res, expl = self.visit(starred.value) - return starred, '*' + expl + return starred, "*" + expl def visit_Call_legacy(self, call): """ @@ -874,9 +893,8 @@ class AssertionRewriter(ast.NodeVisitor): if call.kwargs: new_kwarg, expl = self.visit(call.kwargs) arg_expls.append("**" + expl) - expl = "%s(%s)" % (func_expl, ', '.join(arg_expls)) - new_call = ast.Call(new_func, new_args, new_kwargs, - new_star, new_kwarg) + expl = "%s(%s)" % (func_expl, ", ".join(arg_expls)) + new_call = ast.Call(new_func, new_args, new_kwargs, new_star, new_kwarg) res = self.assign(new_call) res_expl = self.explanation_param(self.display(res)) outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl) @@ -904,7 +922,7 @@ class AssertionRewriter(ast.NodeVisitor): self.push_format_context() left_res, left_expl = self.visit(comp.left) if isinstance(comp.left, (ast.Compare, ast.BoolOp)): - left_expl = "({0})".format(left_expl) + left_expl = "({})".format(left_expl) res_variables = [self.variable() for i in range(len(comp.ops))] load_names = [ast.Name(v, ast.Load()) for v in res_variables] store_names = [ast.Name(v, ast.Store()) for v in res_variables] @@ -915,7 +933,7 @@ class AssertionRewriter(ast.NodeVisitor): for i, op, next_operand in it: next_res, next_expl = self.visit(next_operand) if isinstance(next_operand, (ast.Compare, ast.BoolOp)): - next_expl = "({0})".format(next_expl) + next_expl = "({})".format(next_expl) results.append(next_res) sym = binop_map[op.__class__] syms.append(ast.Str(sym)) @@ -925,11 +943,13 @@ class AssertionRewriter(ast.NodeVisitor): self.statements.append(ast.Assign([store_names[i]], res_expr)) left_res, left_expl = next_res, next_expl # Use pytest.assertion.util._reprcompare if that's available. 
- expl_call = self.helper("call_reprcompare", - ast.Tuple(syms, ast.Load()), - ast.Tuple(load_names, ast.Load()), - ast.Tuple(expls, ast.Load()), - ast.Tuple(results, ast.Load())) + expl_call = self.helper( + "call_reprcompare", + ast.Tuple(syms, ast.Load()), + ast.Tuple(load_names, ast.Load()), + ast.Tuple(expls, ast.Load()), + ast.Tuple(results, ast.Load()), + ) if len(comp.ops) > 1: res = ast.BoolOp(ast.And(), load_names) else: diff --git a/_pytest/assertion/truncate.py b/_pytest/assertion/truncate.py index 2ed12e2e5..79adeca6a 100644 --- a/_pytest/assertion/truncate.py +++ b/_pytest/assertion/truncate.py @@ -34,7 +34,7 @@ def _should_truncate_item(item): def _running_on_ci(): """Check if we're currently running on a CI system.""" - env_vars = ['CI', 'BUILD_NUMBER'] + env_vars = ["CI", "BUILD_NUMBER"] return any(var in os.environ for var in env_vars) @@ -67,16 +67,13 @@ def _truncate_explanation(input_lines, max_lines=None, max_chars=None): # Append useful message to explanation truncated_line_count = len(input_lines) - len(truncated_explanation) truncated_line_count += 1 # Account for the part-truncated final line - msg = '...Full output truncated' + msg = "...Full output truncated" if truncated_line_count == 1: - msg += ' ({0} line hidden)'.format(truncated_line_count) + msg += " ({} line hidden)".format(truncated_line_count) else: - msg += ' ({0} lines hidden)'.format(truncated_line_count) - msg += ", {0}" .format(USAGE_MSG) - truncated_explanation.extend([ - six.text_type(""), - six.text_type(msg), - ]) + msg += " ({} lines hidden)".format(truncated_line_count) + msg += ", {}".format(USAGE_MSG) + truncated_explanation.extend([six.text_type(""), six.text_type(msg)]) return truncated_explanation diff --git a/_pytest/assertion/util.py b/_pytest/assertion/util.py index 7848d0997..bcb800a4a 100644 --- a/_pytest/assertion/util.py +++ b/_pytest/assertion/util.py @@ -20,7 +20,7 @@ _reprcompare = None # with non-ascii characters (see issue 877 and 1379) def ecu(s): try: - return u(s, 'utf-8', 'replace') + return u(s, "utf-8", "replace") except TypeError: return s @@ -38,7 +38,7 @@ def format_explanation(explanation): explanation = ecu(explanation) lines = _split_explanation(explanation) result = _format_lines(lines) - return u('\n').join(result) + return u("\n").join(result) def _split_explanation(explanation): @@ -48,13 +48,13 @@ def _split_explanation(explanation): Any other newlines will be escaped and appear in the line as the literal '\n' characters. 
""" - raw_lines = (explanation or u('')).split('\n') + raw_lines = (explanation or u("")).split("\n") lines = [raw_lines[0]] for values in raw_lines[1:]: - if values and values[0] in ['{', '}', '~', '>']: + if values and values[0] in ["{", "}", "~", ">"]: lines.append(values) else: - lines[-1] += '\\n' + values + lines[-1] += "\\n" + values return lines @@ -71,24 +71,24 @@ def _format_lines(lines): stack = [0] stackcnt = [0] for line in lines[1:]: - if line.startswith('{'): + if line.startswith("{"): if stackcnt[-1]: - s = u('and ') + s = u("and ") else: - s = u('where ') + s = u("where ") stack.append(len(result)) stackcnt[-1] += 1 stackcnt.append(0) - result.append(u(' +') + u(' ') * (len(stack) - 1) + s + line[1:]) - elif line.startswith('}'): + result.append(u(" +") + u(" ") * (len(stack) - 1) + s + line[1:]) + elif line.startswith("}"): stack.pop() stackcnt.pop() result[stack[-1]] += line[1:] else: - assert line[0] in ['~', '>'] + assert line[0] in ["~", ">"] stack[-1] += 1 - indent = len(stack) if line.startswith('~') else len(stack) - 1 - result.append(u(' ') * indent + line[1:]) + indent = len(stack) if line.startswith("~") else len(stack) - 1 + result.append(u(" ") * indent + line[1:]) assert len(stack) == 1 return result @@ -106,7 +106,7 @@ def assertrepr_compare(config, op, left, right): left_repr = py.io.saferepr(left, maxsize=int(width // 2)) right_repr = py.io.saferepr(right, maxsize=width - len(left_repr)) - summary = u('%s %s %s') % (ecu(left_repr), op, ecu(right_repr)) + summary = u("%s %s %s") % (ecu(left_repr), op, ecu(right_repr)) def issequence(x): return isinstance(x, Sequence) and not isinstance(x, basestring) @@ -127,10 +127,10 @@ def assertrepr_compare(config, op, left, right): except TypeError: return False - verbose = config.getoption('verbose') + verbose = config.getoption("verbose") explanation = None try: - if op == '==': + if op == "==": if istext(left) and istext(right): explanation = _diff_text(left, right, verbose) else: @@ -146,14 +146,17 @@ def assertrepr_compare(config, op, left, right): explanation.extend(expl) else: explanation = expl - elif op == 'not in': + elif op == "not in": if istext(left) and istext(right): explanation = _notin_text(left, right, verbose) except Exception: explanation = [ - u('(pytest_assertion plugin: representation of details failed. ' - 'Probably an object has a faulty __repr__.)'), - u(_pytest._code.ExceptionInfo())] + u( + "(pytest_assertion plugin: representation of details failed. " + "Probably an object has a faulty __repr__.)" + ), + u(_pytest._code.ExceptionInfo()), + ] if not explanation: return None @@ -170,6 +173,7 @@ def _diff_text(left, right, verbose=False): If the input are bytes they will be safely converted to text. """ from difflib import ndiff + explanation = [] def escape_for_readable_diff(binary_text): @@ -179,8 +183,8 @@ def _diff_text(left, right, verbose=False): newlines and carriage returns (#429). 
""" r = six.text_type(repr(binary_text)[1:-1]) - r = r.replace(r'\n', '\n') - r = r.replace(r'\r', '\r') + r = r.replace(r"\n", "\n") + r = r.replace(r"\r", "\r") return r if isinstance(left, six.binary_type): @@ -193,9 +197,11 @@ def _diff_text(left, right, verbose=False): if left[i] != right[i]: break if i > 42: - i -= 10 # Provide some context - explanation = [u('Skipping %s identical leading ' - 'characters in diff, use -v to show') % i] + i -= 10 # Provide some context + explanation = [ + u("Skipping %s identical leading " "characters in diff, use -v to show") + % i + ] left = left[i:] right = right[i:] if len(left) == len(right): @@ -203,40 +209,48 @@ def _diff_text(left, right, verbose=False): if left[-i] != right[-i]: break if i > 42: - i -= 10 # Provide some context - explanation += [u('Skipping %s identical trailing ' - 'characters in diff, use -v to show') % i] + i -= 10 # Provide some context + explanation += [ + u( + "Skipping %s identical trailing " + "characters in diff, use -v to show" + ) + % i + ] left = left[:-i] right = right[:-i] keepends = True if left.isspace() or right.isspace(): left = repr(str(left)) right = repr(str(right)) - explanation += [u'Strings contain only whitespace, escaping them using repr()'] - explanation += [line.strip('\n') - for line in ndiff(left.splitlines(keepends), - right.splitlines(keepends))] + explanation += [u"Strings contain only whitespace, escaping them using repr()"] + explanation += [ + line.strip("\n") + for line in ndiff(left.splitlines(keepends), right.splitlines(keepends)) + ] return explanation def _compare_eq_iterable(left, right, verbose=False): if not verbose: - return [u('Use -v to get the full diff')] + return [u("Use -v to get the full diff")] # dynamic import to speedup pytest import difflib try: left_formatting = pprint.pformat(left).splitlines() right_formatting = pprint.pformat(right).splitlines() - explanation = [u('Full diff:')] + explanation = [u("Full diff:")] except Exception: # hack: PrettyPrinter.pformat() in python 2 fails when formatting items that can't be sorted(), ie, calling # sorted() on a list would raise. See issue #718. # As a workaround, the full diff is generated by using the repr() string of each item of each container. 
left_formatting = sorted(repr(x) for x in left) right_formatting = sorted(repr(x) for x in right) - explanation = [u('Full diff (fallback to calling repr on each item):')] - explanation.extend(line.strip() for line in difflib.ndiff(left_formatting, right_formatting)) + explanation = [u("Full diff (fallback to calling repr on each item):")] + explanation.extend( + line.strip() for line in difflib.ndiff(left_formatting, right_formatting) + ) return explanation @@ -244,16 +258,18 @@ def _compare_eq_sequence(left, right, verbose=False): explanation = [] for i in range(min(len(left), len(right))): if left[i] != right[i]: - explanation += [u('At index %s diff: %r != %r') - % (i, left[i], right[i])] + explanation += [u("At index %s diff: %r != %r") % (i, left[i], right[i])] break if len(left) > len(right): - explanation += [u('Left contains more items, first extra item: %s') - % py.io.saferepr(left[len(right)],)] + explanation += [ + u("Left contains more items, first extra item: %s") + % py.io.saferepr(left[len(right)]) + ] elif len(left) < len(right): explanation += [ - u('Right contains more items, first extra item: %s') % - py.io.saferepr(right[len(left)],)] + u("Right contains more items, first extra item: %s") + % py.io.saferepr(right[len(left)]) + ] return explanation @@ -262,11 +278,11 @@ def _compare_eq_set(left, right, verbose=False): diff_left = left - right diff_right = right - left if diff_left: - explanation.append(u('Extra items in the left set:')) + explanation.append(u("Extra items in the left set:")) for item in diff_left: explanation.append(py.io.saferepr(item)) if diff_right: - explanation.append(u('Extra items in the right set:')) + explanation.append(u("Extra items in the right set:")) for item in diff_right: explanation.append(py.io.saferepr(item)) return explanation @@ -275,29 +291,31 @@ def _compare_eq_set(left, right, verbose=False): def _compare_eq_dict(left, right, verbose=False): explanation = [] common = set(left).intersection(set(right)) - same = dict((k, left[k]) for k in common if left[k] == right[k]) + same = {k: left[k] for k in common if left[k] == right[k]} if same and verbose < 2: - explanation += [u('Omitting %s identical items, use -vv to show') % - len(same)] + explanation += [u("Omitting %s identical items, use -vv to show") % len(same)] elif same: - explanation += [u('Common items:')] + explanation += [u("Common items:")] explanation += pprint.pformat(same).splitlines() - diff = set(k for k in common if left[k] != right[k]) + diff = {k for k in common if left[k] != right[k]} if diff: - explanation += [u('Differing items:')] + explanation += [u("Differing items:")] for k in diff: - explanation += [py.io.saferepr({k: left[k]}) + ' != ' + - py.io.saferepr({k: right[k]})] + explanation += [ + py.io.saferepr({k: left[k]}) + " != " + py.io.saferepr({k: right[k]}) + ] extra_left = set(left) - set(right) if extra_left: - explanation.append(u('Left contains more items:')) - explanation.extend(pprint.pformat( - dict((k, left[k]) for k in extra_left)).splitlines()) + explanation.append(u("Left contains more items:")) + explanation.extend( + pprint.pformat({k: left[k] for k in extra_left}).splitlines() + ) extra_right = set(right) - set(left) if extra_right: - explanation.append(u('Right contains more items:')) - explanation.extend(pprint.pformat( - dict((k, right[k]) for k in extra_right)).splitlines()) + explanation.append(u("Right contains more items:")) + explanation.extend( + pprint.pformat({k: right[k] for k in extra_right}).splitlines() + ) return explanation 
@@ -307,14 +325,14 @@ def _notin_text(term, text, verbose=False): tail = text[index + len(term):] correct_text = head + tail diff = _diff_text(correct_text, text, verbose) - newdiff = [u('%s is contained here:') % py.io.saferepr(term, maxsize=42)] + newdiff = [u("%s is contained here:") % py.io.saferepr(term, maxsize=42)] for line in diff: - if line.startswith(u('Skipping')): + if line.startswith(u("Skipping")): continue - if line.startswith(u('- ')): + if line.startswith(u("- ")): continue - if line.startswith(u('+ ')): - newdiff.append(u(' ') + line[2:]) + if line.startswith(u("+ ")): + newdiff.append(u(" ") + line[2:]) else: newdiff.append(line) return newdiff diff --git a/_pytest/cacheprovider.py b/_pytest/cacheprovider.py index 717c061d4..eb0fcc06f 100755 --- a/_pytest/cacheprovider.py +++ b/_pytest/cacheprovider.py @@ -18,6 +18,7 @@ from os.path import sep as _sep, altsep as _altsep class Cache(object): + def __init__(self, config): self.config = config self._cachedir = Cache.cache_dir_from_config(config) @@ -53,7 +54,7 @@ class Cache(object): return self._cachedir.ensure_dir("d", name) def _getvaluepath(self, key): - return self._cachedir.join('v', *key.split('/')) + return self._cachedir.join("v", *key.split("/")) def get(self, key, default): """ return cached value for the given key. If no value @@ -89,17 +90,18 @@ class Cache(object): path.dirpath().ensure_dir() except (py.error.EEXIST, py.error.EACCES): self.config.warn( - code='I9', message='could not create cache path %s' % (path,) + code="I9", message="could not create cache path %s" % (path,) ) return try: - f = path.open('w') + f = path.open("w") except py.error.ENOTDIR: self.config.warn( - code='I9', message='cache could not write path %s' % (path,)) + code="I9", message="cache could not write path %s" % (path,) + ) else: with f: - self.trace("cache-write %s: %r" % (key, value,)) + self.trace("cache-write %s: %r" % (key, value)) json.dump(value, f, indent=2, sort_keys=True) @@ -108,39 +110,38 @@ class LFPlugin(object): def __init__(self, config): self.config = config - active_keys = 'lf', 'failedfirst' + active_keys = "lf", "failedfirst" self.active = any(config.getoption(key) for key in active_keys) self.lastfailed = config.cache.get("cache/lastfailed", {}) self._previously_failed_count = None - self._no_failures_behavior = self.config.getoption('last_failed_no_failures') + self._no_failures_behavior = self.config.getoption("last_failed_no_failures") def pytest_report_collectionfinish(self): if self.active: if not self._previously_failed_count: - mode = "run {} (no recorded failures)".format(self._no_failures_behavior) + mode = "run {} (no recorded failures)".format( + self._no_failures_behavior + ) else: - noun = 'failure' if self._previously_failed_count == 1 else 'failures' - suffix = " first" if self.config.getoption( - "failedfirst") else "" + noun = "failure" if self._previously_failed_count == 1 else "failures" + suffix = " first" if self.config.getoption("failedfirst") else "" mode = "rerun previous {count} {noun}{suffix}".format( count=self._previously_failed_count, suffix=suffix, noun=noun ) return "run-last-failure: %s" % mode def pytest_runtest_logreport(self, report): - if (report.when == 'call' and report.passed) or report.skipped: + if (report.when == "call" and report.passed) or report.skipped: self.lastfailed.pop(report.nodeid, None) elif report.failed: self.lastfailed[report.nodeid] = True def pytest_collectreport(self, report): - passed = report.outcome in ('passed', 'skipped') + passed = report.outcome 
in ("passed", "skipped") if passed: if report.nodeid in self.lastfailed: self.lastfailed.pop(report.nodeid) - self.lastfailed.update( - (item.nodeid, True) - for item in report.result) + self.lastfailed.update((item.nodeid, True) for item in report.result) else: self.lastfailed[report.nodeid] = True @@ -164,7 +165,7 @@ class LFPlugin(object): config.hook.pytest_deselected(items=previously_passed) else: items[:] = previously_failed + previously_passed - elif self._no_failures_behavior == 'none': + elif self._no_failures_behavior == "none": config.hook.pytest_deselected(items=items) items[:] = [] @@ -196,8 +197,11 @@ class NFPlugin(object): else: other_items[item.nodeid] = item - items[:] = self._get_increasing_order(six.itervalues(new_items)) + \ - self._get_increasing_order(six.itervalues(other_items)) + items[:] = self._get_increasing_order( + six.itervalues(new_items) + ) + self._get_increasing_order( + six.itervalues(other_items) + ) self.cached_nodeids = [x.nodeid for x in items if isinstance(x, pytest.Item)] def _get_increasing_order(self, items): @@ -214,38 +218,59 @@ class NFPlugin(object): def pytest_addoption(parser): group = parser.getgroup("general") group.addoption( - '--lf', '--last-failed', action='store_true', dest="lf", + "--lf", + "--last-failed", + action="store_true", + dest="lf", help="rerun only the tests that failed " - "at the last run (or all if none failed)") + "at the last run (or all if none failed)", + ) group.addoption( - '--ff', '--failed-first', action='store_true', dest="failedfirst", + "--ff", + "--failed-first", + action="store_true", + dest="failedfirst", help="run all tests but run the last failures first. " - "This may re-order tests and thus lead to " - "repeated fixture setup/teardown") + "This may re-order tests and thus lead to " + "repeated fixture setup/teardown", + ) group.addoption( - '--nf', '--new-first', action='store_true', dest="newfirst", + "--nf", + "--new-first", + action="store_true", + dest="newfirst", help="run tests from new files first, then the rest of the tests " - "sorted by file mtime") + "sorted by file mtime", + ) group.addoption( - '--cache-show', action='store_true', dest="cacheshow", - help="show cache contents, don't perform collection or tests") + "--cache-show", + action="store_true", + dest="cacheshow", + help="show cache contents, don't perform collection or tests", + ) group.addoption( - '--cache-clear', action='store_true', dest="cacheclear", - help="remove all cache contents at start of test run.") - parser.addini( - "cache_dir", default='.pytest_cache', - help="cache directory path.") + "--cache-clear", + action="store_true", + dest="cacheclear", + help="remove all cache contents at start of test run.", + ) + parser.addini("cache_dir", default=".pytest_cache", help="cache directory path.") group.addoption( - '--lfnf', '--last-failed-no-failures', action='store', - dest='last_failed_no_failures', choices=('all', 'none'), default='all', - help='change the behavior when no test failed in the last run or no ' - 'information about the last failures was found in the cache' + "--lfnf", + "--last-failed-no-failures", + action="store", + dest="last_failed_no_failures", + choices=("all", "none"), + default="all", + help="change the behavior when no test failed in the last run or no " + "information about the last failures was found in the cache", ) def pytest_cmdline_main(config): if config.option.cacheshow: from _pytest.main import wrap_session + return wrap_session(config, cacheshow) @@ -280,6 +305,7 @@ def 
pytest_report_header(config): def cacheshow(config, session): from pprint import pprint + tw = py.io.TerminalWriter() tw.line("cachedir: " + str(config.cache._cachedir)) if not config.cache._cachedir.check(): @@ -293,8 +319,7 @@ def cacheshow(config, session): key = valpath.relto(vdir).replace(valpath.sep, "/") val = config.cache.get(key, dummy) if val is dummy: - tw.line("%s contains unreadable content, " - "will be ignored" % key) + tw.line("%s contains unreadable content, " "will be ignored" % key) else: tw.line("%s contains:" % key) stream = py.io.TextIO() @@ -310,6 +335,5 @@ def cacheshow(config, session): # print("%s/" % p.relto(basedir)) if p.isfile(): key = p.relto(basedir) - tw.line("%s is a file of length %d" % ( - key, p.size())) + tw.line("%s is a file of length %d" % (key, p.size())) return 0 diff --git a/_pytest/capture.py b/_pytest/capture.py index d71f59ac2..7a57adb75 100644 --- a/_pytest/capture.py +++ b/_pytest/capture.py @@ -17,19 +17,26 @@ import pytest from _pytest.compat import CaptureIO -patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'} +patchsysdict = {0: "stdin", 1: "stdout", 2: "stderr"} def pytest_addoption(parser): group = parser.getgroup("general") group._addoption( - '--capture', action="store", + "--capture", + action="store", default="fd" if hasattr(os, "dup") else "sys", - metavar="method", choices=['fd', 'sys', 'no'], - help="per-test capturing method: one of fd|sys|no.") + metavar="method", + choices=["fd", "sys", "no"], + help="per-test capturing method: one of fd|sys|no.", + ) group._addoption( - '-s', action="store_const", const="no", dest="capture", - help="shortcut for --capture=no.") + "-s", + action="store_const", + const="no", + dest="capture", + help="shortcut for --capture=no.", + ) @pytest.hookimpl(hookwrapper=True) @@ -50,6 +57,7 @@ def pytest_load_initial_conftests(early_config, parser, args): def silence_logging_at_shutdown(): if "logging" in sys.modules: sys.modules["logging"].raiseExceptions = False + early_config.add_cleanup(silence_logging_at_shutdown) # finally trigger conftest loading but while capturing (issue93) @@ -180,18 +188,16 @@ class CaptureManager(object): item.add_report_section(when, "stderr", err) -capture_fixtures = {'capfd', 'capfdbinary', 'capsys', 'capsysbinary'} +capture_fixtures = {"capfd", "capfdbinary", "capsys", "capsysbinary"} def _ensure_only_one_capture_fixture(request, name): - fixtures = set(request.fixturenames) & capture_fixtures - set((name,)) + fixtures = set(request.fixturenames) & capture_fixtures - {name} if fixtures: fixtures = sorted(fixtures) fixtures = fixtures[0] if len(fixtures) == 1 else fixtures raise request.raiseerror( - "cannot use {0} and {1} at the same time".format( - fixtures, name, - ), + "cannot use {} and {} at the same time".format(fixtures, name) ) @@ -202,7 +208,7 @@ def capsys(request): which return a ``(out, err)`` namedtuple. ``out`` and ``err`` will be ``text`` objects. """ - _ensure_only_one_capture_fixture(request, 'capsys') + _ensure_only_one_capture_fixture(request, "capsys") with _install_capture_fixture_on_item(request, SysCapture) as fixture: yield fixture @@ -214,11 +220,11 @@ def capsysbinary(request): which return a ``(out, err)`` tuple. ``out`` and ``err`` will be ``bytes`` objects. """ - _ensure_only_one_capture_fixture(request, 'capsysbinary') + _ensure_only_one_capture_fixture(request, "capsysbinary") # Currently, the implementation uses the python3 specific `.buffer` # property of CaptureIO. 
if sys.version_info < (3,): - raise request.raiseerror('capsysbinary is only supported on python 3') + raise request.raiseerror("capsysbinary is only supported on python 3") with _install_capture_fixture_on_item(request, SysCaptureBinary) as fixture: yield fixture @@ -230,9 +236,11 @@ def capfd(request): which return a ``(out, err)`` tuple. ``out`` and ``err`` will be ``text`` objects. """ - _ensure_only_one_capture_fixture(request, 'capfd') - if not hasattr(os, 'dup'): - pytest.skip("capfd fixture needs os.dup function which is not available in this system") + _ensure_only_one_capture_fixture(request, "capfd") + if not hasattr(os, "dup"): + pytest.skip( + "capfd fixture needs os.dup function which is not available in this system" + ) with _install_capture_fixture_on_item(request, FDCapture) as fixture: yield fixture @@ -244,9 +252,11 @@ def capfdbinary(request): which return a ``(out, err)`` tuple. ``out`` and ``err`` will be ``bytes`` objects. """ - _ensure_only_one_capture_fixture(request, 'capfdbinary') - if not hasattr(os, 'dup'): - pytest.skip("capfdbinary fixture needs os.dup function which is not available in this system") + _ensure_only_one_capture_fixture(request, "capfdbinary") + if not hasattr(os, "dup"): + pytest.skip( + "capfdbinary fixture needs os.dup function which is not available in this system" + ) with _install_capture_fixture_on_item(request, FDCaptureBinary) as fixture: yield fixture @@ -261,7 +271,7 @@ def _install_capture_fixture_on_item(request, capture_class): by ``CaptureManager`` during its ``pytest_runtest_*`` hooks. """ request.node._capture_fixture = fixture = CaptureFixture(capture_class, request) - capmanager = request.config.pluginmanager.getplugin('capturemanager') + capmanager = request.config.pluginmanager.getplugin("capturemanager") # need to active this fixture right away in case it is being used by another fixture (setup phase) # if this fixture is being used only by a test function (call phase), then we wouldn't need this # activation, but it doesn't hurt @@ -276,13 +286,15 @@ class CaptureFixture(object): Object returned by :py:func:`capsys`, :py:func:`capsysbinary`, :py:func:`capfd` and :py:func:`capfdbinary` fixtures. """ + def __init__(self, captureclass, request): self.captureclass = captureclass self.request = request def _start(self): - self._capture = MultiCapture(out=True, err=True, in_=False, - Capture=self.captureclass) + self._capture = MultiCapture( + out=True, err=True, in_=False, Capture=self.captureclass + ) self._capture.start_capturing() def close(self): @@ -305,7 +317,7 @@ class CaptureFixture(object): def disabled(self): """Temporarily disables capture while inside the 'with' block.""" self._capture.suspend_capturing() - capmanager = self.request.config.pluginmanager.getplugin('capturemanager') + capmanager = self.request.config.pluginmanager.getplugin("capturemanager") capmanager.suspend_global_capture(item=None, in_=False) try: yield @@ -346,7 +358,7 @@ class EncodedFile(object): self.buffer.write(obj) def writelines(self, linelist): - data = ''.join(linelist) + data = "".join(linelist) self.write(data) @property @@ -409,7 +421,7 @@ class MultiCapture(object): def stop_capturing(self): """ stop capturing and reset capturing streams """ - if hasattr(self, '_reset'): + if hasattr(self, "_reset"): raise ValueError("was already stopped") self._reset = True if self.out: @@ -421,8 +433,10 @@ class MultiCapture(object): def readouterr(self): """ return snapshot unicode value of stdout/stderr capturings. 
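# --- editor's illustration (not part of the patch): minimal tests exercising
# the capture fixtures documented in the hunks above (hypothetical test module).
def test_print_is_captured(capsys):
    print("hello")
    out, err = capsys.readouterr()  # (out, err) namedtuple of text
    assert out == "hello\n"
    assert err == ""


def test_talk_to_real_terminal(capsys):
    with capsys.disabled():  # suspends capturing for the duration of the block
        print("written straight to the real stdout")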
""" - return CaptureResult(self.out.snap() if self.out is not None else "", - self.err.snap() if self.err is not None else "") + return CaptureResult( + self.out.snap() if self.out is not None else "", + self.err.snap() if self.err is not None else "", + ) class NoCapture(object): @@ -507,6 +521,7 @@ class FDCapture(FDCaptureBinary): snap() produces text """ + def snap(self): res = FDCaptureBinary.snap(self) enc = getattr(self.tmpfile, "encoding", None) @@ -516,6 +531,7 @@ class FDCapture(FDCaptureBinary): class SysCapture(object): + def __init__(self, fd, tmpfile=None): name = patchsysdict[fd] self._old = getattr(sys, name) @@ -553,6 +569,7 @@ class SysCapture(object): class SysCaptureBinary(SysCapture): + def snap(self): res = self.tmpfile.buffer.getvalue() self.tmpfile.seek(0) @@ -572,6 +589,7 @@ class DontReadFromInput(six.Iterator): def read(self, *args): raise IOError("reading from stdin while output is captured") + readline = read readlines = read __next__ = read @@ -580,8 +598,7 @@ class DontReadFromInput(six.Iterator): return self def fileno(self): - raise UnsupportedOperation("redirected stdin is pseudofile, " - "has no fileno()") + raise UnsupportedOperation("redirected stdin is pseudofile, " "has no fileno()") def isatty(self): return False @@ -594,7 +611,7 @@ class DontReadFromInput(six.Iterator): if sys.version_info >= (3, 0): return self else: - raise AttributeError('redirected stdin has no attribute buffer') + raise AttributeError("redirected stdin has no attribute buffer") def _colorama_workaround(): @@ -607,7 +624,7 @@ def _colorama_workaround(): fail in various ways. """ - if not sys.platform.startswith('win32'): + if not sys.platform.startswith("win32"): return try: import colorama # noqa @@ -634,7 +651,7 @@ def _readline_workaround(): See https://github.com/pytest-dev/pytest/pull/1281 """ - if not sys.platform.startswith('win32'): + if not sys.platform.startswith("win32"): return try: import readline # noqa @@ -664,21 +681,21 @@ def _py36_windowsconsoleio_workaround(stream): See https://github.com/pytest-dev/py/issues/103 """ - if not sys.platform.startswith('win32') or sys.version_info[:2] < (3, 6): + if not sys.platform.startswith("win32") or sys.version_info[:2] < (3, 6): return # bail out if ``stream`` doesn't seem like a proper ``io`` stream (#2666) - if not hasattr(stream, 'buffer'): + if not hasattr(stream, "buffer"): return - buffered = hasattr(stream.buffer, 'raw') + buffered = hasattr(stream.buffer, "raw") raw_stdout = stream.buffer.raw if buffered else stream.buffer if not isinstance(raw_stdout, io._WindowsConsoleIO): return def _reopen_stdio(f, mode): - if not buffered and mode[0] == 'w': + if not buffered and mode[0] == "w": buffering = 0 else: buffering = -1 @@ -688,11 +705,12 @@ def _py36_windowsconsoleio_workaround(stream): f.encoding, f.errors, f.newlines, - f.line_buffering) + f.line_buffering, + ) - sys.__stdin__ = sys.stdin = _reopen_stdio(sys.stdin, 'rb') - sys.__stdout__ = sys.stdout = _reopen_stdio(sys.stdout, 'wb') - sys.__stderr__ = sys.stderr = _reopen_stdio(sys.stderr, 'wb') + sys.__stdin__ = sys.stdin = _reopen_stdio(sys.stdin, "rb") + sys.__stdout__ = sys.stdout = _reopen_stdio(sys.stdout, "wb") + sys.__stderr__ = sys.stderr = _reopen_stdio(sys.stderr, "wb") def _attempt_to_close_capture_file(f): diff --git a/_pytest/compat.py b/_pytest/compat.py index abad4f3c5..e4072ecdb 100644 --- a/_pytest/compat.py +++ b/_pytest/compat.py @@ -13,6 +13,8 @@ import py import _pytest from _pytest.outcomes import TEST_OUTCOME +from six import text_type 
+import six try: import enum @@ -36,7 +38,7 @@ NOTSET = object() PY35 = sys.version_info[:2] >= (3, 5) PY36 = sys.version_info[:2] >= (3, 6) -MODULE_NOT_FOUND_ERROR = 'ModuleNotFoundError' if PY36 else 'ImportError' +MODULE_NOT_FOUND_ERROR = "ModuleNotFoundError" if PY36 else "ImportError" if _PY3: from collections.abc import MutableMapping as MappingMixin # noqa @@ -54,9 +56,9 @@ def _format_args(func): isfunction = inspect.isfunction isclass = inspect.isclass # used to work around a python2 exception info leak -exc_clear = getattr(sys, 'exc_clear', lambda: None) +exc_clear = getattr(sys, "exc_clear", lambda: None) # The type of re.compile objects is not exposed in Python. -REGEX_TYPE = type(re.compile('')) +REGEX_TYPE = type(re.compile("")) def is_generator(func): @@ -70,8 +72,13 @@ def iscoroutinefunction(func): Note: copied and modified from Python 3.5's builtin couroutines.py to avoid import asyncio directly, which in turns also initializes the "logging" module as side-effect (see issue #8). """ - return (getattr(func, '_is_coroutine', False) or - (hasattr(inspect, 'iscoroutinefunction') and inspect.iscoroutinefunction(func))) + return ( + getattr(func, "_is_coroutine", False) + or ( + hasattr(inspect, "iscoroutinefunction") + and inspect.iscoroutinefunction(func) + ) + ) def getlocation(function, curdir): @@ -90,8 +97,9 @@ def num_mock_patch_args(function): mock_modules = [sys.modules.get("mock"), sys.modules.get("unittest.mock")] if any(mock_modules): sentinels = [m.DEFAULT for m in mock_modules if m is not None] - return len([p for p in patchings - if not p.attribute_name and p.new in sentinels]) + return len( + [p for p in patchings if not p.attribute_name and p.new in sentinels] + ) return len(patchings) @@ -118,16 +126,25 @@ def getfuncargnames(function, is_method=False, cls=None): # ordered mapping of parameter names to Parameter instances. This # creates a tuple of the names of the parameters that don't have # defaults. - arg_names = tuple(p.name for p in signature(function).parameters.values() - if (p.kind is Parameter.POSITIONAL_OR_KEYWORD or - p.kind is Parameter.KEYWORD_ONLY) and - p.default is Parameter.empty) + arg_names = tuple( + p.name + for p in signature(function).parameters.values() + if ( + p.kind is Parameter.POSITIONAL_OR_KEYWORD + or p.kind is Parameter.KEYWORD_ONLY + ) + and p.default is Parameter.empty + ) # If this function should be treated as a bound method even though # it's passed as an unbound method or function, remove the first # parameter name. - if (is_method or - (cls and not isinstance(cls.__dict__.get(function.__name__, None), - staticmethod))): + if ( + is_method + or ( + cls + and not isinstance(cls.__dict__.get(function.__name__, None), staticmethod) + ) + ): arg_names = arg_names[1:] # Remove any names that will be replaced with mocks. 
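# --- editor's illustration (not part of the patch): the signature-based name
# extraction used by getfuncargnames above, run on a standalone function
# (Python 3 syntax; `example` is hypothetical).
from inspect import Parameter, signature


def example(a, b=1, *args, c, d=2, **kwargs):
    pass


arg_names = tuple(
    p.name
    for p in signature(example).parameters.values()
    if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY)
    and p.default is Parameter.empty
)
assert arg_names == ("a", "c")  # parameters with defaults and *args/**kwargs drop out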
if hasattr(function, "__wrapped__"): @@ -138,27 +155,33 @@ def getfuncargnames(function, is_method=False, cls=None): def get_default_arg_names(function): # Note: this code intentionally mirrors the code at the beginning of getfuncargnames, # to get the arguments which were excluded from its result because they had default values - return tuple(p.name for p in signature(function).parameters.values() - if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY) and - p.default is not Parameter.empty) + return tuple( + p.name + for p in signature(function).parameters.values() + if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY) + and p.default is not Parameter.empty + ) if _PY3: STRING_TYPES = bytes, str - UNICODE_TYPES = str, + UNICODE_TYPES = six.text_type if PY35: + def _bytes_to_ascii(val): - return val.decode('ascii', 'backslashreplace') + return val.decode("ascii", "backslashreplace") + else: + def _bytes_to_ascii(val): if val: # source: http://goo.gl/bGsnwC encoded_bytes, _ = codecs.escape_encode(val) - return encoded_bytes.decode('ascii') + return encoded_bytes.decode("ascii") else: # empty bytes crashes codecs.escape_encode (#1087) - return '' + return "" def ascii_escaped(val): """If val is pure ascii, returns it as a str(). Otherwise, escapes @@ -181,10 +204,12 @@ if _PY3: if isinstance(val, bytes): return _bytes_to_ascii(val) else: - return val.encode('unicode_escape').decode('ascii') + return val.encode("unicode_escape").decode("ascii") + + else: - STRING_TYPES = bytes, str, unicode - UNICODE_TYPES = unicode, + STRING_TYPES = six.string_types + UNICODE_TYPES = six.text_type def ascii_escaped(val): """In py2 bytes and str are the same type, so return if it's a bytes @@ -197,11 +222,11 @@ else: """ if isinstance(val, bytes): try: - return val.encode('ascii') + return val.encode("ascii") except UnicodeDecodeError: - return val.encode('string-escape') + return val.encode("string-escape") else: - return val.encode('unicode-escape') + return val.encode("unicode-escape") def get_real_func(obj): @@ -210,16 +235,16 @@ def get_real_func(obj): """ start_obj = obj for i in range(100): - new_obj = getattr(obj, '__wrapped__', None) + new_obj = getattr(obj, "__wrapped__", None) if new_obj is None: break obj = new_obj else: raise ValueError( - ("could not find real function of {start}" - "\nstopped at {current}").format( - start=py.io.saferepr(start_obj), - current=py.io.saferepr(obj))) + ("could not find real function of {start}" "\nstopped at {current}").format( + start=py.io.saferepr(start_obj), current=py.io.saferepr(obj) + ) + ) if isinstance(obj, functools.partial): obj = obj.func return obj @@ -228,7 +253,7 @@ def get_real_func(obj): def getfslineno(obj): # xxx let decorators etc specify a sane ordering obj = get_real_func(obj) - if hasattr(obj, 'place_as'): + if hasattr(obj, "place_as"): obj = obj.place_as fslineno = _pytest._code.getfslineno(obj) assert isinstance(fslineno[1], int), obj @@ -267,39 +292,44 @@ def _is_unittest_unexpected_success_a_failure(): if _PY3: + def safe_str(v): """returns v as string""" return str(v) + + else: + def safe_str(v): """returns v as string, converting to ascii if necessary""" try: return str(v) except UnicodeError: - if not isinstance(v, unicode): - v = unicode(v) - errors = 'replace' - return v.encode('utf-8', errors) + if not isinstance(v, text_type): + v = text_type(v) + errors = "replace" + return v.encode("utf-8", errors) COLLECT_FAKEMODULE_ATTRIBUTES = ( - 'Collector', - 'Module', - 'Generator', - 'Function', - 
'Instance', - 'Session', - 'Item', - 'Class', - 'File', - '_fillfuncargs', + "Collector", + "Module", + "Generator", + "Function", + "Instance", + "Session", + "Item", + "Class", + "File", + "_fillfuncargs", ) def _setup_collect_fakemodule(): from types import ModuleType import pytest - pytest.collect = ModuleType('pytest.collect') + + pytest.collect = ModuleType("pytest.collect") pytest.collect.__all__ = [] # used for setns for attr in COLLECT_FAKEMODULE_ATTRIBUTES: setattr(pytest.collect, attr, getattr(pytest, attr)) @@ -313,26 +343,28 @@ if _PY2: @property def encoding(self): - return getattr(self, '_encoding', 'UTF-8') + return getattr(self, "_encoding", "UTF-8") + else: import io class CaptureIO(io.TextIOWrapper): + def __init__(self): super(CaptureIO, self).__init__( - io.BytesIO(), - encoding='UTF-8', newline='', write_through=True, + io.BytesIO(), encoding="UTF-8", newline="", write_through=True ) def getvalue(self): - return self.buffer.getvalue().decode('UTF-8') + return self.buffer.getvalue().decode("UTF-8") class FuncargnamesCompatAttr(object): """ helper class so that Metafunc, Function and FixtureRequest don't need to each define the "funcargnames" compatibility attribute. """ + @property def funcargnames(self): """ alias attribute for ``fixturenames`` for pre-2.3 compatibility""" diff --git a/_pytest/config.py b/_pytest/config.py index 86632ed64..53e74cf1c 100644 --- a/_pytest/config.py +++ b/_pytest/config.py @@ -8,6 +8,7 @@ import warnings import copy import six import py + # DON't import pytest here because it causes import cycle troubles import sys import os @@ -27,6 +28,7 @@ hookspec = HookspecMarker("pytest") class ConftestImportFailure(Exception): + def __init__(self, path, excinfo): Exception.__init__(self, path, excinfo) self.path = path @@ -36,7 +38,7 @@ class ConftestImportFailure(Exception): etype, evalue, etb = self.excinfo formatted = traceback.format_tb(etb) # The level of the tracebacks we want to print is hand crafted :( - return repr(evalue) + '\n' + ''.join(formatted[2:]) + return repr(evalue) + "\n" + "".join(formatted[2:]) def main(args=None, plugins=None): @@ -89,7 +91,7 @@ def filename_arg(path, optname): :optname: name of the option """ if os.path.isdir(path): - raise UsageError("{0} must be a filename, given: {1}".format(optname, path)) + raise UsageError("{} must be a filename, given: {}".format(optname, path)) return path @@ -100,7 +102,7 @@ def directory_arg(path, optname): :optname: name of the option """ if not os.path.isdir(path): - raise UsageError("{0} must be a directory, given: {1}".format(optname, path)) + raise UsageError("{} must be a directory, given: {}".format(optname, path)) return path @@ -108,7 +110,8 @@ default_plugins = ( "mark main terminal runner python fixtures debugging unittest capture skipping " "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion " "junitxml resultlog doctest cacheprovider freeze_support " - "setuponly setupplan warnings logging").split() + "setuponly setupplan warnings logging" +).split() builtin_plugins = set(default_plugins) @@ -147,6 +150,7 @@ def _prepareconfig(args=None, plugins=None): raise ValueError("not a string or argument list: %r" % (args,)) args = shlex.split(args, posix=sys.platform != "win32") from _pytest import deprecated + warning = deprecated.MAIN_STR_ARGS config = get_config() pluginmanager = config.pluginmanager @@ -158,9 +162,10 @@ def _prepareconfig(args=None, plugins=None): else: pluginmanager.register(plugin) if warning: - config.warn('C1', warning) + config.warn("C1", 
warning) return pluginmanager.hook.pytest_cmdline_parse( - pluginmanager=pluginmanager, args=args) + pluginmanager=pluginmanager, args=args + ) except BaseException: config._ensure_unconfigure() raise @@ -189,9 +194,9 @@ class PytestPluginManager(PluginManager): self.add_hookspecs(_pytest.hookspec) self.register(self) - if os.environ.get('PYTEST_DEBUG'): + if os.environ.get("PYTEST_DEBUG"): err = sys.stderr - encoding = getattr(err, 'encoding', 'utf8') + encoding = getattr(err, "encoding", "utf8") try: err = py.io.dupfile(err, encoding=encoding) except Exception: @@ -211,11 +216,13 @@ class PytestPluginManager(PluginManager): Use :py:meth:`pluggy.PluginManager.add_hookspecs ` instead. """ - warning = dict(code="I2", - fslocation=_pytest._code.getfslineno(sys._getframe(1)), - nodeid=None, - message="use pluginmanager.add_hookspecs instead of " - "deprecated addhooks() method.") + warning = dict( + code="I2", + fslocation=_pytest._code.getfslineno(sys._getframe(1)), + nodeid=None, + message="use pluginmanager.add_hookspecs instead of " + "deprecated addhooks() method.", + ) self._warn(warning) return self.add_hookspecs(module_or_class) @@ -243,24 +250,31 @@ class PytestPluginManager(PluginManager): def parse_hookspec_opts(self, module_or_class, name): opts = super(PytestPluginManager, self).parse_hookspec_opts( - module_or_class, name) + module_or_class, name + ) if opts is None: method = getattr(module_or_class, name) if name.startswith("pytest_"): - opts = {"firstresult": hasattr(method, "firstresult"), - "historic": hasattr(method, "historic")} + opts = { + "firstresult": hasattr(method, "firstresult"), + "historic": hasattr(method, "historic"), + } return opts def register(self, plugin, name=None): - if name in ['pytest_catchlog', 'pytest_capturelog']: - self._warn('{0} plugin has been merged into the core, ' - 'please remove it from your requirements.'.format( - name.replace('_', '-'))) + if name in ["pytest_catchlog", "pytest_capturelog"]: + self._warn( + "{} plugin has been merged into the core, " + "please remove it from your requirements.".format( + name.replace("_", "-") + ) + ) return ret = super(PytestPluginManager, self).register(plugin, name) if ret: self.hook.pytest_plugin_registered.call_historic( - kwargs=dict(plugin=plugin, manager=self)) + kwargs=dict(plugin=plugin, manager=self) + ) if isinstance(plugin, types.ModuleType): self.consider_module(plugin) @@ -277,20 +291,21 @@ class PytestPluginManager(PluginManager): def pytest_configure(self, config): # XXX now that the pluginmanager exposes hookimpl(tryfirst...) 
# we should remove tryfirst/trylast as markers - config.addinivalue_line("markers", - "tryfirst: mark a hook implementation function such that the " - "plugin machinery will try to call it first/as early as possible.") - config.addinivalue_line("markers", - "trylast: mark a hook implementation function such that the " - "plugin machinery will try to call it last/as late as possible.") + config.addinivalue_line( + "markers", + "tryfirst: mark a hook implementation function such that the " + "plugin machinery will try to call it first/as early as possible.", + ) + config.addinivalue_line( + "markers", + "trylast: mark a hook implementation function such that the " + "plugin machinery will try to call it last/as late as possible.", + ) self._configured = True def _warn(self, message): kwargs = message if isinstance(message, dict) else { - 'code': 'I1', - 'message': message, - 'fslocation': None, - 'nodeid': None, + "code": "I1", "message": message, "fslocation": None, "nodeid": None } self.hook.pytest_logwarning.call_historic(kwargs=kwargs) @@ -306,8 +321,9 @@ class PytestPluginManager(PluginManager): here. """ current = py.path.local() - self._confcutdir = current.join(namespace.confcutdir, abs=True) \ - if namespace.confcutdir else None + self._confcutdir = current.join( + namespace.confcutdir, abs=True + ) if namespace.confcutdir else None self._noconftest = namespace.noconftest testpaths = namespace.file_or_dir foundanchor = False @@ -374,8 +390,9 @@ class PytestPluginManager(PluginManager): _ensure_removed_sysmodule(conftestpath.purebasename) try: mod = conftestpath.pyimport() - if hasattr(mod, 'pytest_plugins') and self._configured: + if hasattr(mod, "pytest_plugins") and self._configured: from _pytest.deprecated import PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST + warnings.warn(PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST) except Exception: raise ConftestImportFailure(conftestpath, sys.exc_info()) @@ -418,7 +435,7 @@ class PytestPluginManager(PluginManager): self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS")) def consider_module(self, mod): - self._import_plugin_specs(getattr(mod, 'pytest_plugins', [])) + self._import_plugin_specs(getattr(mod, "pytest_plugins", [])) def _import_plugin_specs(self, spec): plugins = _get_plugin_specs_as_list(spec) @@ -430,7 +447,9 @@ class PytestPluginManager(PluginManager): # "terminal" or "capture". Those plugins are registered under their # basename for historic purposes but must be imported with the # _pytest prefix. 
- assert isinstance(modname, (six.text_type, str)), "module name as text required, got %r" % modname + assert isinstance(modname, (six.text_type, str)), ( + "module name as text required, got %r" % modname + ) modname = str(modname) if self.is_blocked(modname) or self.get_plugin(modname) is not None: return @@ -443,7 +462,9 @@ class PytestPluginManager(PluginManager): __import__(importspec) except ImportError as e: new_exc_type = ImportError - new_exc_message = 'Error importing plugin "%s": %s' % (modname, safe_str(e.args[0])) + new_exc_message = 'Error importing plugin "%s": %s' % ( + modname, safe_str(e.args[0]) + ) new_exc = new_exc_type(new_exc_message) six.reraise(new_exc_type, new_exc, sys.exc_info()[2]) @@ -465,10 +486,12 @@ def _get_plugin_specs_as_list(specs): """ if specs is not None: if isinstance(specs, str): - specs = specs.split(',') if specs else [] + specs = specs.split(",") if specs else [] if not isinstance(specs, (list, tuple)): - raise UsageError("Plugin specs must be a ','-separated string or a " - "list/tuple of strings for plugin names. Given: %r" % specs) + raise UsageError( + "Plugin specs must be a ','-separated string or a " + "list/tuple of strings for plugin names. Given: %r" % specs + ) return list(specs) return [] @@ -535,12 +558,14 @@ class Parser(object): def parse(self, args, namespace=None): from _pytest._argcomplete import try_argcomplete + self.optparser = self._getparser() try_argcomplete(self.optparser) return self.optparser.parse_args([str(x) for x in args], namespace=namespace) def _getparser(self): from _pytest._argcomplete import filescompleter + optparser = MyOptionParser(self, self.extra_info) groups = self._groups + [self._anonymous] for group in groups: @@ -552,7 +577,7 @@ class Parser(object): a = option.attrs() arggroup.add_argument(*n, **a) # bash like autocompletion for dirs (appending '/') - optparser.add_argument(FILE_OR_DIR, nargs='*').completer = filescompleter + optparser.add_argument(FILE_OR_DIR, nargs="*").completer = filescompleter return optparser def parse_setoption(self, args, option, namespace=None): @@ -615,77 +640,74 @@ class Argument(object): and ignoring choices and integer prefixes https://docs.python.org/3/library/optparse.html#optparse-standard-option-types """ - _typ_map = { - 'int': int, - 'string': str, - 'float': float, - 'complex': complex, - } + _typ_map = {"int": int, "string": str, "float": float, "complex": complex} def __init__(self, *names, **attrs): """store parms in private vars for use in add_argument""" self._attrs = attrs self._short_opts = [] self._long_opts = [] - self.dest = attrs.get('dest') - if '%default' in (attrs.get('help') or ''): + self.dest = attrs.get("dest") + if "%default" in (attrs.get("help") or ""): warnings.warn( 'pytest now uses argparse. "%default" should be' ' changed to "%(default)s" ', DeprecationWarning, - stacklevel=3) + stacklevel=3, + ) try: - typ = attrs['type'] + typ = attrs["type"] except KeyError: pass else: # this might raise a keyerror as well, don't want to catch that if isinstance(typ, six.string_types): - if typ == 'choice': + if typ == "choice": warnings.warn( - 'type argument to addoption() is a string %r.' - ' For parsearg this is optional and when supplied' - ' should be a type.' - ' (options: %s)' % (typ, names), + "type argument to addoption() is a string %r." + " For parsearg this is optional and when supplied" + " should be a type." 
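# --- editor's illustration (not part of the patch): behaviour of the private
# helper _get_plugin_specs_as_list defined in the hunk above.
from _pytest.config import _get_plugin_specs_as_list

assert _get_plugin_specs_as_list(None) == []
assert _get_plugin_specs_as_list("") == []
assert _get_plugin_specs_as_list("pytest_foo,pytest_bar") == ["pytest_foo", "pytest_bar"]
assert _get_plugin_specs_as_list(("a", "b")) == ["a", "b"]
# any other type (a dict, say) raises UsageError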
+ " (options: %s)" % (typ, names), DeprecationWarning, - stacklevel=3) + stacklevel=3, + ) # argparse expects a type here take it from # the type of the first element - attrs['type'] = type(attrs['choices'][0]) + attrs["type"] = type(attrs["choices"][0]) else: warnings.warn( - 'type argument to addoption() is a string %r.' - ' For parsearg this should be a type.' - ' (options: %s)' % (typ, names), + "type argument to addoption() is a string %r." + " For parsearg this should be a type." + " (options: %s)" % (typ, names), DeprecationWarning, - stacklevel=3) - attrs['type'] = Argument._typ_map[typ] + stacklevel=3, + ) + attrs["type"] = Argument._typ_map[typ] # used in test_parseopt -> test_parse_defaultgetter - self.type = attrs['type'] + self.type = attrs["type"] else: self.type = typ try: # attribute existence is tested in Config._processopt - self.default = attrs['default'] + self.default = attrs["default"] except KeyError: pass self._set_opt_strings(names) if not self.dest: if self._long_opts: - self.dest = self._long_opts[0][2:].replace('-', '_') + self.dest = self._long_opts[0][2:].replace("-", "_") else: try: self.dest = self._short_opts[0][1:] except IndexError: - raise ArgumentError( - 'need a long or short option', self) + raise ArgumentError("need a long or short option", self) def names(self): return self._short_opts + self._long_opts def attrs(self): # update any attributes set by processopt - attrs = 'default dest help'.split() + attrs = "default dest help".split() if self.dest: attrs.append(self.dest) for attr in attrs: @@ -693,11 +715,11 @@ class Argument(object): self._attrs[attr] = getattr(self, attr) except AttributeError: pass - if self._attrs.get('help'): - a = self._attrs['help'] - a = a.replace('%default', '%(default)s') + if self._attrs.get("help"): + a = self._attrs["help"] + a = a.replace("%default", "%(default)s") # a = a.replace('%prog', '%(prog)s') - self._attrs['help'] = a + self._attrs["help"] = a return self._attrs def _set_opt_strings(self, opts): @@ -708,37 +730,42 @@ class Argument(object): if len(opt) < 2: raise ArgumentError( "invalid option string %r: " - "must be at least two characters long" % opt, self) + "must be at least two characters long" % opt, + self, + ) elif len(opt) == 2: if not (opt[0] == "-" and opt[1] != "-"): raise ArgumentError( "invalid short option string %r: " "must be of the form -x, (x any non-dash char)" % opt, - self) + self, + ) self._short_opts.append(opt) else: if not (opt[0:2] == "--" and opt[2] != "-"): raise ArgumentError( "invalid long option string %r: " "must start with --, followed by non-dash" % opt, - self) + self, + ) self._long_opts.append(opt) def __repr__(self): args = [] if self._short_opts: - args += ['_short_opts: ' + repr(self._short_opts)] + args += ["_short_opts: " + repr(self._short_opts)] if self._long_opts: - args += ['_long_opts: ' + repr(self._long_opts)] - args += ['dest: ' + repr(self.dest)] - if hasattr(self, 'type'): - args += ['type: ' + repr(self.type)] - if hasattr(self, 'default'): - args += ['default: ' + repr(self.default)] - return 'Argument({0})'.format(', '.join(args)) + args += ["_long_opts: " + repr(self._long_opts)] + args += ["dest: " + repr(self.dest)] + if hasattr(self, "type"): + args += ["type: " + repr(self.type)] + if hasattr(self, "default"): + args += ["default: " + repr(self.default)] + return "Argument({})".format(", ".join(args)) class OptionGroup(object): + def __init__(self, name, description="", parser=None): self.name = name self.description = description @@ -754,7 +781,8 
@@ class OptionGroup(object): accepted **and** the automatic destination is in args.twowords """ conflict = set(optnames).intersection( - name for opt in self.options for name in opt.names()) + name for opt in self.options for name in opt.names() + ) if conflict: raise ValueError("option names %s already added" % conflict) option = Argument(*optnames, **attrs) @@ -767,7 +795,7 @@ class OptionGroup(object): def _addoption_instance(self, option, shortupper=False): if not shortupper: for opt in option._short_opts: - if opt[0] == '-' and opt[1].islower(): + if opt[0] == "-" and opt[1].islower(): raise ValueError("lowercase shortoptions reserved") if self.parser: self.parser.processoption(option) @@ -775,12 +803,17 @@ class OptionGroup(object): class MyOptionParser(argparse.ArgumentParser): + def __init__(self, parser, extra_info=None): if not extra_info: extra_info = {} self._parser = parser - argparse.ArgumentParser.__init__(self, usage=parser._usage, - add_help=False, formatter_class=DropShorterLongHelpFormatter) + argparse.ArgumentParser.__init__( + self, + usage=parser._usage, + add_help=False, + formatter_class=DropShorterLongHelpFormatter, + ) # extra_info is a dict of (param -> value) to display if there's # an usage error to provide more contextual information to the user self.extra_info = extra_info @@ -790,11 +823,11 @@ class MyOptionParser(argparse.ArgumentParser): args, argv = self.parse_known_args(args, namespace) if argv: for arg in argv: - if arg and arg[0] == '-': - lines = ['unrecognized arguments: %s' % (' '.join(argv))] + if arg and arg[0] == "-": + lines = ["unrecognized arguments: %s" % (" ".join(argv))] for k, v in sorted(self.extra_info.items()): - lines.append(' %s: %s' % (k, v)) - self.error('\n'.join(lines)) + lines.append(" %s: %s" % (k, v)) + self.error("\n".join(lines)) getattr(args, FILE_OR_DIR).extend(argv) return args @@ -811,41 +844,44 @@ class DropShorterLongHelpFormatter(argparse.HelpFormatter): def _format_action_invocation(self, action): orgstr = argparse.HelpFormatter._format_action_invocation(self, action) - if orgstr and orgstr[0] != '-': # only optional arguments + if orgstr and orgstr[0] != "-": # only optional arguments return orgstr - res = getattr(action, '_formatted_action_invocation', None) + res = getattr(action, "_formatted_action_invocation", None) if res: return res - options = orgstr.split(', ') + options = orgstr.split(", ") if len(options) == 2 and (len(options[0]) == 2 or len(options[1]) == 2): # a shortcut for '-h, --help' or '--abc', '-a' action._formatted_action_invocation = orgstr return orgstr return_list = [] - option_map = getattr(action, 'map_long_option', {}) + option_map = getattr(action, "map_long_option", {}) if option_map is None: option_map = {} short_long = {} for option in options: - if len(option) == 2 or option[2] == ' ': + if len(option) == 2 or option[2] == " ": continue - if not option.startswith('--'): - raise ArgumentError('long optional argument without "--": [%s]' - % (option), self) + if not option.startswith("--"): + raise ArgumentError( + 'long optional argument without "--": [%s]' % (option), self + ) xxoption = option[2:] if xxoption.split()[0] not in option_map: - shortened = xxoption.replace('-', '') - if shortened not in short_long or \ - len(short_long[shortened]) < len(xxoption): + shortened = xxoption.replace("-", "") + if ( + shortened not in short_long + or len(short_long[shortened]) < len(xxoption) + ): short_long[shortened] = xxoption # now short_long has been filled out to the longest with dashes # 
**and** we keep the right option ordering from add_argument for option in options: - if len(option) == 2 or option[2] == ' ': + if len(option) == 2 or option[2] == " ": return_list.append(option) - if option[2:] == short_long.get(option.replace('-', '')): - return_list.append(option.replace(' ', '=', 1)) - action._formatted_action_invocation = ', '.join(return_list) + if option[2:] == short_long.get(option.replace("-", "")): + return_list.append(option.replace(" ", "=", 1)) + action._formatted_action_invocation = ", ".join(return_list) return action._formatted_action_invocation @@ -857,18 +893,19 @@ def _ensure_removed_sysmodule(modname): class Notset(object): + def __repr__(self): return "" notset = Notset() -FILE_OR_DIR = 'file_or_dir' +FILE_OR_DIR = "file_or_dir" def _iter_rewritable_modules(package_files): for fn in package_files: - is_simple_module = '/' not in fn and fn.endswith('.py') - is_package = fn.count('/') == 1 and fn.endswith('__init__.py') + is_simple_module = "/" not in fn and fn.endswith(".py") + is_package = fn.count("/") == 1 and fn.endswith("__init__.py") if is_simple_module: module_name, _ = os.path.splitext(fn) yield module_name @@ -903,6 +940,7 @@ class Config(object): def do_setns(dic): import pytest + setns(pytest, dic) self.hook.pytest_namespace.call_historic(do_setns, {}) @@ -929,9 +967,11 @@ class Config(object): def warn(self, code, message, fslocation=None, nodeid=None): """ generate a warning for this test session. """ - self.hook.pytest_logwarning.call_historic(kwargs=dict( - code=code, message=message, - fslocation=fslocation, nodeid=nodeid)) + self.hook.pytest_logwarning.call_historic( + kwargs=dict( + code=code, message=message, fslocation=fslocation, nodeid=nodeid + ) + ) def get_terminal_writer(self): return self.pluginmanager.get_plugin("terminalreporter")._tw @@ -946,12 +986,10 @@ class Config(object): style = "long" else: style = "native" - excrepr = excinfo.getrepr(funcargs=True, - showlocals=getattr(option, 'showlocals', False), - style=style, - ) - res = self.hook.pytest_internalerror(excrepr=excrepr, - excinfo=excinfo) + excrepr = excinfo.getrepr( + funcargs=True, showlocals=getattr(option, "showlocals", False), style=style + ) + res = self.hook.pytest_internalerror(excrepr=excrepr, excinfo=excinfo) if not any(res): for line in str(excrepr).split("\n"): sys.stderr.write("INTERNALERROR> %s\n" % line) @@ -978,7 +1016,7 @@ class Config(object): for name in opt._short_opts + opt._long_opts: self._opt2dest[name] = opt.dest - if hasattr(opt, 'default') and opt.dest: + if hasattr(opt, "default") and opt.dest: if not hasattr(self.option, opt.dest): setattr(self.option, opt.dest, opt.default) @@ -987,15 +1025,21 @@ class Config(object): self.pluginmanager._set_initial_conftests(early_config.known_args_namespace) def _initini(self, args): - ns, unknown_args = self._parser.parse_known_and_unknown_args(args, namespace=copy.copy(self.option)) - r = determine_setup(ns.inifilename, ns.file_or_dir + unknown_args, warnfunc=self.warn, - rootdir_cmd_arg=ns.rootdir or None) + ns, unknown_args = self._parser.parse_known_and_unknown_args( + args, namespace=copy.copy(self.option) + ) + r = determine_setup( + ns.inifilename, + ns.file_or_dir + unknown_args, + warnfunc=self.warn, + rootdir_cmd_arg=ns.rootdir or None, + ) self.rootdir, self.inifile, self.inicfg = r - self._parser.extra_info['rootdir'] = self.rootdir - self._parser.extra_info['inifile'] = self.inifile + self._parser.extra_info["rootdir"] = self.rootdir + self._parser.extra_info["inifile"] = self.inifile 
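# --- editor's illustration (not part of the patch): what the private helper
# _iter_rewritable_modules above yields for typical RECORD entries.
from _pytest.config import _iter_rewritable_modules

assert list(_iter_rewritable_modules(["foo.py", "bar/baz.py"])) == ["foo"]
# "pkg/__init__.py" would satisfy the is_package test; that branch is elided by
# the hunk context above and yields the package name "pkg".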
self.invocation_dir = py.path.local() - self._parser.addini('addopts', 'extra command line options', 'args') - self._parser.addini('minversion', 'minimally required pytest version') + self._parser.addini("addopts", "extra command line options", "args") + self._parser.addini("minversion", "minimally required pytest version") self._override_ini = ns.override_ini or () def _consider_importhook(self, args): @@ -1007,11 +1051,11 @@ """ ns, unknown_args = self._parser.parse_known_and_unknown_args(args) mode = ns.assertmode - if mode == 'rewrite': + if mode == "rewrite": try: hook = _pytest.assertion.install_importhook(self) except SystemError: - mode = 'plain' + mode = "plain" else: self._mark_plugins_for_rewrite(hook) _warn_about_missing_assertion(mode) @@ -1023,17 +1067,18 @@ all pytest plugins. """ import pkg_resources + self.pluginmanager.rewrite_hook = hook # 'RECORD' available for plugins installed normally (pip install) # 'SOURCES.txt' available for plugins installed in dev mode (pip install -e) # for installed plugins 'SOURCES.txt' returns an empty list, and vice-versa # so it shouldn't be an issue - metadata_files = 'RECORD', 'SOURCES.txt' + metadata_files = "RECORD", "SOURCES.txt" package_files = ( - entry.split(',')[0] - for entrypoint in pkg_resources.iter_entry_points('pytest11') + entry.split(",")[0] + for entrypoint in pkg_resources.iter_entry_points("pytest11") for metadata in metadata_files for entry in entrypoint.dist._get_metadata(metadata) ) @@ -1043,23 +1088,25 @@ def _preparse(self, args, addopts=True): if addopts: - args[:] = shlex.split(os.environ.get('PYTEST_ADDOPTS', '')) + args + args[:] = shlex.split(os.environ.get("PYTEST_ADDOPTS", "")) + args self._initini(args) if addopts: args[:] = self.getini("addopts") + args self._checkversion() self._consider_importhook(args) self.pluginmanager.consider_preparse(args) - self.pluginmanager.load_setuptools_entrypoints('pytest11') + self.pluginmanager.load_setuptools_entrypoints("pytest11") self.pluginmanager.consider_env() self.known_args_namespace = ns = self._parser.parse_known_args( - args, namespace=copy.copy(self.option)) + args, namespace=copy.copy(self.option) + ) if self.known_args_namespace.confcutdir is None and self.inifile: confcutdir = py.path.local(self.inifile).dirname self.known_args_namespace.confcutdir = confcutdir try: - self.hook.pytest_load_initial_conftests(early_config=self, - args=args, parser=self._parser) + self.hook.pytest_load_initial_conftests( + early_config=self, args=args, parser=self._parser + ) except ConftestImportFailure: e = sys.exc_info()[1] if ns.help or ns.version: @@ -1071,33 +1118,43 @@ def _checkversion(self): import pytest - minver = self.inicfg.get('minversion', None) + + minver = self.inicfg.get("minversion", None) if minver: ver = minver.split(".") myver = pytest.__version__.split(".") if myver < ver: raise pytest.UsageError( - "%s:%d: requires pytest-%s, actual pytest-%s'" % ( - self.inicfg.config.path, self.inicfg.lineof('minversion'), - minver, pytest.__version__)) + "%s:%d: requires pytest-%s, actual pytest-%s" + % ( + self.inicfg.config.path, + self.inicfg.lineof("minversion"), + minver, + pytest.__version__, + ) + ) def parse(self, args, addopts=True): # parse given cmdline arguments into this config object.
- assert not hasattr(self, 'args'), ( - "can only parse cmdline args at most once per Config object") + assert not hasattr( + self, "args" + ), "can only parse cmdline args at most once per Config object" self._origargs = args self.hook.pytest_addhooks.call_historic( - kwargs=dict(pluginmanager=self.pluginmanager)) + kwargs=dict(pluginmanager=self.pluginmanager) + ) self._preparse(args, addopts=addopts) # XXX deprecated hook: self.hook.pytest_cmdline_preparse(config=self, args=args) self._parser.after_preparse = True try: - args = self._parser.parse_setoption(args, self.option, namespace=self.option) + args = self._parser.parse_setoption( + args, self.option, namespace=self.option + ) if not args: cwd = os.getcwd() if cwd == self.rootdir: - args = self.getini('testpaths') + args = self.getini("testpaths") if not args: args = [cwd] self.args = args @@ -1136,7 +1193,7 @@ class Config(object): if default is not None: return default if type is None: - return '' + return "" return [] if type == "pathlist": dp = py.path.local(self.inicfg.config.path).dirpath() @@ -1203,6 +1260,7 @@ class Config(object): return default if skip: import pytest + pytest.skip("no %r option found" % (name,)) raise ValueError("no option named %r" % (name,)) @@ -1226,16 +1284,20 @@ def _assertion_supported(): def _warn_about_missing_assertion(mode): if not _assertion_supported(): - if mode == 'plain': - sys.stderr.write("WARNING: ASSERTIONS ARE NOT EXECUTED" - " and FAILING TESTS WILL PASS. Are you" - " using python -O?") + if mode == "plain": + sys.stderr.write( + "WARNING: ASSERTIONS ARE NOT EXECUTED" + " and FAILING TESTS WILL PASS. Are you" + " using python -O?" + ) else: - sys.stderr.write("WARNING: assertions not in test modules or" - " plugins will be ignored" - " because assert statements are not executed " - "by the underlying Python interpreter " - "(are you using python -O?)\n") + sys.stderr.write( + "WARNING: assertions not in test modules or" + " plugins will be ignored" + " because assert statements are not executed " + "by the underlying Python interpreter " + "(are you using python -O?)\n" + ) def exists(path, ignore=EnvironmentError): @@ -1256,6 +1318,7 @@ def getcfg(args, warnfunc=None): adopts standard deprecation warnings (#1804). 
""" from _pytest.deprecated import CFG_PYTEST_SECTION + inibasenames = ["pytest.ini", "tox.ini", "setup.cfg"] args = [x for x in args if not str(x).startswith("-")] if not args: @@ -1267,12 +1330,17 @@ def getcfg(args, warnfunc=None): p = base.join(inibasename) if exists(p): iniconfig = py.iniconfig.IniConfig(p) - if 'pytest' in iniconfig.sections: - if inibasename == 'setup.cfg' and warnfunc: - warnfunc('C1', CFG_PYTEST_SECTION.format(filename=inibasename)) - return base, p, iniconfig['pytest'] - if inibasename == 'setup.cfg' and 'tool:pytest' in iniconfig.sections: - return base, p, iniconfig['tool:pytest'] + if "pytest" in iniconfig.sections: + if inibasename == "setup.cfg" and warnfunc: + warnfunc( + "C1", CFG_PYTEST_SECTION.format(filename=inibasename) + ) + return base, p, iniconfig["pytest"] + if ( + inibasename == "setup.cfg" + and "tool:pytest" in iniconfig.sections + ): + return base, p, iniconfig["tool:pytest"] elif inibasename == "pytest.ini": # allowed to be empty return base, p, {} @@ -1303,11 +1371,12 @@ def get_common_ancestor(paths): def get_dirs_from_args(args): + def is_option(x): - return str(x).startswith('-') + return str(x).startswith("-") def get_file_part_from_node_id(x): - return str(x).split('::')[0] + return str(x).split("::")[0] def get_dir_from_path(path): if path.isdir(): @@ -1321,26 +1390,23 @@ def get_dirs_from_args(args): if not is_option(arg) ) - return [ - get_dir_from_path(path) - for path in possible_paths - if path.exists() - ] + return [get_dir_from_path(path) for path in possible_paths if path.exists()] def determine_setup(inifile, args, warnfunc=None, rootdir_cmd_arg=None): dirs = get_dirs_from_args(args) if inifile: iniconfig = py.iniconfig.IniConfig(inifile) - is_cfg_file = str(inifile).endswith('.cfg') + is_cfg_file = str(inifile).endswith(".cfg") # TODO: [pytest] section in *.cfg files is depricated. Need refactoring. - sections = ['tool:pytest', 'pytest'] if is_cfg_file else ['pytest'] + sections = ["tool:pytest", "pytest"] if is_cfg_file else ["pytest"] for section in sections: try: inicfg = iniconfig[section] - if is_cfg_file and section == 'pytest' and warnfunc: + if is_cfg_file and section == "pytest" and warnfunc: from _pytest.deprecated import CFG_PYTEST_SECTION - warnfunc('C1', CFG_PYTEST_SECTION.format(filename=str(inifile))) + + warnfunc("C1", CFG_PYTEST_SECTION.format(filename=str(inifile))) break except KeyError: inicfg = None @@ -1356,19 +1422,24 @@ def determine_setup(inifile, args, warnfunc=None, rootdir_cmd_arg=None): rootdir, inifile, inicfg = getcfg(dirs, warnfunc=warnfunc) if rootdir is None: rootdir = get_common_ancestor([py.path.local(), ancestor]) - is_fs_root = os.path.splitdrive(str(rootdir))[1] == '/' + is_fs_root = os.path.splitdrive(str(rootdir))[1] == "/" if is_fs_root: rootdir = ancestor if rootdir_cmd_arg: rootdir_abs_path = py.path.local(os.path.expandvars(rootdir_cmd_arg)) if not os.path.isdir(str(rootdir_abs_path)): - raise UsageError("Directory '{}' not found. Check your '--rootdir' option.".format(rootdir_abs_path)) + raise UsageError( + "Directory '{}' not found. Check your '--rootdir' option.".format( + rootdir_abs_path + ) + ) rootdir = rootdir_abs_path return rootdir, inifile, inicfg or {} def setns(obj, dic): import pytest + for name, value in dic.items(): if isinstance(value, dict): mod = getattr(obj, name, None) @@ -1394,9 +1465,9 @@ def create_terminal_writer(config, *args, **kwargs): and has access to a config object should use this function. 
""" tw = py.io.TerminalWriter(*args, **kwargs) - if config.option.color == 'yes': + if config.option.color == "yes": tw.hasmarkup = True - if config.option.color == 'no': + if config.option.color == "no": tw.hasmarkup = False return tw @@ -1411,9 +1482,9 @@ def _strtobool(val): .. note:: copied from distutils.util """ val = val.lower() - if val in ('y', 'yes', 't', 'true', 'on', '1'): + if val in ("y", "yes", "t", "true", "on", "1"): return 1 - elif val in ('n', 'no', 'f', 'false', 'off', '0'): + elif val in ("n", "no", "f", "false", "off", "0"): return 0 else: raise ValueError("invalid truth value %r" % (val,)) diff --git a/_pytest/debugging.py b/_pytest/debugging.py index a30ca4753..2e253aaa2 100644 --- a/_pytest/debugging.py +++ b/_pytest/debugging.py @@ -7,6 +7,7 @@ from doctest import UnexpectedException try: from builtins import breakpoint # noqa + SUPPORTS_BREAKPOINT_BUILTIN = True except ImportError: SUPPORTS_BREAKPOINT_BUILTIN = False @@ -15,12 +16,18 @@ except ImportError: def pytest_addoption(parser): group = parser.getgroup("general") group._addoption( - '--pdb', dest="usepdb", action="store_true", - help="start the interactive Python debugger on errors or KeyboardInterrupt.") + "--pdb", + dest="usepdb", + action="store_true", + help="start the interactive Python debugger on errors or KeyboardInterrupt.", + ) group._addoption( - '--pdbcls', dest="usepdb_cls", metavar="modulename:classname", + "--pdbcls", + dest="usepdb_cls", + metavar="modulename:classname", help="start a custom interactive Python debugger on errors. " - "For example: --pdbcls=IPython.terminal.debugger:TerminalPdb") + "For example: --pdbcls=IPython.terminal.debugger:TerminalPdb", + ) def pytest_configure(config): @@ -32,12 +39,12 @@ def pytest_configure(config): pdb_cls = pdb.Pdb if config.getvalue("usepdb"): - config.pluginmanager.register(PdbInvoke(), 'pdbinvoke') + config.pluginmanager.register(PdbInvoke(), "pdbinvoke") # Use custom Pdb class set_trace instead of default Pdb on breakpoint() call if SUPPORTS_BREAKPOINT_BUILTIN: - _environ_pythonbreakpoint = os.environ.get('PYTHONBREAKPOINT', '') - if _environ_pythonbreakpoint == '': + _environ_pythonbreakpoint = os.environ.get("PYTHONBREAKPOINT", "") + if _environ_pythonbreakpoint == "": sys.breakpointhook = pytestPDB.set_trace old = (pdb.set_trace, pytestPDB._pluginmanager) @@ -66,6 +73,7 @@ class pytestPDB(object): def set_trace(cls): """ invoke PDB set_trace debugging, dropping any IO capturing. 
""" import _pytest.config + frame = sys._getframe().f_back if cls._pluginmanager is not None: capman = cls._pluginmanager.getplugin("capturemanager") @@ -79,6 +87,7 @@ class pytestPDB(object): class PdbInvoke(object): + def pytest_exception_interact(self, node, call, report): capman = node.config.pluginmanager.getplugin("capturemanager") if capman: @@ -104,10 +113,10 @@ def _enter_pdb(node, excinfo, rep): showcapture = node.config.option.showcapture - for sectionname, content in (('stdout', rep.capstdout), - ('stderr', rep.capstderr), - ('log', rep.caplog)): - if showcapture in (sectionname, 'all') and content: + for sectionname, content in ( + ("stdout", rep.capstdout), ("stderr", rep.capstderr), ("log", rep.caplog) + ): + if showcapture in (sectionname, "all") and content: tw.sep(">", "captured " + sectionname) if content[-1:] == "\n": content = content[:-1] @@ -139,12 +148,15 @@ def _find_last_non_hidden_frame(stack): def post_mortem(t): + class Pdb(pytestPDB._pdb_cls): + def get_stack(self, f, t): stack, i = pdb.Pdb.get_stack(self, f, t) if f is None: i = _find_last_non_hidden_frame(stack) return stack, i + p = Pdb() p.reset() p.interaction(None, t) diff --git a/_pytest/deprecated.py b/_pytest/deprecated.py index c85588d79..7ebdcf999 100644 --- a/_pytest/deprecated.py +++ b/_pytest/deprecated.py @@ -12,23 +12,23 @@ class RemovedInPytest4Warning(DeprecationWarning): """warning class for features removed in pytest 4.0""" -MAIN_STR_ARGS = 'passing a string to pytest.main() is deprecated, ' \ - 'pass a list of arguments instead.' +MAIN_STR_ARGS = "passing a string to pytest.main() is deprecated, " "pass a list of arguments instead." -YIELD_TESTS = 'yield tests are deprecated, and scheduled to be removed in pytest 4.0' +YIELD_TESTS = "yield tests are deprecated, and scheduled to be removed in pytest 4.0" FUNCARG_PREFIX = ( '{name}: declaring fixtures using "pytest_funcarg__" prefix is deprecated ' - 'and scheduled to be removed in pytest 4.0. ' - 'Please remove the prefix and use the @pytest.fixture decorator instead.') + "and scheduled to be removed in pytest 4.0. " + "Please remove the prefix and use the @pytest.fixture decorator instead." +) -CFG_PYTEST_SECTION = '[pytest] section in {filename} files is deprecated, use [tool:pytest] instead.' +CFG_PYTEST_SECTION = "[pytest] section in {filename} files is deprecated, use [tool:pytest] instead." GETFUNCARGVALUE = "use of getfuncargvalue is deprecated, use getfixturevalue" RESULT_LOG = ( - '--result-log is deprecated and scheduled for removal in pytest 4.0.\n' - 'See https://docs.pytest.org/en/latest/usage.html#creating-resultlog-format-files for more information.' + "--result-log is deprecated and scheduled for removal in pytest 4.0.\n" + "See https://docs.pytest.org/en/latest/usage.html#creating-resultlog-format-files for more information." ) MARK_INFO_ATTRIBUTE = RemovedInPytest4Warning( @@ -45,13 +45,12 @@ MARK_PARAMETERSET_UNPACKING = RemovedInPytest4Warning( RECORD_XML_PROPERTY = ( 'Fixture renamed from "record_xml_property" to "record_property" as user ' - 'properties are now available to all reporters.\n' + "properties are now available to all reporters.\n" '"record_xml_property" is now deprecated.' 
) COLLECTOR_MAKEITEM = RemovedInPytest4Warning( - "pycollector makeitem was removed " - "as it is an accidentially leaked internal api" + "pycollector makeitem was removed " "as it is an accidentally leaked internal api" ) METAFUNC_ADD_CALL = ( diff --git a/_pytest/doctest.py b/_pytest/doctest.py index 4511f5889..3b58f955f 100644 --- a/_pytest/doctest.py +++ b/_pytest/doctest.py @@ -10,11 +10,11 @@ from _pytest._code.code import ExceptionInfo, ReprFileLocation, TerminalRepr from _pytest.fixtures import FixtureRequest -DOCTEST_REPORT_CHOICE_NONE = 'none' -DOCTEST_REPORT_CHOICE_CDIFF = 'cdiff' -DOCTEST_REPORT_CHOICE_NDIFF = 'ndiff' -DOCTEST_REPORT_CHOICE_UDIFF = 'udiff' -DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = 'only_first_failure' +DOCTEST_REPORT_CHOICE_NONE = "none" +DOCTEST_REPORT_CHOICE_CDIFF = "cdiff" +DOCTEST_REPORT_CHOICE_NDIFF = "ndiff" +DOCTEST_REPORT_CHOICE_UDIFF = "udiff" +DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = "only_first_failure" DOCTEST_REPORT_CHOICES = ( DOCTEST_REPORT_CHOICE_NONE, @@ -29,31 +29,53 @@ RUNNER_CLASS = None def pytest_addoption(parser): - parser.addini('doctest_optionflags', 'option flags for doctests', - type="args", default=["ELLIPSIS"]) - parser.addini("doctest_encoding", 'encoding used for doctest files', default="utf-8") + parser.addini( + "doctest_optionflags", + "option flags for doctests", + type="args", + default=["ELLIPSIS"], + ) + parser.addini( + "doctest_encoding", "encoding used for doctest files", default="utf-8" + ) group = parser.getgroup("collect") - group.addoption("--doctest-modules", - action="store_true", default=False, - help="run doctests in all .py modules", - dest="doctestmodules") - group.addoption("--doctest-report", - type=str.lower, default="udiff", - help="choose another output format for diffs on doctest failure", - choices=DOCTEST_REPORT_CHOICES, - dest="doctestreport") - group.addoption("--doctest-glob", - action="append", default=[], metavar="pat", - help="doctests file matching pattern, default: test*.txt", - dest="doctestglob") - group.addoption("--doctest-ignore-import-errors", - action="store_true", default=False, - help="ignore doctest ImportErrors", - dest="doctest_ignore_import_errors") - group.addoption("--doctest-continue-on-failure", - action="store_true", default=False, - help="for a given doctest, continue to run after the first failure", - dest="doctest_continue_on_failure") + group.addoption( + "--doctest-modules", + action="store_true", + default=False, + help="run doctests in all .py modules", + dest="doctestmodules", + ) + group.addoption( + "--doctest-report", + type=str.lower, + default="udiff", + help="choose another output format for diffs on doctest failure", + choices=DOCTEST_REPORT_CHOICES, + dest="doctestreport", + ) + group.addoption( + "--doctest-glob", + action="append", + default=[], + metavar="pat", + help="doctests file matching pattern, default: test*.txt", + dest="doctestglob", + ) + group.addoption( + "--doctest-ignore-import-errors", + action="store_true", + default=False, + help="ignore doctest ImportErrors", + dest="doctest_ignore_import_errors", + ) + group.addoption( + "--doctest-continue-on-failure", + action="store_true", + default=False, + help="for a given doctest, continue to run after the first failure", + dest="doctest_continue_on_failure", + ) def pytest_collect_file(path, parent): @@ -69,13 +91,13 @@ def _is_setup_py(config, path, parent): if path.basename != "setup.py": return False contents = path.read() - return 'setuptools' in contents or 'distutils' in contents + return
"setuptools" in contents or "distutils" in contents def _is_doctest(config, path, parent): - if path.ext in ('.txt', '.rst') and parent.session.isinitpath(path): + if path.ext in (".txt", ".rst") and parent.session.isinitpath(path): return True - globs = config.getoption("doctestglob") or ['test*.txt'] + globs = config.getoption("doctestglob") or ["test*.txt"] for glob in globs: if path.check(fnmatch=glob): return True @@ -96,6 +118,7 @@ class ReprFailDoctest(TerminalRepr): class MultipleDoctestFailures(Exception): + def __init__(self, failures): super(MultipleDoctestFailures, self).__init__() self.failures = failures @@ -109,10 +132,13 @@ def _init_runner_class(): Runner to collect failures. Note that the out variable in this case is a list instead of a stdout-like object """ - def __init__(self, checker=None, verbose=None, optionflags=0, - continue_on_failure=True): + + def __init__( + self, checker=None, verbose=None, optionflags=0, continue_on_failure=True + ): doctest.DebugRunner.__init__( - self, checker=checker, verbose=verbose, optionflags=optionflags) + self, checker=checker, verbose=verbose, optionflags=optionflags + ) self.continue_on_failure = continue_on_failure def report_failure(self, out, test, example, got): @@ -132,18 +158,21 @@ def _init_runner_class(): return PytestDoctestRunner -def _get_runner(checker=None, verbose=None, optionflags=0, - continue_on_failure=True): +def _get_runner(checker=None, verbose=None, optionflags=0, continue_on_failure=True): # We need this in order to do a lazy import on doctest global RUNNER_CLASS if RUNNER_CLASS is None: RUNNER_CLASS = _init_runner_class() return RUNNER_CLASS( - checker=checker, verbose=verbose, optionflags=optionflags, - continue_on_failure=continue_on_failure) + checker=checker, + verbose=verbose, + optionflags=optionflags, + continue_on_failure=continue_on_failure, + ) class DoctestItem(pytest.Item): + def __init__(self, name, parent, runner=None, dtest=None): super(DoctestItem, self).__init__(name, parent) self.runner = runner @@ -155,7 +184,9 @@ class DoctestItem(pytest.Item): if self.dtest is not None: self.fixture_request = _setup_fixtures(self) globs = dict(getfixture=self.fixture_request.getfixturevalue) - for name, value in self.fixture_request.getfixturevalue('doctest_namespace').items(): + for name, value in self.fixture_request.getfixturevalue( + "doctest_namespace" + ).items(): globs[name] = value self.dtest.globs.update(globs) @@ -171,7 +202,7 @@ class DoctestItem(pytest.Item): """ Disable output capturing. 
Otherwise, stdout is lost to doctest (#985) """ - if platform.system() != 'Darwin': + if platform.system() != "Darwin": return capman = self.config.pluginmanager.getplugin("capturemanager") if capman: @@ -181,9 +212,9 @@ class DoctestItem(pytest.Item): def repr_failure(self, excinfo): import doctest + failures = None - if excinfo.errisinstance((doctest.DocTestFailure, - doctest.UnexpectedException)): + if excinfo.errisinstance((doctest.DocTestFailure, doctest.UnexpectedException)): failures = [excinfo.value] elif excinfo.errisinstance(MultipleDoctestFailures): failures = excinfo.value.failures @@ -201,28 +232,35 @@ class DoctestItem(pytest.Item): message = type(failure).__name__ reprlocation = ReprFileLocation(filename, lineno, message) checker = _get_checker() - report_choice = _get_report_choice(self.config.getoption("doctestreport")) + report_choice = _get_report_choice( + self.config.getoption("doctestreport") + ) if lineno is not None: lines = failure.test.docstring.splitlines(False) # add line numbers to the left of the error message - lines = ["%03d %s" % (i + test.lineno + 1, x) - for (i, x) in enumerate(lines)] + lines = [ + "%03d %s" % (i + test.lineno + 1, x) + for (i, x) in enumerate(lines) + ] # trim docstring error lines to 10 lines = lines[max(example.lineno - 9, 0):example.lineno + 1] else: - lines = ['EXAMPLE LOCATION UNKNOWN, not showing all tests of that example'] - indent = '>>>' + lines = [ + "EXAMPLE LOCATION UNKNOWN, not showing all tests of that example" + ] + indent = ">>>" for line in example.source.splitlines(): - lines.append('??? %s %s' % (indent, line)) - indent = '...' + lines.append("??? %s %s" % (indent, line)) + indent = "..." if isinstance(failure, doctest.DocTestFailure): - lines += checker.output_difference(example, - failure.got, - report_choice).split("\n") + lines += checker.output_difference( + example, failure.got, report_choice + ).split( + "\n" + ) else: inner_excinfo = ExceptionInfo(failure.exc_info) - lines += ["UNEXPECTED EXCEPTION: %s" % - repr(inner_excinfo.value)] + lines += ["UNEXPECTED EXCEPTION: %s" % repr(inner_excinfo.value)] lines += traceback.format_exception(*failure.exc_info) reprlocation_lines.append((reprlocation, lines)) return ReprFailDoctest(reprlocation_lines) @@ -235,15 +273,17 @@ class DoctestItem(pytest.Item): def _get_flag_lookup(): import doctest - return dict(DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1, - DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE, - NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE, - ELLIPSIS=doctest.ELLIPSIS, - IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL, - COMPARISON_FLAGS=doctest.COMPARISON_FLAGS, - ALLOW_UNICODE=_get_allow_unicode_flag(), - ALLOW_BYTES=_get_allow_bytes_flag(), - ) + + return dict( + DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1, + DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE, + NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE, + ELLIPSIS=doctest.ELLIPSIS, + IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL, + COMPARISON_FLAGS=doctest.COMPARISON_FLAGS, + ALLOW_UNICODE=_get_allow_unicode_flag(), + ALLOW_BYTES=_get_allow_bytes_flag(), + ) def get_optionflags(parent): @@ -256,7 +296,7 @@ def get_optionflags(parent): def _get_continue_on_failure(config): - continue_on_failure = config.getvalue('doctest_continue_on_failure') + continue_on_failure = config.getvalue("doctest_continue_on_failure") if continue_on_failure: # We need to turn off this if we use pdb since we should stop at # the first failure @@ -277,14 +317,16 @@ class 
DoctestTextfile(pytest.Module): text = self.fspath.read_text(encoding) filename = str(self.fspath) name = self.fspath.basename - globs = {'__name__': '__main__'} + globs = {"__name__": "__main__"} optionflags = get_optionflags(self) runner = _get_runner( - verbose=0, optionflags=optionflags, + verbose=0, + optionflags=optionflags, checker=_get_checker(), - continue_on_failure=_get_continue_on_failure(self.config)) + continue_on_failure=_get_continue_on_failure(self.config), + ) _fix_spoof_python2(runner, encoding) parser = doctest.DocTestParser() @@ -298,31 +340,36 @@ def _check_all_skipped(test): option set. """ import doctest + all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples) if all_skipped: - pytest.skip('all tests skipped by +SKIP option') + pytest.skip("all tests skipped by +SKIP option") class DoctestModule(pytest.Module): + def collect(self): import doctest + if self.fspath.basename == "conftest.py": module = self.config.pluginmanager._importconftest(self.fspath) else: try: module = self.fspath.pyimport() except ImportError: - if self.config.getvalue('doctest_ignore_import_errors'): - pytest.skip('unable to import module %r' % self.fspath) + if self.config.getvalue("doctest_ignore_import_errors"): + pytest.skip("unable to import module %r" % self.fspath) else: raise # uses internal doctest module parsing mechanism finder = doctest.DocTestFinder() optionflags = get_optionflags(self) runner = _get_runner( - verbose=0, optionflags=optionflags, + verbose=0, + optionflags=optionflags, checker=_get_checker(), - continue_on_failure=_get_continue_on_failure(self.config)) + continue_on_failure=_get_continue_on_failure(self.config), + ) for test in finder.find(module, module.__name__): if test.examples: # skip empty doctests @@ -333,13 +380,15 @@ def _setup_fixtures(doctest_item): """ Used by DoctestTextfile and DoctestItem to setup fixture information. """ + def func(): pass doctest_item.funcargs = {} fm = doctest_item.session._fixturemanager - doctest_item._fixtureinfo = fm.getfixtureinfo(node=doctest_item, func=func, - cls=None, funcargs=False) + doctest_item._fixtureinfo = fm.getfixtureinfo( + node=doctest_item, func=func, cls=None, funcargs=False + ) fixture_request = FixtureRequest(doctest_item) fixture_request._fillfixtures() return fixture_request @@ -355,7 +404,7 @@ def _get_checker(): An inner class is used to avoid importing "doctest" at the module level. 
""" - if hasattr(_get_checker, 'LiteralsOutputChecker'): + if hasattr(_get_checker, "LiteralsOutputChecker"): return _get_checker.LiteralsOutputChecker() import doctest @@ -373,8 +422,7 @@ def _get_checker(): _bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE) def check_output(self, want, got, optionflags): - res = doctest.OutputChecker.check_output(self, want, got, - optionflags) + res = doctest.OutputChecker.check_output(self, want, got, optionflags) if res: return True @@ -384,8 +432,9 @@ def _get_checker(): return False else: # pragma: no cover + def remove_prefixes(regex, txt): - return re.sub(regex, r'\1\2', txt) + return re.sub(regex, r"\1\2", txt) if allow_unicode: want = remove_prefixes(self._unicode_literal_re, want) @@ -393,8 +442,7 @@ def _get_checker(): if allow_bytes: want = remove_prefixes(self._bytes_literal_re, want) got = remove_prefixes(self._bytes_literal_re, got) - res = doctest.OutputChecker.check_output(self, want, got, - optionflags) + res = doctest.OutputChecker.check_output(self, want, got, optionflags) return res _get_checker.LiteralsOutputChecker = LiteralsOutputChecker @@ -406,7 +454,8 @@ def _get_allow_unicode_flag(): Registers and returns the ALLOW_UNICODE flag. """ import doctest - return doctest.register_optionflag('ALLOW_UNICODE') + + return doctest.register_optionflag("ALLOW_UNICODE") def _get_allow_bytes_flag(): @@ -414,7 +463,8 @@ def _get_allow_bytes_flag(): Registers and returns the ALLOW_BYTES flag. """ import doctest - return doctest.register_optionflag('ALLOW_BYTES') + + return doctest.register_optionflag("ALLOW_BYTES") def _get_report_choice(key): @@ -430,7 +480,9 @@ def _get_report_choice(key): DOCTEST_REPORT_CHOICE_NDIFF: doctest.REPORT_NDIFF, DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE: doctest.REPORT_ONLY_FIRST_FAILURE, DOCTEST_REPORT_CHOICE_NONE: 0, - }[key] + }[ + key + ] def _fix_spoof_python2(runner, encoding): @@ -443,6 +495,7 @@ def _fix_spoof_python2(runner, encoding): This fixes the problem related in issue #2434. """ from _pytest.compat import _PY2 + if not _PY2: return @@ -459,7 +512,7 @@ def _fix_spoof_python2(runner, encoding): runner._fakeout = UnicodeSpoof() -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def doctest_namespace(): """ Fixture that returns a :py:class:`dict` that will be injected into the namespace of doctests. 
diff --git a/_pytest/fixtures.py b/_pytest/fixtures.py index 6617c24a2..bfae4cb61 100644 --- a/_pytest/fixtures.py +++ b/_pytest/fixtures.py @@ -15,15 +15,23 @@ import _pytest from _pytest import nodes from _pytest._code.code import TerminalRepr from _pytest.compat import ( - NOTSET, exc_clear, _format_args, - getfslineno, get_real_func, - is_generator, isclass, getimfunc, - getlocation, getfuncargnames, + NOTSET, + exc_clear, + _format_args, + getfslineno, + get_real_func, + is_generator, + isclass, + getimfunc, + getlocation, + getfuncargnames, safe_getattr, FuncargnamesCompatAttr, ) from _pytest.outcomes import fail, TEST_OUTCOME +FIXTURE_MSG = 'fixtures cannot have "pytest_funcarg__" prefix and be decorated with @pytest.fixture:\n{}' + @attr.s(frozen=True) class PseudoFixtureDef(object): @@ -35,12 +43,14 @@ def pytest_sessionstart(session): import _pytest.python import _pytest.nodes - scopename2class.update({ - 'class': _pytest.python.Class, - 'module': _pytest.python.Module, - 'function': _pytest.nodes.Item, - 'session': _pytest.main.Session, - }) + scopename2class.update( + { + "class": _pytest.python.Class, + "module": _pytest.python.Module, + "function": _pytest.nodes.Item, + "session": _pytest.main.Session, + } + ) session._fixturemanager = FixtureManager(session) @@ -50,21 +60,24 @@ scopename2class = {} scope2props = dict(session=()) scope2props["module"] = ("fspath", "module") scope2props["class"] = scope2props["module"] + ("cls",) -scope2props["instance"] = scope2props["class"] + ("instance", ) +scope2props["instance"] = scope2props["class"] + ("instance",) scope2props["function"] = scope2props["instance"] + ("function", "keywords") def scopeproperty(name=None, doc=None): + def decoratescope(func): scopename = name or func.__name__ def provide(self): if func.__name__ in scope2props[self.scope]: return func(self) - raise AttributeError("%s not available in %s-scoped context" % ( - scopename, self.scope)) + raise AttributeError( + "%s not available in %s-scoped context" % (scopename, self.scope) + ) return property(provide, None, None, func.__doc__) + return decoratescope @@ -95,8 +108,7 @@ def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager): callspec.indices[argname] = len(arg2params_list) arg2params_list.append(argvalue) if argname not in arg2scope: - scopenum = callspec._arg2scopenum.get(argname, - scopenum_function) + scopenum = callspec._arg2scopenum.get(argname, scopenum_function) arg2scope[argname] = scopes[scopenum] callspec.funcargs.clear() @@ -119,10 +131,16 @@ def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager): if node and argname in node._name2pseudofixturedef: arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]] else: - fixturedef = FixtureDef(fixturemanager, '', argname, - get_direct_param_fixture_func, - arg2scope[argname], - valuelist, False, False) + fixturedef = FixtureDef( + fixturemanager, + "", + argname, + get_direct_param_fixture_func, + arg2scope[argname], + valuelist, + False, + False, + ) arg2fixturedefs[argname] = [fixturedef] if node is not None: node._name2pseudofixturedef[argname] = fixturedef @@ -154,7 +172,7 @@ def get_parametrized_fixture_keys(item, scopenum): for argname, param_index in sorted(cs.indices.items()): if cs._arg2scopenum[argname] != scopenum: continue - if scopenum == 0: # session + if scopenum == 0: # session key = (argname, param_index) elif scopenum == 1: # module key = (argname, param_index, item.fspath) @@ -168,6 +186,7 @@ def get_parametrized_fixture_keys(item, scopenum): # 
down to the lower scopes such as to minimize number of "high scope" # setups and teardowns + def reorder_items(items): argkeys_cache = {} items_by_argkey = {} @@ -205,20 +224,25 @@ def reorder_items_atscope(items, argkeys_cache, items_by_argkey, scopenum): item = items_deque.popleft() if item in items_done or item in no_argkey_group: continue - argkeys = OrderedDict.fromkeys(k for k in scoped_argkeys_cache.get(item, []) if k not in ignore) + argkeys = OrderedDict.fromkeys( + k for k in scoped_argkeys_cache.get(item, []) if k not in ignore + ) if not argkeys: no_argkey_group[item] = None else: slicing_argkey, _ = argkeys.popitem() # we don't have to remove relevant items from later in the deque because they'll just be ignored - matching_items = [i for i in scoped_items_by_argkey[slicing_argkey] if i in items] + matching_items = [ + i for i in scoped_items_by_argkey[slicing_argkey] if i in items + ] for i in reversed(matching_items): fix_cache_order(i, argkeys_cache, items_by_argkey) items_deque.appendleft(i) break if no_argkey_group: no_argkey_group = reorder_items_atscope( - no_argkey_group, argkeys_cache, items_by_argkey, scopenum + 1) + no_argkey_group, argkeys_cache, items_by_argkey, scopenum + 1 + ) for item in no_argkey_group: items_done[item] = None ignore.add(slicing_argkey) @@ -252,6 +276,7 @@ def get_direct_param_fixture_func(request): class FuncFixtureInfo(object): + def __init__(self, argnames, names_closure, name2fixturedefs): self.argnames = argnames self.names_closure = names_closure @@ -362,7 +387,8 @@ class FixtureRequest(FuncargnamesCompatAttr): def _addfinalizer(self, finalizer, scope): colitem = self._getscopeitem(scope) self._pyfuncitem.session._setupstate.addfinalizer( - finalizer=finalizer, colitem=colitem) + finalizer=finalizer, colitem=colitem + ) def applymarker(self, marker): """ Apply a marker to a single test function invocation. @@ -400,7 +426,7 @@ class FixtureRequest(FuncargnamesCompatAttr): or ``session`` indicating the caching lifecycle of the resource. :arg extrakey: added to internal caching key of (funcargname, scope). """ - if not hasattr(self.config, '_setupcache'): + if not hasattr(self.config, "_setupcache"): self.config._setupcache = {} # XXX weakref? cachekey = (self.fixturename, self._getscopeitem(scope), extrakey) cache = self.config._setupcache @@ -411,9 +437,11 @@ class FixtureRequest(FuncargnamesCompatAttr): val = setup() cache[cachekey] = val if teardown is not None: + def finalizer(): del cache[cachekey] teardown(val) + self._addfinalizer(finalizer, scope=scope) return val @@ -430,10 +458,8 @@ class FixtureRequest(FuncargnamesCompatAttr): def getfuncargvalue(self, argname): """ Deprecated, use getfixturevalue. 
""" from _pytest import deprecated - warnings.warn( - deprecated.GETFUNCARGVALUE, - DeprecationWarning, - stacklevel=2) + + warnings.warn(deprecated.GETFUNCARGVALUE, DeprecationWarning, stacklevel=2) return self.getfixturevalue(argname) def _get_active_fixturedef(self, argname): @@ -493,8 +519,8 @@ class FixtureRequest(FuncargnamesCompatAttr): source_path = source_path.relto(funcitem.config.rootdir) msg = ( "The requested fixture has no parameter defined for the " - "current test.\n\nRequested fixture '{0}' defined in:\n{1}" - "\n\nRequested here:\n{2}:{3}".format( + "current test.\n\nRequested fixture '{}' defined in:\n{}" + "\n\nRequested here:\n{}:{}".format( fixturedef.argname, getlocation(fixturedef.func, funcitem.config.rootdir), source_path, @@ -524,8 +550,10 @@ class FixtureRequest(FuncargnamesCompatAttr): fixturedef.execute(request=subrequest) finally: # if fixture function failed it might have registered finalizers - self.session._setupstate.addfinalizer(functools.partial(fixturedef.finish, request=subrequest), - subrequest.node) + self.session._setupstate.addfinalizer( + functools.partial(fixturedef.finish, request=subrequest), + subrequest.node, + ) def _check_scope(self, argname, invoking_scope, requested_scope): if argname == "request": @@ -533,11 +561,13 @@ class FixtureRequest(FuncargnamesCompatAttr): if scopemismatch(invoking_scope, requested_scope): # try to report something helpful lines = self._factorytraceback() - fail("ScopeMismatch: You tried to access the %r scoped " - "fixture %r with a %r scoped request object, " - "involved factories\n%s" % ( - (requested_scope, argname, invoking_scope, "\n".join(lines))), - pytrace=False) + fail( + "ScopeMismatch: You tried to access the %r scoped " + "fixture %r with a %r scoped request object, " + "involved factories\n%s" + % ((requested_scope, argname, invoking_scope, "\n".join(lines))), + pytrace=False, + ) def _factorytraceback(self): lines = [] @@ -546,8 +576,7 @@ class FixtureRequest(FuncargnamesCompatAttr): fs, lineno = getfslineno(factory) p = self._pyfuncitem.session.fspath.bestrelpath(fs) args = _format_args(factory) - lines.append("%s:%d: def %s%s" % ( - p, lineno, factory.__name__, args)) + lines.append("%s:%d: def %s%s" % (p, lineno, factory.__name__, args)) return lines def _getscopeitem(self, scope): @@ -558,7 +587,9 @@ class FixtureRequest(FuncargnamesCompatAttr): if node is None and scope == "class": # fallback to function item itself node = self._pyfuncitem - assert node, 'Could not obtain a node for scope "{}" for function {!r}'.format(scope, self._pyfuncitem) + assert node, 'Could not obtain a node for scope "{}" for function {!r}'.format( + scope, self._pyfuncitem + ) return node def __repr__(self): @@ -612,9 +643,9 @@ def scope2index(scope, descr, where=None): return scopes.index(scope) except ValueError: raise ValueError( - "{0} {1}has an unsupported scope value '{2}'".format( - descr, 'from {0} '.format(where) if where else '', - scope) + "{} {}has an unsupported scope value '{}'".format( + descr, "from {} ".format(where) if where else "", scope + ) ) @@ -649,7 +680,7 @@ class FixtureLookupError(LookupError): for i, line in enumerate(lines): line = line.rstrip() addline(" " + line) - if line.lstrip().startswith('def'): + if line.lstrip().startswith("def"): break if msg is None: @@ -668,6 +699,7 @@ class FixtureLookupError(LookupError): class FixtureLookupErrorRepr(TerminalRepr): + def __init__(self, filename, firstlineno, tblines, errorstring, argname): self.tblines = tblines self.errorstring = 
errorstring @@ -681,11 +713,15 @@ class FixtureLookupErrorRepr(TerminalRepr): tw.line(tbline.rstrip()) lines = self.errorstring.split("\n") if lines: - tw.line('{0} {1}'.format(FormattedExcinfo.fail_marker, - lines[0].strip()), red=True) + tw.line( + "{} {}".format(FormattedExcinfo.fail_marker, lines[0].strip()), + red=True, + ) for line in lines[1:]: - tw.line('{0} {1}'.format(FormattedExcinfo.flow_marker, - line.strip()), red=True) + tw.line( + "{} {}".format(FormattedExcinfo.flow_marker, line.strip()), + red=True, + ) tw.line() tw.line("%s:%d" % (self.filename, self.firstlineno + 1)) @@ -694,8 +730,7 @@ def fail_fixturefunc(fixturefunc, msg): fs, lineno = getfslineno(fixturefunc) location = "%s:%s" % (fs, lineno + 1) source = _pytest._code.Source(fixturefunc) - fail(msg + ":\n\n" + str(source.indent()) + "\n" + location, - pytrace=False) + fail(msg + ":\n\n" + str(source.indent()) + "\n" + location, pytrace=False) def call_fixture_func(fixturefunc, request, kwargs): @@ -710,8 +745,9 @@ def call_fixture_func(fixturefunc, request, kwargs): except StopIteration: pass else: - fail_fixturefunc(fixturefunc, - "yield_fixture function has more than one 'yield'") + fail_fixturefunc( + fixturefunc, "yield_fixture function has more than one 'yield'" + ) request.addfinalizer(teardown) else: @@ -722,18 +758,25 @@ def call_fixture_func(fixturefunc, request, kwargs): class FixtureDef(object): """ A container for a factory definition. """ - def __init__(self, fixturemanager, baseid, argname, func, scope, params, - unittest=False, ids=None): + def __init__( + self, + fixturemanager, + baseid, + argname, + func, + scope, + params, + unittest=False, + ids=None, + ): self._fixturemanager = fixturemanager - self.baseid = baseid or '' + self.baseid = baseid or "" self.has_location = baseid is not None self.func = func self.argname = argname self.scope = scope self.scopenum = scope2index( - scope or "function", - descr='fixture {0}'.format(func.__name__), - where=baseid + scope or "function", descr="fixture {}".format(func.__name__), where=baseid ) self.params = params self.argnames = getfuncargnames(func, is_method=unittest) @@ -795,8 +838,10 @@ class FixtureDef(object): return hook.pytest_fixture_setup(fixturedef=self, request=request) def __repr__(self): - return ("" % - (self.argname, self.scope, self.baseid)) + return ( + "" + % (self.argname, self.scope, self.baseid) + ) def pytest_fixture_setup(fixturedef, request): @@ -849,12 +894,12 @@ class FixtureFunctionMarker(object): def __call__(self, function): if isclass(function): - raise ValueError( - "class fixtures not supported (may be in the future)") + raise ValueError("class fixtures not supported (may be in the future)") if getattr(function, "_pytestfixturefunction", False): raise ValueError( - "fixture is being applied more than once to the same function") + "fixture is being applied more than once to the same function" + ) function._pytestfixturefunction = self return function @@ -900,8 +945,7 @@ def fixture(scope="function", params=None, autouse=False, ids=None, name=None): """ if callable(scope) and params is None and autouse is False: # direct decoration - return FixtureFunctionMarker( - "function", params, autouse, name=name)(scope) + return FixtureFunctionMarker("function", params, autouse, name=name)(scope) if params is not None and not isinstance(params, (list, tuple)): params = list(params) return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name) @@ -915,8 +959,9 @@ def yield_fixture(scope="function", params=None, 
autouse=False, ids=None, name=N """ if callable(scope) and params is None and not autouse: # direct decoration - return FixtureFunctionMarker( - "function", params, autouse, ids=ids, name=name)(scope) + return FixtureFunctionMarker("function", params, autouse, ids=ids, name=name)( + scope + ) else: return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name) @@ -988,12 +1033,13 @@ class FixtureManager(object): argnames = getfuncargnames(func, cls=cls) else: argnames = () - usefixtures = flatten(mark.args for mark in node.iter_markers(name="usefixtures")) + usefixtures = flatten( + mark.args for mark in node.iter_markers(name="usefixtures") + ) initialnames = argnames initialnames = tuple(usefixtures) + initialnames fm = node.session._fixturemanager - names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames, - node) + names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames, node) return FuncFixtureInfo(argnames, names_closure, arg2fixturedefs) def pytest_plugin_registered(self, plugin): @@ -1058,7 +1104,7 @@ class FixtureManager(object): try: fixturedefs = arg2fixturedefs[arg_name] except KeyError: - return scopes.index('function') + return scopes.index("function") else: return fixturedefs[-1].scopenum @@ -1071,11 +1117,11 @@ class FixtureManager(object): if faclist: fixturedef = faclist[-1] if fixturedef.params is not None: - parametrize_func = getattr(metafunc.function, 'parametrize', None) + parametrize_func = getattr(metafunc.function, "parametrize", None) if parametrize_func is not None: parametrize_func = parametrize_func.combined - func_params = getattr(parametrize_func, 'args', [[None]]) - func_kwargs = getattr(parametrize_func, 'kwargs', {}) + func_params = getattr(parametrize_func, "args", [[None]]) + func_kwargs = getattr(parametrize_func, "kwargs", {}) # skip directly parametrized arguments if "argnames" in func_kwargs: argnames = parametrize_func.kwargs["argnames"] @@ -1084,9 +1130,13 @@ class FixtureManager(object): if not isinstance(argnames, (tuple, list)): argnames = [x.strip() for x in argnames.split(",") if x.strip()] if argname not in func_params and argname not in argnames: - metafunc.parametrize(argname, fixturedef.params, - indirect=True, scope=fixturedef.scope, - ids=fixturedef.ids) + metafunc.parametrize( + argname, + fixturedef.params, + indirect=True, + scope=fixturedef.scope, + ids=fixturedef.ids, + ) else: continue # will raise FixtureLookupError at setup time @@ -1118,7 +1168,10 @@ class FixtureManager(object): continue marker = defaultfuncargprefixmarker from _pytest import deprecated - self.config.warn('C1', deprecated.FUNCARG_PREFIX.format(name=name), nodeid=nodeid) + + self.config.warn( + "C1", deprecated.FUNCARG_PREFIX.format(name=name), nodeid=nodeid + ) name = name[len(self._argprefix):] elif not isinstance(marker, FixtureFunctionMarker): # magic globals with __getattr__ might have got us a wrong @@ -1127,13 +1180,18 @@ class FixtureManager(object): else: if marker.name: name = marker.name - msg = 'fixtures cannot have "pytest_funcarg__" prefix ' \ - 'and be decorated with @pytest.fixture:\n%s' % name - assert not name.startswith(self._argprefix), msg + assert not name.startswith(self._argprefix), FIXTURE_MSG.format(name) - fixture_def = FixtureDef(self, nodeid, name, obj, - marker.scope, marker.params, - unittest=unittest, ids=marker.ids) + fixture_def = FixtureDef( + self, + nodeid, + name, + obj, + marker.scope, + marker.params, + unittest=unittest, + ids=marker.ids, + ) faclist = self._arg2fixturedefs.setdefault(name, 
[]) if fixture_def.has_location: @@ -1149,7 +1207,7 @@ class FixtureManager(object): autousenames.append(name) if autousenames: - self._nodeid_and_autousenames.append((nodeid or '', autousenames)) + self._nodeid_and_autousenames.append((nodeid or "", autousenames)) def getfixturedefs(self, argname, nodeid): """ diff --git a/_pytest/freeze_support.py b/_pytest/freeze_support.py index 52b84eb49..002e07730 100644 --- a/_pytest/freeze_support.py +++ b/_pytest/freeze_support.py @@ -12,12 +12,13 @@ def freeze_includes(): """ import py import _pytest + result = list(_iter_all_modules(py)) result += list(_iter_all_modules(_pytest)) return result -def _iter_all_modules(package, prefix=''): +def _iter_all_modules(package, prefix=""): """ Iterates over the names of all modules that can be found in the given package, recursively. @@ -31,13 +32,14 @@ def _iter_all_modules(package, prefix=''): """ import os import pkgutil + if type(package) is not str: - path, prefix = package.__path__[0], package.__name__ + '.' + path, prefix = package.__path__[0], package.__name__ + "." else: path = package for _, name, is_package in pkgutil.iter_modules([path]): if is_package: - for m in _iter_all_modules(os.path.join(path, name), prefix=name + '.'): + for m in _iter_all_modules(os.path.join(path, name), prefix=name + "."): yield prefix + m else: yield prefix + name diff --git a/_pytest/helpconfig.py b/_pytest/helpconfig.py index 42636bdb0..5514fec40 100644 --- a/_pytest/helpconfig.py +++ b/_pytest/helpconfig.py @@ -18,48 +18,69 @@ class HelpAction(Action): implemented by raising SystemExit. """ - def __init__(self, - option_strings, - dest=None, - default=False, - help=None): + def __init__(self, option_strings, dest=None, default=False, help=None): super(HelpAction, self).__init__( option_strings=option_strings, dest=dest, const=True, default=default, nargs=0, - help=help) + help=help, + ) def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, self.const) # We should only skip the rest of the parsing after preparse is done - if getattr(parser._parser, 'after_preparse', False): + if getattr(parser._parser, "after_preparse", False): raise PrintHelp def pytest_addoption(parser): - group = parser.getgroup('debugconfig') - group.addoption('--version', action="store_true", - help="display pytest lib version and import information.") - group._addoption("-h", "--help", action=HelpAction, dest="help", - help="show help message and configuration info") - group._addoption('-p', action="append", dest="plugins", default=[], - metavar="name", - help="early-load given plugin (multi-allowed). " - "To avoid loading of plugins, use the `no:` prefix, e.g. " - "`no:doctest`.") - group.addoption('--traceconfig', '--trace-config', - action="store_true", default=False, - help="trace considerations of conftest.py files."), - group.addoption('--debug', - action="store_true", dest="debug", default=False, - help="store internal tracing debug information in 'pytestdebug.log'.") + group = parser.getgroup("debugconfig") + group.addoption( + "--version", + action="store_true", + help="display pytest lib version and import information.", + ) group._addoption( - '-o', '--override-ini', dest="override_ini", + "-h", + "--help", + action=HelpAction, + dest="help", + help="show help message and configuration info", + ) + group._addoption( + "-p", action="append", - help='override ini option with "option=value" style, e.g. 
`-o xfail_strict=True -o cache_dir=cache`.') + dest="plugins", + default=[], + metavar="name", + help="early-load given plugin (multi-allowed). " + "To avoid loading of plugins, use the `no:` prefix, e.g. " + "`no:doctest`.", + ) + group.addoption( + "--traceconfig", + "--trace-config", + action="store_true", + default=False, + help="trace considerations of conftest.py files.", + ), + group.addoption( + "--debug", + action="store_true", + dest="debug", + default=False, + help="store internal tracing debug information in 'pytestdebug.log'.", + ) + group._addoption( + "-o", + "--override-ini", + dest="override_ini", + action="append", + help='override ini option with "option=value" style, e.g. `-o xfail_strict=True -o cache_dir=cache`.', + ) @pytest.hookimpl(hookwrapper=True) @@ -68,20 +89,25 @@ def pytest_cmdline_parse(): config = outcome.get_result() if config.option.debug: path = os.path.abspath("pytestdebug.log") - debugfile = open(path, 'w') - debugfile.write("versions pytest-%s, py-%s, " - "python-%s\ncwd=%s\nargs=%s\n\n" % ( - pytest.__version__, py.__version__, - ".".join(map(str, sys.version_info)), - os.getcwd(), config._origargs)) + debugfile = open(path, "w") + debugfile.write( + "versions pytest-%s, py-%s, " + "python-%s\ncwd=%s\nargs=%s\n\n" + % ( + pytest.__version__, + py.__version__, + ".".join(map(str, sys.version_info)), + os.getcwd(), + config._origargs, + ) + ) config.trace.root.setwriter(debugfile.write) undo_tracing = config.pluginmanager.enable_tracing() sys.stderr.write("writing pytestdebug information to %s\n" % path) def unset_tracing(): debugfile.close() - sys.stderr.write("wrote pytestdebug information to %s\n" % - debugfile.name) + sys.stderr.write("wrote pytestdebug information to %s\n" % debugfile.name) config.trace.root.setwriter(None) undo_tracing() @@ -91,8 +117,9 @@ def pytest_cmdline_parse(): def pytest_cmdline_main(config): if config.option.version: p = py.path.local(pytest.__file__) - sys.stderr.write("This is pytest version %s, imported from %s\n" % - (pytest.__version__, p)) + sys.stderr.write( + "This is pytest version %s, imported from %s\n" % (pytest.__version__, p) + ) plugininfo = getpluginversioninfo(config) if plugininfo: for line in plugininfo: @@ -106,13 +133,14 @@ def pytest_cmdline_main(config): def showhelp(config): - reporter = config.pluginmanager.get_plugin('terminalreporter') + reporter = config.pluginmanager.get_plugin("terminalreporter") tw = reporter._tw tw.write(config._parser.optparser.format_help()) tw.line() tw.line() - tw.line("[pytest] ini-options in the first " - "pytest.ini|tox.ini|setup.cfg file found:") + tw.line( + "[pytest] ini-options in the first " "pytest.ini|tox.ini|setup.cfg file found:" + ) tw.line() for name in config._parser._ininames: @@ -128,7 +156,7 @@ def showhelp(config): vars = [ ("PYTEST_ADDOPTS", "extra command line options"), ("PYTEST_PLUGINS", "comma-separated plugins to load during startup"), - ("PYTEST_DEBUG", "set to enable debug tracing of pytest's internals") + ("PYTEST_DEBUG", "set to enable debug tracing of pytest's internals"), ] for name, help in vars: tw.line(" %-24s %s" % (name, help)) @@ -137,18 +165,18 @@ def showhelp(config): tw.line("to see available markers type: pytest --markers") tw.line("to see available fixtures type: pytest --fixtures") - tw.line("(shown according to specified file_or_dir or current dir " - "if not specified; fixtures with leading '_' are only shown " - "with the '-v' option") + tw.line( + "(shown according to specified file_or_dir or current dir " + "if not 
specified; fixtures with leading '_' are only shown " + "with the '-v' option" + ) - for warningreport in reporter.stats.get('warnings', []): + for warningreport in reporter.stats.get("warnings", []): tw.line("warning : " + warningreport.message, red=True) return -conftest_options = [ - ('pytest_plugins', 'list of plugin names to load'), -] +conftest_options = [("pytest_plugins", "list of plugin names to load")] def getpluginversioninfo(config): @@ -157,7 +185,7 @@ def getpluginversioninfo(config): if plugininfo: lines.append("setuptools registered plugins:") for plugin, dist in plugininfo: - loc = getattr(plugin, '__file__', repr(plugin)) + loc = getattr(plugin, "__file__", repr(plugin)) content = "%s-%s at %s" % (dist.project_name, dist.version, loc) lines.append(" " + content) return lines @@ -166,8 +194,7 @@ def getpluginversioninfo(config): def pytest_report_header(config): lines = [] if config.option.debug or config.option.traceconfig: - lines.append("using: pytest-%s pylib-%s" % - (pytest.__version__, py.__version__)) + lines.append("using: pytest-%s pylib-%s" % (pytest.__version__, py.__version__)) verinfo = getpluginversioninfo(config) if verinfo: @@ -177,7 +204,7 @@ def pytest_report_header(config): lines.append("active plugins:") items = config.pluginmanager.list_name_plugin() for name, plugin in items: - if hasattr(plugin, '__file__'): + if hasattr(plugin, "__file__"): r = plugin.__file__ else: r = repr(plugin) diff --git a/_pytest/hookspec.py b/_pytest/hookspec.py index f5bdfabc5..fec43a400 100644 --- a/_pytest/hookspec.py +++ b/_pytest/hookspec.py @@ -98,6 +98,7 @@ def pytest_configure(config): :arg _pytest.config.Config config: pytest config object """ + # ------------------------------------------------------------------------- # Bootstrapping hooks called for plugins registered early enough: # internal and 3rd party plugins. @@ -163,6 +164,7 @@ def pytest_load_initial_conftests(early_config, parser, args): # collection hooks # ------------------------------------------------------------------------- + @hookspec(firstresult=True) def pytest_collection(session): """Perform the collection protocol for the given session. @@ -220,6 +222,7 @@ def pytest_collect_file(path, parent): :param str path: the path to collect """ + # logging hooks for collection @@ -245,6 +248,7 @@ def pytest_make_collect_report(collector): Stops at first non-None result, see :ref:`firstresult` """ + # ------------------------------------------------------------------------- # Python test function related hooks # ------------------------------------------------------------------------- @@ -291,6 +295,7 @@ def pytest_make_parametrize_id(config, val, argname): :param str argname: the automatic parameter name produced by pytest """ + # ------------------------------------------------------------------------- # generic runtest related hooks # ------------------------------------------------------------------------- @@ -382,6 +387,7 @@ def pytest_runtest_logreport(report): """ process a test setup/call/teardown report relating to the respective phase of executing a test. 
""" + # ------------------------------------------------------------------------- # Fixture related hooks # ------------------------------------------------------------------------- @@ -407,6 +413,7 @@ def pytest_fixture_post_finalizer(fixturedef, request): the fixture result cache ``fixturedef.cached_result`` can still be accessed.""" + # ------------------------------------------------------------------------- # test session related hooks # ------------------------------------------------------------------------- @@ -439,6 +446,7 @@ def pytest_unconfigure(config): # hooks for customizing the assert methods # ------------------------------------------------------------------------- + def pytest_assertrepr_compare(config, op, left, right): """return explanation for comparisons in failing assert expressions. @@ -450,6 +458,7 @@ def pytest_assertrepr_compare(config, op, left, right): :param _pytest.config.Config config: pytest config object """ + # ------------------------------------------------------------------------- # hooks for influencing reporting (invoked from _pytest_terminal) # ------------------------------------------------------------------------- @@ -511,6 +520,7 @@ def pytest_logwarning(message, code, nodeid, fslocation): This hook is incompatible with ``hookwrapper=True``. """ + # ------------------------------------------------------------------------- # doctest hooks # ------------------------------------------------------------------------- @@ -522,6 +532,7 @@ def pytest_doctest_prepare_content(content): Stops at first non-None result, see :ref:`firstresult` """ + # ------------------------------------------------------------------------- # error handling and internal debugging hooks # ------------------------------------------------------------------------- diff --git a/_pytest/junitxml.py b/_pytest/junitxml.py index a4603529b..29da27de7 100644 --- a/_pytest/junitxml.py +++ b/_pytest/junitxml.py @@ -39,15 +39,14 @@ class Junit(py.xml.Namespace): # chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] # | [#x10000-#x10FFFF] _legal_chars = (0x09, 0x0A, 0x0d) -_legal_ranges = ( - (0x20, 0x7E), (0x80, 0xD7FF), (0xE000, 0xFFFD), (0x10000, 0x10FFFF), -) +_legal_ranges = ((0x20, 0x7E), (0x80, 0xD7FF), (0xE000, 0xFFFD), (0x10000, 0x10FFFF)) _legal_xml_re = [ unicode("%s-%s") % (unichr(low), unichr(high)) - for (low, high) in _legal_ranges if low < sys.maxunicode + for (low, high) in _legal_ranges + if low < sys.maxunicode ] _legal_xml_re = [unichr(x) for x in _legal_chars] + _legal_xml_re -illegal_xml_re = re.compile(unicode('[^%s]') % unicode('').join(_legal_xml_re)) +illegal_xml_re = re.compile(unicode("[^%s]") % unicode("").join(_legal_xml_re)) del _legal_chars del _legal_ranges del _legal_xml_re @@ -56,17 +55,19 @@ _py_ext_re = re.compile(r"\.py$") def bin_xml_escape(arg): + def repl(matchobj): i = ord(matchobj.group()) if i <= 0xFF: - return unicode('#x%02X') % i + return unicode("#x%02X") % i else: - return unicode('#x%04X') % i + return unicode("#x%04X") % i return py.xml.raw(illegal_xml_re.sub(repl, py.xml.escape(arg))) class _NodeReporter(object): + def __init__(self, nodeid, xml): self.id = nodeid @@ -92,11 +93,13 @@ class _NodeReporter(object): """Return a Junit node containing custom properties, if any. 
""" if self.properties: - return Junit.properties([ - Junit.property(name=name, value=value) - for name, value in self.properties - ]) - return '' + return Junit.properties( + [ + Junit.property(name=name, value=value) + for name, value in self.properties + ] + ) + return "" def record_testreport(self, testreport): assert not self.testcase @@ -135,53 +138,57 @@ class _NodeReporter(object): content_err = report.capstderr if content_log or content_out: - if content_log and self.xml.logging == 'system-out': + if content_log and self.xml.logging == "system-out": if content_out: # syncing stdout and the log-output is not done yet. It's # probably not worth the effort. Therefore, first the captured # stdout is shown and then the captured logs. - content = '\n'.join([ - ' Captured Stdout '.center(80, '-'), - content_out, - '', - ' Captured Log '.center(80, '-'), - content_log]) + content = "\n".join( + [ + " Captured Stdout ".center(80, "-"), + content_out, + "", + " Captured Log ".center(80, "-"), + content_log, + ] + ) else: content = content_log else: content = content_out if content: - tag = getattr(Junit, 'system-out') + tag = getattr(Junit, "system-out") self.append(tag(bin_xml_escape(content))) if content_log or content_err: - if content_log and self.xml.logging == 'system-err': + if content_log and self.xml.logging == "system-err": if content_err: - content = '\n'.join([ - ' Captured Stderr '.center(80, '-'), - content_err, - '', - ' Captured Log '.center(80, '-'), - content_log]) + content = "\n".join( + [ + " Captured Stderr ".center(80, "-"), + content_err, + "", + " Captured Log ".center(80, "-"), + content_log, + ] + ) else: content = content_log else: content = content_err if content: - tag = getattr(Junit, 'system-err') + tag = getattr(Junit, "system-err") self.append(tag(bin_xml_escape(content))) def append_pass(self, report): - self.add_stats('passed') + self.add_stats("passed") def append_failure(self, report): # msg = str(report.longrepr.reprtraceback.extraline) if hasattr(report, "wasxfail"): - self._add_simple( - Junit.skipped, - "xfail-marked test passes unexpectedly") + self._add_simple(Junit.skipped, "xfail-marked test passes unexpectedly") else: if hasattr(report.longrepr, "reprcrash"): message = report.longrepr.reprcrash.message @@ -196,34 +203,34 @@ class _NodeReporter(object): def append_collect_error(self, report): # msg = str(report.longrepr.reprtraceback.extraline) - self.append(Junit.error(bin_xml_escape(report.longrepr), - message="collection failure")) + self.append( + Junit.error(bin_xml_escape(report.longrepr), message="collection failure") + ) def append_collect_skipped(self, report): - self._add_simple( - Junit.skipped, "collection skipped", report.longrepr) + self._add_simple(Junit.skipped, "collection skipped", report.longrepr) def append_error(self, report): - if getattr(report, 'when', None) == 'teardown': + if getattr(report, "when", None) == "teardown": msg = "test teardown failure" else: msg = "test setup failure" - self._add_simple( - Junit.error, msg, report.longrepr) + self._add_simple(Junit.error, msg, report.longrepr) def append_skipped(self, report): if hasattr(report, "wasxfail"): - self._add_simple( - Junit.skipped, "expected test failure", report.wasxfail - ) + self._add_simple(Junit.skipped, "expected test failure", report.wasxfail) else: filename, lineno, skipreason = report.longrepr if skipreason.startswith("Skipped: "): skipreason = bin_xml_escape(skipreason[9:]) self.append( - Junit.skipped("%s:%s: %s" % (filename, lineno, skipreason), - 
type="pytest.skip", - message=skipreason)) + Junit.skipped( + "%s:%s: %s" % (filename, lineno, skipreason), + type="pytest.skip", + message=skipreason, + ) + ) self.write_captured_output(report) def finalize(self): @@ -245,8 +252,10 @@ def record_property(request): def test_function(record_property): record_property("example_key", 1) """ + def append_property(name, value): request.node.user_properties.append((name, value)) + return append_property @@ -255,11 +264,8 @@ def record_xml_property(record_property): """(Deprecated) use record_property.""" import warnings from _pytest import deprecated - warnings.warn( - deprecated.RECORD_XML_PROPERTY, - DeprecationWarning, - stacklevel=2 - ) + + warnings.warn(deprecated.RECORD_XML_PROPERTY, DeprecationWarning, stacklevel=2) return record_property @@ -271,14 +277,14 @@ def record_xml_attribute(request): automatically xml-encoded """ request.node.warn( - code='C3', - message='record_xml_attribute is an experimental feature', + code="C3", message="record_xml_attribute is an experimental feature" ) xml = getattr(request.config, "_xml", None) if xml is not None: node_reporter = xml.node_reporter(request.node.nodeid) return node_reporter.add_attribute else: + def add_attr_noop(name, value): pass @@ -288,51 +294,63 @@ def record_xml_attribute(request): def pytest_addoption(parser): group = parser.getgroup("terminal reporting") group.addoption( - '--junitxml', '--junit-xml', + "--junitxml", + "--junit-xml", action="store", dest="xmlpath", metavar="path", type=functools.partial(filename_arg, optname="--junitxml"), default=None, - help="create junit-xml style report file at given path.") + help="create junit-xml style report file at given path.", + ) group.addoption( - '--junitprefix', '--junit-prefix', + "--junitprefix", + "--junit-prefix", action="store", metavar="str", default=None, - help="prepend prefix to classnames in junit-xml output") - parser.addini("junit_suite_name", "Test suite name for JUnit report", default="pytest") - parser.addini("junit_logging", "Write captured log messages to JUnit report: " - "one of no|system-out|system-err", - default="no") # choices=['no', 'stdout', 'stderr']) + help="prepend prefix to classnames in junit-xml output", + ) + parser.addini( + "junit_suite_name", "Test suite name for JUnit report", default="pytest" + ) + parser.addini( + "junit_logging", + "Write captured log messages to JUnit report: " + "one of no|system-out|system-err", + default="no", + ) # choices=['no', 'stdout', 'stderr']) def pytest_configure(config): xmlpath = config.option.xmlpath # prevent opening xmllog on slave nodes (xdist) - if xmlpath and not hasattr(config, 'slaveinput'): - config._xml = LogXML(xmlpath, config.option.junitprefix, - config.getini("junit_suite_name"), - config.getini("junit_logging")) + if xmlpath and not hasattr(config, "slaveinput"): + config._xml = LogXML( + xmlpath, + config.option.junitprefix, + config.getini("junit_suite_name"), + config.getini("junit_logging"), + ) config.pluginmanager.register(config._xml) def pytest_unconfigure(config): - xml = getattr(config, '_xml', None) + xml = getattr(config, "_xml", None) if xml: del config._xml config.pluginmanager.unregister(xml) def mangle_test_address(address): - path, possible_open_bracket, params = address.partition('[') + path, possible_open_bracket, params = address.partition("[") names = path.split("::") try: - names.remove('()') + names.remove("()") except ValueError: pass # convert file path to dotted path - names[0] = names[0].replace(nodes.SEP, '.') + names[0] 
= names[0].replace(nodes.SEP, ".") names[0] = _py_ext_re.sub("", names[0]) # put any params back names[-1] += possible_open_bracket + params @@ -340,18 +358,14 @@ def mangle_test_address(address): class LogXML(object): + def __init__(self, logfile, prefix, suite_name="pytest", logging="no"): logfile = os.path.expanduser(os.path.expandvars(logfile)) self.logfile = os.path.normpath(os.path.abspath(logfile)) self.prefix = prefix self.suite_name = suite_name self.logging = logging - self.stats = dict.fromkeys([ - 'error', - 'passed', - 'failure', - 'skipped', - ], 0) + self.stats = dict.fromkeys(["error", "passed", "failure", "skipped"], 0) self.node_reporters = {} # nodeid -> _NodeReporter self.node_reporters_ordered = [] self.global_properties = [] @@ -360,17 +374,17 @@ class LogXML(object): self.cnt_double_fail_tests = 0 def finalize(self, report): - nodeid = getattr(report, 'nodeid', report) + nodeid = getattr(report, "nodeid", report) # local hack to handle xdist report order - slavenode = getattr(report, 'node', None) + slavenode = getattr(report, "node", None) reporter = self.node_reporters.pop((nodeid, slavenode)) if reporter is not None: reporter.finalize() def node_reporter(self, report): - nodeid = getattr(report, 'nodeid', report) + nodeid = getattr(report, "nodeid", report) # local hack to handle xdist report order - slavenode = getattr(report, 'node', None) + slavenode = getattr(report, "node", None) key = nodeid, slavenode @@ -428,12 +442,17 @@ class LogXML(object): report_wid = getattr(report, "worker_id", None) report_ii = getattr(report, "item_index", None) close_report = next( - (rep for rep in self.open_reports - if (rep.nodeid == report.nodeid and - getattr(rep, "item_index", None) == report_ii and - getattr(rep, "worker_id", None) == report_wid - ) - ), None) + ( + rep + for rep in self.open_reports + if ( + rep.nodeid == report.nodeid + and getattr(rep, "item_index", None) == report_ii + and getattr(rep, "worker_id", None) == report_wid + ) + ), + None, + ) if close_report: # We need to open new testcase in case we have failure in # call and error in teardown in order to follow junit @@ -461,12 +480,17 @@ class LogXML(object): report_wid = getattr(report, "worker_id", None) report_ii = getattr(report, "item_index", None) close_report = next( - (rep for rep in self.open_reports - if (rep.nodeid == report.nodeid and - getattr(rep, "item_index", None) == report_ii and - getattr(rep, "worker_id", None) == report_wid - ) - ), None) + ( + rep + for rep in self.open_reports + if ( + rep.nodeid == report.nodeid + and getattr(rep, "item_index", None) == report_ii + and getattr(rep, "worker_id", None) == report_wid + ) + ), + None, + ) if close_report: self.open_reports.remove(close_report) @@ -475,7 +499,7 @@ class LogXML(object): the Junit.testcase with the new total if already created. 
""" reporter = self.node_reporter(report) - reporter.duration += getattr(report, 'duration', 0.0) + reporter.duration += getattr(report, "duration", 0.0) def pytest_collectreport(self, report): if not report.passed: @@ -486,9 +510,9 @@ class LogXML(object): reporter.append_collect_skipped(report) def pytest_internalerror(self, excrepr): - reporter = self.node_reporter('internal') - reporter.attrs.update(classname="pytest", name='internal') - reporter._add_simple(Junit.error, 'internal error', excrepr) + reporter = self.node_reporter("internal") + reporter.attrs.update(classname="pytest", name="internal") + reporter._add_simple(Junit.error, "internal error", excrepr) def pytest_sessionstart(self): self.suite_start_time = time.time() @@ -497,29 +521,37 @@ class LogXML(object): dirname = os.path.dirname(os.path.abspath(self.logfile)) if not os.path.isdir(dirname): os.makedirs(dirname) - logfile = open(self.logfile, 'w', encoding='utf-8') + logfile = open(self.logfile, "w", encoding="utf-8") suite_stop_time = time.time() suite_time_delta = suite_stop_time - self.suite_start_time - numtests = (self.stats['passed'] + self.stats['failure'] + - self.stats['skipped'] + self.stats['error'] - - self.cnt_double_fail_tests) + numtests = ( + self.stats["passed"] + + self.stats["failure"] + + self.stats["skipped"] + + self.stats["error"] + - self.cnt_double_fail_tests + ) logfile.write('') - logfile.write(Junit.testsuite( - self._get_global_properties_node(), - [x.to_xml() for x in self.node_reporters_ordered], - name=self.suite_name, - errors=self.stats['error'], - failures=self.stats['failure'], - skips=self.stats['skipped'], - tests=numtests, - time="%.3f" % suite_time_delta, ).unicode(indent=0)) + logfile.write( + Junit.testsuite( + self._get_global_properties_node(), + [x.to_xml() for x in self.node_reporters_ordered], + name=self.suite_name, + errors=self.stats["error"], + failures=self.stats["failure"], + skips=self.stats["skipped"], + tests=numtests, + time="%.3f" % suite_time_delta, + ).unicode( + indent=0 + ) + ) logfile.close() def pytest_terminal_summary(self, terminalreporter): - terminalreporter.write_sep("-", - "generated xml file: %s" % (self.logfile)) + terminalreporter.write_sep("-", "generated xml file: %s" % (self.logfile)) def add_global_property(self, name, value): self.global_properties.append((str(name), bin_xml_escape(value))) @@ -534,4 +566,4 @@ class LogXML(object): for name, value in self.global_properties ] ) - return '' + return "" diff --git a/_pytest/logging.py b/_pytest/logging.py index 66ed6900b..00bb9aeb5 100644 --- a/_pytest/logging.py +++ b/_pytest/logging.py @@ -11,8 +11,8 @@ import pytest import py -DEFAULT_LOG_FORMAT = '%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s' -DEFAULT_LOG_DATE_FORMAT = '%H:%M:%S' +DEFAULT_LOG_FORMAT = "%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s" +DEFAULT_LOG_DATE_FORMAT = "%H:%M:%S" class ColoredLevelFormatter(logging.Formatter): @@ -21,19 +21,18 @@ class ColoredLevelFormatter(logging.Formatter): """ LOGLEVEL_COLOROPTS = { - logging.CRITICAL: {'red'}, - logging.ERROR: {'red', 'bold'}, - logging.WARNING: {'yellow'}, - logging.WARN: {'yellow'}, - logging.INFO: {'green'}, - logging.DEBUG: {'purple'}, + logging.CRITICAL: {"red"}, + logging.ERROR: {"red", "bold"}, + logging.WARNING: {"yellow"}, + logging.WARN: {"yellow"}, + logging.INFO: {"green"}, + logging.DEBUG: {"purple"}, logging.NOTSET: set(), } - LEVELNAME_FMT_REGEX = re.compile(r'%\(levelname\)([+-]?\d*s)') + LEVELNAME_FMT_REGEX = 
re.compile(r"%\(levelname\)([+-]?\d*s)") def __init__(self, terminalwriter, *args, **kwargs): - super(ColoredLevelFormatter, self).__init__( - *args, **kwargs) + super(ColoredLevelFormatter, self).__init__(*args, **kwargs) if six.PY2: self._original_fmt = self._fmt else: @@ -47,19 +46,20 @@ class ColoredLevelFormatter(logging.Formatter): for level, color_opts in self.LOGLEVEL_COLOROPTS.items(): formatted_levelname = levelname_fmt % { - 'levelname': logging.getLevelName(level)} + "levelname": logging.getLevelName(level) + } # add ANSI escape sequences around the formatted levelname color_kwargs = {name: True for name in color_opts} colorized_formatted_levelname = terminalwriter.markup( - formatted_levelname, **color_kwargs) + formatted_levelname, **color_kwargs + ) self._level_to_fmt_mapping[level] = self.LEVELNAME_FMT_REGEX.sub( - colorized_formatted_levelname, - self._fmt) + colorized_formatted_levelname, self._fmt + ) def format(self, record): - fmt = self._level_to_fmt_mapping.get( - record.levelno, self._original_fmt) + fmt = self._level_to_fmt_mapping.get(record.levelno, self._original_fmt) if six.PY2: self._fmt = fmt else: @@ -78,61 +78,86 @@ def get_option_ini(config, *names): def pytest_addoption(parser): """Add options to control log capturing.""" - group = parser.getgroup('logging') + group = parser.getgroup("logging") def add_option_ini(option, dest, default=None, type=None, **kwargs): - parser.addini(dest, default=default, type=type, - help='default value for ' + option) + parser.addini( + dest, default=default, type=type, help="default value for " + option + ) group.addoption(option, dest=dest, **kwargs) add_option_ini( - '--no-print-logs', - dest='log_print', action='store_const', const=False, default=True, - type='bool', - help='disable printing caught logs on failed tests.') + "--no-print-logs", + dest="log_print", + action="store_const", + const=False, + default=True, + type="bool", + help="disable printing caught logs on failed tests.", + ) add_option_ini( - '--log-level', - dest='log_level', default=None, - help='logging level used by the logging module') + "--log-level", + dest="log_level", + default=None, + help="logging level used by the logging module", + ) add_option_ini( - '--log-format', - dest='log_format', default=DEFAULT_LOG_FORMAT, - help='log format as used by the logging module.') + "--log-format", + dest="log_format", + default=DEFAULT_LOG_FORMAT, + help="log format as used by the logging module.", + ) add_option_ini( - '--log-date-format', - dest='log_date_format', default=DEFAULT_LOG_DATE_FORMAT, - help='log date format as used by the logging module.') + "--log-date-format", + dest="log_date_format", + default=DEFAULT_LOG_DATE_FORMAT, + help="log date format as used by the logging module.", + ) parser.addini( - 'log_cli', default=False, type='bool', - help='enable log display during test run (also known as "live logging").') + "log_cli", + default=False, + type="bool", + help='enable log display during test run (also known as "live logging").', + ) add_option_ini( - '--log-cli-level', - dest='log_cli_level', default=None, - help='cli logging level.') + "--log-cli-level", dest="log_cli_level", default=None, help="cli logging level." 
+ ) add_option_ini( - '--log-cli-format', - dest='log_cli_format', default=None, - help='log format as used by the logging module.') + "--log-cli-format", + dest="log_cli_format", + default=None, + help="log format as used by the logging module.", + ) add_option_ini( - '--log-cli-date-format', - dest='log_cli_date_format', default=None, - help='log date format as used by the logging module.') + "--log-cli-date-format", + dest="log_cli_date_format", + default=None, + help="log date format as used by the logging module.", + ) add_option_ini( - '--log-file', - dest='log_file', default=None, - help='path to a file when logging will be written to.') + "--log-file", + dest="log_file", + default=None, + help="path to a file when logging will be written to.", + ) add_option_ini( - '--log-file-level', - dest='log_file_level', default=None, - help='log file logging level.') + "--log-file-level", + dest="log_file_level", + default=None, + help="log file logging level.", + ) add_option_ini( - '--log-file-format', - dest='log_file_format', default=DEFAULT_LOG_FORMAT, - help='log format as used by the logging module.') + "--log-file-format", + dest="log_file_format", + default=DEFAULT_LOG_FORMAT, + help="log format as used by the logging module.", + ) add_option_ini( - '--log-file-date-format', - dest='log_file_date_format', default=DEFAULT_LOG_DATE_FORMAT, - help='log date format as used by the logging module.') + "--log-file-date-format", + dest="log_file_date_format", + default=DEFAULT_LOG_DATE_FORMAT, + help="log date format as used by the logging module.", + ) @contextmanager @@ -318,15 +343,14 @@ def get_actual_log_level(config, *setting_names): except ValueError: # Python logging does not recognise this as a logging level raise pytest.UsageError( - "'{0}' is not recognized as a logging level name for " - "'{1}'. Please consider passing the " - "logging level num instead.".format( - log_level, - setting_name)) + "'{}' is not recognized as a logging level name for " + "'{}'. 
Please consider passing the " + "logging level num instead.".format(log_level, setting_name) + ) def pytest_configure(config): - config.pluginmanager.register(LoggingPlugin(config), 'logging-plugin') + config.pluginmanager.register(LoggingPlugin(config), "logging-plugin") @contextmanager @@ -347,25 +371,31 @@ class LoggingPlugin(object): self._config = config # enable verbose output automatically if live logging is enabled - if self._log_cli_enabled() and not config.getoption('verbose'): + if self._log_cli_enabled() and not config.getoption("verbose"): # sanity check: terminal reporter should not have been loaded at this point - assert self._config.pluginmanager.get_plugin('terminalreporter') is None + assert self._config.pluginmanager.get_plugin("terminalreporter") is None config.option.verbose = 1 - self.print_logs = get_option_ini(config, 'log_print') - self.formatter = logging.Formatter(get_option_ini(config, 'log_format'), - get_option_ini(config, 'log_date_format')) - self.log_level = get_actual_log_level(config, 'log_level') + self.print_logs = get_option_ini(config, "log_print") + self.formatter = logging.Formatter( + get_option_ini(config, "log_format"), + get_option_ini(config, "log_date_format"), + ) + self.log_level = get_actual_log_level(config, "log_level") - log_file = get_option_ini(config, 'log_file') + log_file = get_option_ini(config, "log_file") if log_file: - self.log_file_level = get_actual_log_level(config, 'log_file_level') + self.log_file_level = get_actual_log_level(config, "log_file_level") - log_file_format = get_option_ini(config, 'log_file_format', 'log_format') - log_file_date_format = get_option_ini(config, 'log_file_date_format', 'log_date_format') + log_file_format = get_option_ini(config, "log_file_format", "log_format") + log_file_date_format = get_option_ini( + config, "log_file_date_format", "log_date_format" + ) # Each pytest runtests session will write to a clean logfile - self.log_file_handler = logging.FileHandler(log_file, mode='w') - log_file_formatter = logging.Formatter(log_file_format, datefmt=log_file_date_format) + self.log_file_handler = logging.FileHandler(log_file, mode="w") + log_file_formatter = logging.Formatter( + log_file_format, datefmt=log_file_date_format + ) self.log_file_handler.setFormatter(log_file_formatter) else: self.log_file_handler = None @@ -377,14 +407,18 @@ class LoggingPlugin(object): """Return True if log_cli should be considered enabled, either explicitly or because --log-cli-level was given in the command-line. 
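For readers tracing the option wiring above: live logging can be switched on either from an ini file or from the command line. A minimal sketch, with illustrative values only::

    # pytest.ini -- values are illustrative
    [pytest]
    log_cli = true
    log_cli_level = INFO
    log_cli_format = %(asctime)s %(levelname)s %(message)s

Passing ``--log-cli-level=INFO`` on the command line has the same effect, which is exactly the ``or`` that ``_log_cli_enabled()`` checks.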
""" - return self._config.getoption('--log-cli-level') is not None or \ - self._config.getini('log_cli') + return self._config.getoption( + "--log-cli-level" + ) is not None or self._config.getini( + "log_cli" + ) @contextmanager def _runtest_for(self, item, when): """Implements the internals of pytest_runtest_xxx() hook.""" - with catching_logs(LogCaptureHandler(), - formatter=self.formatter, level=self.log_level) as log_handler: + with catching_logs( + LogCaptureHandler(), formatter=self.formatter, level=self.log_level + ) as log_handler: if self.log_cli_handler: self.log_cli_handler.set_when(when) @@ -392,7 +426,7 @@ class LoggingPlugin(object): yield # run the test return - if not hasattr(item, 'catch_log_handlers'): + if not hasattr(item, "catch_log_handlers"): item.catch_log_handlers = {} item.catch_log_handlers[when] = log_handler item.catch_log_handler = log_handler @@ -400,39 +434,39 @@ class LoggingPlugin(object): yield # run test finally: del item.catch_log_handler - if when == 'teardown': + if when == "teardown": del item.catch_log_handlers if self.print_logs: # Add a captured log section to the report. log = log_handler.stream.getvalue().strip() - item.add_report_section(when, 'log', log) + item.add_report_section(when, "log", log) @pytest.hookimpl(hookwrapper=True) def pytest_runtest_setup(self, item): - with self._runtest_for(item, 'setup'): + with self._runtest_for(item, "setup"): yield @pytest.hookimpl(hookwrapper=True) def pytest_runtest_call(self, item): - with self._runtest_for(item, 'call'): + with self._runtest_for(item, "call"): yield @pytest.hookimpl(hookwrapper=True) def pytest_runtest_teardown(self, item): - with self._runtest_for(item, 'teardown'): + with self._runtest_for(item, "teardown"): yield @pytest.hookimpl(hookwrapper=True) def pytest_runtest_logstart(self): if self.log_cli_handler: self.log_cli_handler.reset() - with self._runtest_for(None, 'start'): + with self._runtest_for(None, "start"): yield @pytest.hookimpl(hookwrapper=True) def pytest_runtest_logfinish(self): - with self._runtest_for(None, 'finish'): + with self._runtest_for(None, "finish"): yield @pytest.hookimpl(hookwrapper=True) @@ -442,8 +476,9 @@ class LoggingPlugin(object): with self.live_logs_context: if self.log_file_handler is not None: with closing(self.log_file_handler): - with catching_logs(self.log_file_handler, - level=self.log_file_level): + with catching_logs( + self.log_file_handler, level=self.log_file_level + ): yield # run all the tests else: yield # run all the tests @@ -453,20 +488,38 @@ class LoggingPlugin(object): This must be done right before starting the loop so we can access the terminal reporter plugin. 
""" - terminal_reporter = self._config.pluginmanager.get_plugin('terminalreporter') + terminal_reporter = self._config.pluginmanager.get_plugin("terminalreporter") if self._log_cli_enabled() and terminal_reporter is not None: - capture_manager = self._config.pluginmanager.get_plugin('capturemanager') - log_cli_handler = _LiveLoggingStreamHandler(terminal_reporter, capture_manager) - log_cli_format = get_option_ini(self._config, 'log_cli_format', 'log_format') - log_cli_date_format = get_option_ini(self._config, 'log_cli_date_format', 'log_date_format') - if self._config.option.color != 'no' and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search(log_cli_format): - log_cli_formatter = ColoredLevelFormatter(create_terminal_writer(self._config), - log_cli_format, datefmt=log_cli_date_format) + capture_manager = self._config.pluginmanager.get_plugin("capturemanager") + log_cli_handler = _LiveLoggingStreamHandler( + terminal_reporter, capture_manager + ) + log_cli_format = get_option_ini( + self._config, "log_cli_format", "log_format" + ) + log_cli_date_format = get_option_ini( + self._config, "log_cli_date_format", "log_date_format" + ) + if ( + self._config.option.color != "no" + and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search(log_cli_format) + ): + log_cli_formatter = ColoredLevelFormatter( + create_terminal_writer(self._config), + log_cli_format, + datefmt=log_cli_date_format, + ) else: - log_cli_formatter = logging.Formatter(log_cli_format, datefmt=log_cli_date_format) - log_cli_level = get_actual_log_level(self._config, 'log_cli_level', 'log_level') + log_cli_formatter = logging.Formatter( + log_cli_format, datefmt=log_cli_date_format + ) + log_cli_level = get_actual_log_level( + self._config, "log_cli_level", "log_level" + ) self.log_cli_handler = log_cli_handler - self.live_logs_context = catching_logs(log_cli_handler, formatter=log_cli_formatter, level=log_cli_level) + self.live_logs_context = catching_logs( + log_cli_handler, formatter=log_cli_formatter, level=log_cli_level + ) else: self.live_logs_context = _dummy_context_manager() @@ -499,7 +552,7 @@ class _LiveLoggingStreamHandler(logging.StreamHandler): """Prepares for the given test phase (setup/call/teardown)""" self._when = when self._section_name_shown = False - if when == 'start': + if when == "start": self._test_outcome_written = False def emit(self, record): @@ -507,14 +560,14 @@ class _LiveLoggingStreamHandler(logging.StreamHandler): self.capture_manager.suspend_global_capture() try: if not self._first_record_emitted: - self.stream.write('\n') + self.stream.write("\n") self._first_record_emitted = True - elif self._when in ('teardown', 'finish'): + elif self._when in ("teardown", "finish"): if not self._test_outcome_written: self._test_outcome_written = True - self.stream.write('\n') + self.stream.write("\n") if not self._section_name_shown and self._when: - self.stream.section('live log ' + self._when, sep='-', bold=True) + self.stream.section("live log " + self._when, sep="-", bold=True) self._section_name_shown = True logging.StreamHandler.emit(self, record) finally: diff --git a/_pytest/main.py b/_pytest/main.py index 6c4bd65bb..23562358d 100644 --- a/_pytest/main.py +++ b/_pytest/main.py @@ -28,69 +28,140 @@ EXIT_NOTESTSCOLLECTED = 5 def pytest_addoption(parser): - parser.addini("norecursedirs", "directory patterns to avoid for recursion", - type="args", default=['.*', 'build', 'dist', 'CVS', '_darcs', '{arch}', '*.egg', 'venv']) - parser.addini("testpaths", "directories to search for tests when no files or 
directories are given in the " - "command line.", - type="args", default=[]) + parser.addini( + "norecursedirs", + "directory patterns to avoid for recursion", + type="args", + default=[".*", "build", "dist", "CVS", "_darcs", "{arch}", "*.egg", "venv"], + ) + parser.addini( + "testpaths", + "directories to search for tests when no files or directories are given in the " + "command line.", + type="args", + default=[], + ) # parser.addini("dirpatterns", # "patterns specifying possible locations of test files", # type="linelist", default=["**/test_*.txt", # "**/test_*.py", "**/*_test.py"] # ) group = parser.getgroup("general", "running and selection options") - group._addoption('-x', '--exitfirst', action="store_const", - dest="maxfail", const=1, - help="exit instantly on first error or failed test."), - group._addoption('--maxfail', metavar="num", - action="store", type=int, dest="maxfail", default=0, - help="exit after first num failures or errors.") - group._addoption('--strict', action="store_true", - help="marks not registered in configuration file raise errors.") - group._addoption("-c", metavar="file", type=str, dest="inifilename", - help="load configuration from `file` instead of trying to locate one of the implicit " - "configuration files.") - group._addoption("--continue-on-collection-errors", action="store_true", - default=False, dest="continue_on_collection_errors", - help="Force test execution even if collection errors occur.") - group._addoption("--rootdir", action="store", - dest="rootdir", - help="Define root directory for tests. Can be relative path: 'root_dir', './root_dir', " - "'root_dir/another_dir/'; absolute path: '/home/user/root_dir'; path with variables: " - "'$HOME/root_dir'.") + group._addoption( + "-x", + "--exitfirst", + action="store_const", + dest="maxfail", + const=1, + help="exit instantly on first error or failed test.", + ), + group._addoption( + "--maxfail", + metavar="num", + action="store", + type=int, + dest="maxfail", + default=0, + help="exit after first num failures or errors.", + ) + group._addoption( + "--strict", + action="store_true", + help="marks not registered in configuration file raise errors.", + ) + group._addoption( + "-c", + metavar="file", + type=str, + dest="inifilename", + help="load configuration from `file` instead of trying to locate one of the implicit " + "configuration files.", + ) + group._addoption( + "--continue-on-collection-errors", + action="store_true", + default=False, + dest="continue_on_collection_errors", + help="Force test execution even if collection errors occur.", + ) + group._addoption( + "--rootdir", + action="store", + dest="rootdir", + help="Define root directory for tests. 
Can be relative path: 'root_dir', './root_dir', " + "'root_dir/another_dir/'; absolute path: '/home/user/root_dir'; path with variables: " + "'$HOME/root_dir'.", + ) group = parser.getgroup("collect", "collection") - group.addoption('--collectonly', '--collect-only', action="store_true", - help="only collect tests, don't execute them."), - group.addoption('--pyargs', action="store_true", - help="try to interpret all arguments as python packages.") - group.addoption("--ignore", action="append", metavar="path", - help="ignore path during collection (multi-allowed).") - group.addoption("--deselect", action="append", metavar="nodeid_prefix", - help="deselect item during collection (multi-allowed).") + group.addoption( + "--collectonly", + "--collect-only", + action="store_true", + help="only collect tests, don't execute them.", + ), + group.addoption( + "--pyargs", + action="store_true", + help="try to interpret all arguments as python packages.", + ) + group.addoption( + "--ignore", + action="append", + metavar="path", + help="ignore path during collection (multi-allowed).", + ) + group.addoption( + "--deselect", + action="append", + metavar="nodeid_prefix", + help="deselect item during collection (multi-allowed).", + ) # when changing this to --conf-cut-dir, config.py Conftest.setinitial # needs upgrading as well - group.addoption('--confcutdir', dest="confcutdir", default=None, - metavar="dir", type=functools.partial(directory_arg, optname="--confcutdir"), - help="only load conftest.py's relative to specified dir.") - group.addoption('--noconftest', action="store_true", - dest="noconftest", default=False, - help="Don't load any conftest.py files.") - group.addoption('--keepduplicates', '--keep-duplicates', action="store_true", - dest="keepduplicates", default=False, - help="Keep duplicate tests.") - group.addoption('--collect-in-virtualenv', action='store_true', - dest='collect_in_virtualenv', default=False, - help="Don't ignore tests in a local virtualenv directory") + group.addoption( + "--confcutdir", + dest="confcutdir", + default=None, + metavar="dir", + type=functools.partial(directory_arg, optname="--confcutdir"), + help="only load conftest.py's relative to specified dir.", + ) + group.addoption( + "--noconftest", + action="store_true", + dest="noconftest", + default=False, + help="Don't load any conftest.py files.", + ) + group.addoption( + "--keepduplicates", + "--keep-duplicates", + action="store_true", + dest="keepduplicates", + default=False, + help="Keep duplicate tests.", + ) + group.addoption( + "--collect-in-virtualenv", + action="store_true", + dest="collect_in_virtualenv", + default=False, + help="Don't ignore tests in a local virtualenv directory", + ) - group = parser.getgroup("debugconfig", - "test session debugging and configuration") - group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir", - help="base temporary directory for this test run.") + group = parser.getgroup("debugconfig", "test session debugging and configuration") + group.addoption( + "--basetemp", + dest="basetemp", + default=None, + metavar="dir", + help="base temporary directory for this test run.", + ) def pytest_configure(config): - __import__('pytest').config = config # compatibility + __import__("pytest").config = config # compatibility def wrap_session(config, doit): @@ -112,8 +183,7 @@ def wrap_session(config, doit): except KeyboardInterrupt: excinfo = _pytest._code.ExceptionInfo() if initstate < 2 and isinstance(excinfo.value, exit.Exception): - sys.stderr.write('{0}: 
{1}\n'.format( - excinfo.typename, excinfo.value.msg)) + sys.stderr.write("{}: {}\n".format(excinfo.typename, excinfo.value.msg)) config.hook.pytest_keyboard_interrupt(excinfo=excinfo) session.exitstatus = EXIT_INTERRUPTED except: # noqa @@ -128,8 +198,8 @@ def wrap_session(config, doit): session.startdir.chdir() if initstate >= 2: config.hook.pytest_sessionfinish( - session=session, - exitstatus=session.exitstatus) + session=session, exitstatus=session.exitstatus + ) config._ensure_unconfigure() return session.exitstatus @@ -155,10 +225,8 @@ def pytest_collection(session): def pytest_runtestloop(session): - if (session.testsfailed and - not session.config.option.continue_on_collection_errors): - raise session.Interrupted( - "%d errors during collection" % session.testsfailed) + if session.testsfailed and not session.config.option.continue_on_collection_errors: + raise session.Interrupted("%d errors during collection" % session.testsfailed) if session.config.option.collectonly: return True @@ -176,11 +244,17 @@ def pytest_runtestloop(session): def _in_venv(path): """Attempts to detect if ``path`` is the root of a Virtual Environment by checking for the existence of the appropriate activate script""" - bindir = path.join('Scripts' if sys.platform.startswith('win') else 'bin') + bindir = path.join("Scripts" if sys.platform.startswith("win") else "bin") if not bindir.isdir(): return False - activates = ('activate', 'activate.csh', 'activate.fish', - 'Activate', 'Activate.bat', 'Activate.ps1') + activates = ( + "activate", + "activate.csh", + "activate.fish", + "Activate", + "Activate.bat", + "Activate.ps1", + ) return any([fname.basename in activates for fname in bindir.listdir()]) @@ -241,6 +315,7 @@ def _patched_find_module(): The only supported python<3.4 by pytest is python 2.7. """ if six.PY2: # python 3.4+ uses importlib instead + def find_module_patched(self, fullname, path=None): # Note: we ignore 'path' argument since it is only used via meta_path subname = fullname.split(".")[-1] @@ -252,8 +327,7 @@ def _patched_find_module(): # original: path = [os.path.realpath(self.path)] path = [self.path] try: - file, filename, etc = pkgutil.imp.find_module(subname, - path) + file, filename, etc = pkgutil.imp.find_module(subname, path) except ImportError: return None return pkgutil.ImpLoader(fullname, file, filename, etc) @@ -269,6 +343,7 @@ def _patched_find_module(): class FSHookProxy(object): + def __init__(self, fspath, pm, remove_mods): self.fspath = fspath self.pm = pm @@ -286,7 +361,7 @@ class NoMatch(Exception): class Interrupted(KeyboardInterrupt): """ signals an interrupted test run. 
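``_in_venv`` above detects a virtualenv root by the presence of an activate script rather than by ``pyvenv.cfg``. The same heuristic as a standalone sketch, using ``pathlib`` instead of ``py.path`` purely for illustration::

    import sys
    from pathlib import Path

    def in_venv(path):
        # Mirror of _in_venv(): look for an activate script in bin/
        # (or Scripts/ on Windows) under the candidate directory.
        bindir = Path(path) / ("Scripts" if sys.platform.startswith("win") else "bin")
        if not bindir.is_dir():
            return False
        activates = ("activate", "activate.csh", "activate.fish",
                     "Activate", "Activate.bat", "Activate.ps1")
        return any(entry.name in activates for entry in bindir.iterdir())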
""" - __module__ = 'builtins' # for py3 + __module__ = "builtins" # for py3 class Failed(Exception): @@ -299,8 +374,8 @@ class Session(nodes.FSCollector): def __init__(self, config): nodes.FSCollector.__init__( - self, config.rootdir, parent=None, - config=config, session=self, nodeid="") + self, config.rootdir, parent=None, config=config, session=self, nodeid="" + ) self.testsfailed = 0 self.testscollected = 0 self.shouldstop = False @@ -320,12 +395,12 @@ class Session(nodes.FSCollector): @hookimpl(tryfirst=True) def pytest_runtest_logreport(self, report): - if report.failed and not hasattr(report, 'wasxfail'): + if report.failed and not hasattr(report, "wasxfail"): self.testsfailed += 1 maxfail = self.config.getvalue("maxfail") if maxfail and self.testsfailed >= maxfail: - self.shouldfail = "stopping after %d failures" % ( - self.testsfailed) + self.shouldfail = "stopping after %d failures" % (self.testsfailed) + pytest_collectreport = pytest_runtest_logreport def isinitpath(self, path): @@ -350,8 +425,9 @@ class Session(nodes.FSCollector): try: items = self._perform_collect(args, genitems) self.config.pluginmanager.check_pending() - hook.pytest_collection_modifyitems(session=self, - config=self.config, items=items) + hook.pytest_collection_modifyitems( + session=self, config=self.config, items=items + ) finally: hook.pytest_collection_finish(session=self) self.testscollected = len(items) @@ -408,8 +484,9 @@ class Session(nodes.FSCollector): path = names.pop(0) if path.check(dir=1): assert not names, "invalid arg %r" % (arg,) - for path in path.visit(fil=lambda x: x.check(file=1), - rec=self._recurse, bf=True, sort=True): + for path in path.visit( + fil=lambda x: x.check(file=1), rec=self._recurse, bf=True, sort=True + ): for x in self._collectfile(path): yield x else: @@ -469,8 +546,8 @@ class Session(nodes.FSCollector): if not path.check(): if self.config.option.pyargs: raise UsageError( - "file or package not found: " + arg + - " (missing __init__.py?)") + "file or package not found: " + arg + " (missing __init__.py?)" + ) else: raise UsageError("file not found: " + arg) parts[0] = path diff --git a/_pytest/mark/__init__.py b/_pytest/mark/__init__.py index 7c96116d1..e3918ca6a 100644 --- a/_pytest/mark/__init__.py +++ b/_pytest/mark/__init__.py @@ -2,15 +2,25 @@ from __future__ import absolute_import, division, print_function from _pytest.config import UsageError from .structures import ( - ParameterSet, EMPTY_PARAMETERSET_OPTION, MARK_GEN, - Mark, MarkInfo, MarkDecorator, MarkGenerator, - transfer_markers, get_empty_parameterset_mark + ParameterSet, + EMPTY_PARAMETERSET_OPTION, + MARK_GEN, + Mark, + MarkInfo, + MarkDecorator, + MarkGenerator, + transfer_markers, + get_empty_parameterset_mark, ) from .legacy import matchkeyword, matchmark __all__ = [ - 'Mark', 'MarkInfo', 'MarkDecorator', 'MarkGenerator', - 'transfer_markers', 'get_empty_parameterset_mark' + "Mark", + "MarkInfo", + "MarkDecorator", + "MarkGenerator", + "transfer_markers", + "get_empty_parameterset_mark", ] @@ -42,47 +52,53 @@ def param(*values, **kw): def pytest_addoption(parser): group = parser.getgroup("general") group._addoption( - '-k', - action="store", dest="keyword", default='', metavar="EXPRESSION", + "-k", + action="store", + dest="keyword", + default="", + metavar="EXPRESSION", help="only run tests which match the given substring expression. " - "An expression is a python evaluatable expression " - "where all names are substring-matched against test names " - "and their parent classes. 
Example: -k 'test_method or test_" - "other' matches all test functions and classes whose name " - "contains 'test_method' or 'test_other', while -k 'not test_method' " - "matches those that don't contain 'test_method' in their names. " - "Additionally keywords are matched to classes and functions " - "containing extra names in their 'extra_keyword_matches' set, " - "as well as functions which have names assigned directly to them." + "An expression is a python evaluatable expression " + "where all names are substring-matched against test names " + "and their parent classes. Example: -k 'test_method or test_" + "other' matches all test functions and classes whose name " + "contains 'test_method' or 'test_other', while -k 'not test_method' " + "matches those that don't contain 'test_method' in their names. " + "Additionally keywords are matched to classes and functions " + "containing extra names in their 'extra_keyword_matches' set, " + "as well as functions which have names assigned directly to them.", ) group._addoption( "-m", - action="store", dest="markexpr", default="", metavar="MARKEXPR", + action="store", + dest="markexpr", + default="", + metavar="MARKEXPR", help="only run tests matching given mark expression. " - "example: -m 'mark1 and not mark2'." + "example: -m 'mark1 and not mark2'.", ) group.addoption( - "--markers", action="store_true", - help="show markers (builtin, plugin and per-project ones)." + "--markers", + action="store_true", + help="show markers (builtin, plugin and per-project ones).", ) - parser.addini("markers", "markers for test functions", 'linelist') - parser.addini( - EMPTY_PARAMETERSET_OPTION, - "default marker for empty parametersets") + parser.addini("markers", "markers for test functions", "linelist") + parser.addini(EMPTY_PARAMETERSET_OPTION, "default marker for empty parametersets") def pytest_cmdline_main(config): import _pytest.config + if config.option.markers: config._do_configure() tw = _pytest.config.create_terminal_writer(config) for line in config.getini("markers"): parts = line.split(":", 1) name = parts[0] - rest = parts[1] if len(parts) == 2 else '' + rest = parts[1] if len(parts) == 2 else "" tw.write("@pytest.mark.%s:" % name, bold=True) tw.line(rest) tw.line() @@ -147,11 +163,12 @@ def pytest_configure(config): empty_parameterset = config.getini(EMPTY_PARAMETERSET_OPTION) - if empty_parameterset not in ('skip', 'xfail', None, ''): + if empty_parameterset not in ("skip", "xfail", None, ""): raise UsageError( "{!s} must be one of skip and xfail," - " but it is {!r}".format(EMPTY_PARAMETERSET_OPTION, empty_parameterset)) + " but it is {!r}".format(EMPTY_PARAMETERSET_OPTION, empty_parameterset) + ) def pytest_unconfigure(config): - MARK_GEN._config = getattr(config, '_old_mark_config', None) + MARK_GEN._config = getattr(config, "_old_mark_config", None) diff --git a/_pytest/mark/evaluate.py b/_pytest/mark/evaluate.py index 0afbc56e7..a3d11ee0f 100644 --- a/_pytest/mark/evaluate.py +++ b/_pytest/mark/evaluate.py @@ -8,18 +8,20 @@ from ..outcomes import fail, TEST_OUTCOME def cached_eval(config, expr, d): - if not hasattr(config, '_evalcache'): + if not hasattr(config, "_evalcache"): config._evalcache = {} try: return config._evalcache[expr] except KeyError: import _pytest._code + exprcode = _pytest._code.compile(expr, mode="eval") config._evalcache[expr] = x = eval(exprcode, d) return x class MarkEvaluator(object): + def __init__(self, item, name): self.item = item self._marks = None @@ -29,16 +31,17 @@ class MarkEvaluator(object): def 
__bool__(self): # dont cache here to prevent staleness return bool(self._get_marks()) + __nonzero__ = __bool__ def wasvalid(self): - return not hasattr(self, 'exc') + return not hasattr(self, "exc") def _get_marks(self): return list(self.item.iter_markers(name=self._mark_name)) def invalidraise(self, exc): - raises = self.get('raises') + raises = self.get("raises") if not raises: return return not isinstance(exc, raises) @@ -49,24 +52,25 @@ class MarkEvaluator(object): except TEST_OUTCOME: self.exc = sys.exc_info() if isinstance(self.exc[1], SyntaxError): - msg = [" " * (self.exc[1].offset + 4) + "^", ] + msg = [" " * (self.exc[1].offset + 4) + "^"] msg.append("SyntaxError: invalid syntax") else: msg = traceback.format_exception_only(*self.exc[:2]) - fail("Error evaluating %r expression\n" - " %s\n" - "%s" - % (self._mark_name, self.expr, "\n".join(msg)), - pytrace=False) + fail( + "Error evaluating %r expression\n" + " %s\n" + "%s" % (self._mark_name, self.expr, "\n".join(msg)), + pytrace=False, + ) def _getglobals(self): - d = {'os': os, 'sys': sys, 'platform': platform, 'config': self.item.config} - if hasattr(self.item, 'obj'): + d = {"os": os, "sys": sys, "platform": platform, "config": self.item.config} + if hasattr(self.item, "obj"): d.update(self.item.obj.__globals__) return d def _istrue(self): - if hasattr(self, 'result'): + if hasattr(self, "result"): return self.result self._marks = self._get_marks() @@ -74,8 +78,8 @@ class MarkEvaluator(object): self.result = False for mark in self._marks: self._mark = mark - if 'condition' in mark.kwargs: - args = (mark.kwargs['condition'],) + if "condition" in mark.kwargs: + args = (mark.kwargs["condition"],) else: args = mark.args @@ -87,19 +91,18 @@ class MarkEvaluator(object): else: if "reason" not in mark.kwargs: # XXX better be checked at collection time - msg = "you need to specify reason=STRING " \ - "when using booleans as conditions." + msg = "you need to specify reason=STRING " "when using booleans as conditions." 
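That error message is the guard that makes boolean conditions require an explicit ``reason``; string conditions are instead evaluated lazily through ``cached_eval()`` with the globals assembled in ``_getglobals()``. Both accepted spellings, sketched::

    import sys
    import pytest

    # String condition: evaluated by MarkEvaluator, reason= is optional.
    @pytest.mark.skipif("sys.platform == 'win32'")
    def test_string_condition():
        pass

    # Boolean condition: evaluated eagerly, reason= is mandatory.
    @pytest.mark.skipif(sys.platform == "win32", reason="not supported on windows")
    def test_boolean_condition():
        pass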
fail(msg) result = bool(expr) if result: self.result = True - self.reason = mark.kwargs.get('reason', None) + self.reason = mark.kwargs.get("reason", None) self.expr = expr return self.result if not args: self.result = True - self.reason = mark.kwargs.get('reason', None) + self.reason = mark.kwargs.get("reason", None) return self.result return False @@ -109,9 +112,9 @@ class MarkEvaluator(object): return self._mark.kwargs.get(attr, default) def getexplanation(self): - expl = getattr(self, 'reason', None) or self.get('reason', None) + expl = getattr(self, "reason", None) or self.get("reason", None) if not expl: - if not hasattr(self, 'expr'): + if not hasattr(self, "expr"): return "" else: return "condition: " + str(self.expr) diff --git a/_pytest/mark/legacy.py b/_pytest/mark/legacy.py index 5c7b8d001..ab016a035 100644 --- a/_pytest/mark/legacy.py +++ b/_pytest/mark/legacy.py @@ -17,7 +17,7 @@ class MarkMapping(object): @classmethod def from_item(cls, item): - mark_names = set(mark.name for mark in item.iter_markers()) + mark_names = {mark.name for mark in item.iter_markers()} return cls(mark_names) def __getitem__(self, name): @@ -38,6 +38,7 @@ class KeywordMapping(object): # Add the names of the current item and any parent items import pytest + for item in item.listchain(): if not isinstance(item, pytest.Instance): mapped_names.add(item.name) @@ -47,7 +48,7 @@ class KeywordMapping(object): mapped_names.add(name) # Add the names attached to the current function through direct assignment - if hasattr(item, 'function'): + if hasattr(item, "function"): for name in item.function.__dict__: mapped_names.add(name) @@ -85,7 +86,11 @@ def matchkeyword(colitem, keywordexpr): return not mapping[keywordexpr[4:]] for kwd in keywordexpr.split(): if keyword.iskeyword(kwd) and kwd not in python_keywords_allowed_list: - raise UsageError("Python keyword '{}' not accepted in expressions passed to '-k'".format(kwd)) + raise UsageError( + "Python keyword '{}' not accepted in expressions passed to '-k'".format( + kwd + ) + ) try: return eval(keywordexpr, {}, mapping) except SyntaxError: diff --git a/_pytest/mark/structures.py b/_pytest/mark/structures.py index 72fd264b2..7e86aee44 100644 --- a/_pytest/mark/structures.py +++ b/_pytest/mark/structures.py @@ -20,32 +20,35 @@ def alias(name, warning=None): warnings.warn(warning, stacklevel=2) return getter(self) - return property(getter if warning is None else warned, doc='alias for ' + name) + return property(getter if warning is None else warned, doc="alias for " + name) def istestfunc(func): - return hasattr(func, "__call__") and \ - getattr(func, "__name__", "") != "" + return hasattr(func, "__call__") and getattr( + func, "__name__", "" + ) != "" def get_empty_parameterset_mark(config, argnames, func): requested_mark = config.getini(EMPTY_PARAMETERSET_OPTION) - if requested_mark in ('', None, 'skip'): + if requested_mark in ("", None, "skip"): mark = MARK_GEN.skip - elif requested_mark == 'xfail': + elif requested_mark == "xfail": mark = MARK_GEN.xfail(run=False) else: raise LookupError(requested_mark) fs, lineno = getfslineno(func) reason = "got empty parameter set %r, function %s at %s:%d" % ( - argnames, func.__name__, fs, lineno) + argnames, func.__name__, fs, lineno + ) return mark(reason=reason) -class ParameterSet(namedtuple('ParameterSet', 'values, marks, id')): +class ParameterSet(namedtuple("ParameterSet", "values, marks, id")): + @classmethod def param(cls, *values, **kw): - marks = kw.pop('marks', ()) + marks = kw.pop("marks", ()) if 
isinstance(marks, MarkDecorator): marks = marks, else: @@ -78,8 +81,9 @@ class ParameterSet(namedtuple('ParameterSet', 'values, marks, id')): newmarks = [] argval = parameterset while isinstance(argval, MarkDecorator): - newmarks.append(MarkDecorator(Mark( - argval.markname, argval.args[:-1], argval.kwargs))) + newmarks.append( + MarkDecorator(Mark(argval.markname, argval.args[:-1], argval.kwargs)) + ) argval = argval.args[-1] assert not isinstance(argval, ParameterSet) if legacy_force_tuple: @@ -99,16 +103,15 @@ class ParameterSet(namedtuple('ParameterSet', 'values, marks, id')): force_tuple = False parameters = [ ParameterSet.extract_from(x, legacy_force_tuple=force_tuple) - for x in argvalues] + for x in argvalues + ] del argvalues if not parameters: mark = get_empty_parameterset_mark(config, argnames, func) - parameters.append(ParameterSet( - values=(NOTSET,) * len(argnames), - marks=[mark], - id=None, - )) + parameters.append( + ParameterSet(values=(NOTSET,) * len(argnames), marks=[mark], id=None) + ) return argnames, parameters @@ -131,8 +134,8 @@ class Mark(object): """ assert self.name == other.name return Mark( - self.name, self.args + other.args, - dict(self.kwargs, **other.kwargs)) + self.name, self.args + other.args, dict(self.kwargs, **other.kwargs) + ) @attr.s @@ -172,9 +175,9 @@ class MarkDecorator(object): mark = attr.ib(validator=attr.validators.instance_of(Mark)) - name = alias('mark.name') - args = alias('mark.args') - kwargs = alias('mark.kwargs') + name = alias("mark.name") + args = alias("mark.args") + kwargs = alias("mark.kwargs") @property def markname(self): @@ -217,14 +220,11 @@ def get_unpacked_marks(obj): """ obtain the unpacked marks that are stored on an object """ - mark_list = getattr(obj, 'pytestmark', []) + mark_list = getattr(obj, "pytestmark", []) if not isinstance(mark_list, list): mark_list = [mark_list] - return [ - getattr(mark, 'mark', mark) # unpack MarkDecorator - for mark in mark_list - ] + return [getattr(mark, "mark", mark) for mark in mark_list] # unpack MarkDecorator def store_mark(obj, mark): @@ -271,7 +271,7 @@ def _marked(func, mark): invoked more than once. """ try: - func_mark = getattr(func, getattr(mark, 'combined', mark).name) + func_mark = getattr(func, getattr(mark, "combined", mark).name) except AttributeError: return False return any(mark == info.combined for info in func_mark) @@ -284,19 +284,21 @@ class MarkInfo(object): _marks = attr.ib() combined = attr.ib( repr=False, - default=attr.Factory(lambda self: reduce(Mark.combined_with, self._marks), - takes_self=True)) + default=attr.Factory( + lambda self: reduce(Mark.combined_with, self._marks), takes_self=True + ), + ) - name = alias('combined.name', warning=MARK_INFO_ATTRIBUTE) - args = alias('combined.args', warning=MARK_INFO_ATTRIBUTE) - kwargs = alias('combined.kwargs', warning=MARK_INFO_ATTRIBUTE) + name = alias("combined.name", warning=MARK_INFO_ATTRIBUTE) + args = alias("combined.args", warning=MARK_INFO_ATTRIBUTE) + kwargs = alias("combined.kwargs", warning=MARK_INFO_ATTRIBUTE) @classmethod def for_mark(cls, mark): return cls([mark]) def __repr__(self): - return "".format(self.combined) + return "".format(self.combined) def add_mark(self, mark): """ add a MarkInfo with the given args and kwargs. 
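``ParameterSet.param`` above is the machinery behind ``pytest.param``; a short sketch of attaching a mark to a single parameter set::

    import pytest

    @pytest.mark.parametrize(
        "n, expected",
        [
            (2, 3),
            pytest.param(1, 0, marks=pytest.mark.xfail(reason="deliberately wrong")),
        ],
    )
    def test_increment(n, expected):
        assert n + 1 == expected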
""" @@ -348,6 +350,7 @@ MARK_GEN = MarkGenerator() class NodeKeywords(MappingMixin): + def __init__(self, node): self.node = node self.parent = node.parent @@ -381,7 +384,7 @@ class NodeKeywords(MappingMixin): return len(self._seen()) def __repr__(self): - return "" % (self.node, ) + return "" % (self.node,) @attr.s(cmp=False, hash=False) diff --git a/_pytest/monkeypatch.py b/_pytest/monkeypatch.py index 78db6064d..16080b5d5 100644 --- a/_pytest/monkeypatch.py +++ b/_pytest/monkeypatch.py @@ -38,12 +38,12 @@ def monkeypatch(): def resolve(name): # simplified from zope.dottedname - parts = name.split('.') + parts = name.split(".") used = parts.pop(0) found = __import__(used) for part in parts: - used += '.' + part + used += "." + part try: found = getattr(found, part) except AttributeError: @@ -60,9 +60,7 @@ def resolve(name): if expected == used: raise else: - raise ImportError( - 'import error in %s: %s' % (used, ex) - ) + raise ImportError("import error in %s: %s" % (used, ex)) found = annotated_getattr(found, part, used) return found @@ -72,18 +70,15 @@ def annotated_getattr(obj, name, ann): obj = getattr(obj, name) except AttributeError: raise AttributeError( - '%r object at %s has no attribute %r' % ( - type(obj).__name__, ann, name - ) + "%r object at %s has no attribute %r" % (type(obj).__name__, ann, name) ) return obj def derive_importpath(import_path, raising): if not isinstance(import_path, six.string_types) or "." not in import_path: - raise TypeError("must be absolute import path string, not %r" % - (import_path,)) - module, attr = import_path.rsplit('.', 1) + raise TypeError("must be absolute import path string, not %r" % (import_path,)) + module, attr = import_path.rsplit(".", 1) target = resolve(module) if raising: annotated_getattr(target, attr, ann=module) @@ -91,6 +86,7 @@ def derive_importpath(import_path, raising): class Notset(object): + def __repr__(self): return "" @@ -150,9 +146,11 @@ class MonkeyPatch(object): if value is notset: if not isinstance(target, six.string_types): - raise TypeError("use setattr(target, name, value) or " - "setattr(target, value) with target being a dotted " - "import string") + raise TypeError( + "use setattr(target, name, value) or " + "setattr(target, value) with target being a dotted " + "import string" + ) value = name name, target = derive_importpath(target, raising) @@ -180,9 +178,11 @@ class MonkeyPatch(object): __tracebackhide__ = True if name is notset: if not isinstance(target, six.string_types): - raise TypeError("use delattr(target, name) or " - "delattr(target) with target being a dotted " - "import string") + raise TypeError( + "use delattr(target, name) or " + "delattr(target) with target being a dotted " + "import string" + ) name, target = derive_importpath(target, raising) if not hasattr(target, name): diff --git a/_pytest/nodes.py b/_pytest/nodes.py index 43e81da9b..8d82bf606 100644 --- a/_pytest/nodes.py +++ b/_pytest/nodes.py @@ -30,7 +30,7 @@ def _splitnode(nodeid): ['testing', 'code', 'test_excinfo.py'] ['testing', 'code', 'test_excinfo.py', 'TestFormattedExcinfo', '()'] """ - if nodeid == '': + if nodeid == "": # If there is no root node at all, return an empty list so the caller's logic can remain sane return [] parts = nodeid.split(SEP) @@ -64,14 +64,16 @@ class _CompatProperty(object): # "usage of {owner!r}.{name} is deprecated, please use pytest.{name} instead".format( # name=self.name, owner=type(owner).__name__), # PendingDeprecationWarning, stacklevel=2) - return getattr(__import__('pytest'), self.name) + 
return getattr(__import__("pytest"), self.name) class Node(object): """ base class for Collector and Item the test collection tree. Collector subclasses have children, Items are terminal nodes.""" - def __init__(self, name, parent=None, config=None, session=None, fspath=None, nodeid=None): + def __init__( + self, name, parent=None, config=None, session=None, fspath=None, nodeid=None + ): #: a unique name within the scope of the parent node self.name = name @@ -85,7 +87,7 @@ class Node(object): self.session = session or parent.session #: filesystem path where this node was collected from (can be None) - self.fspath = fspath or getattr(parent, 'fspath', None) + self.fspath = fspath or getattr(parent, "fspath", None) #: keywords/markers collected from all scopes self.keywords = NodeKeywords(self) @@ -120,7 +122,7 @@ class Node(object): def _getcustomclass(self, name): maybe_compatprop = getattr(type(self), name) if isinstance(maybe_compatprop, _CompatProperty): - return getattr(__import__('pytest'), name) + return getattr(__import__("pytest"), name) else: cls = getattr(self, name) # TODO: reenable in the features branch @@ -130,8 +132,7 @@ class Node(object): return cls def __repr__(self): - return "<%s %r>" % (self.__class__.__name__, - getattr(self, 'name', None)) + return "<%s %r>" % (self.__class__.__name__, getattr(self, "name", None)) def warn(self, code, message): """ generate a warning with the given code and message for this @@ -140,9 +141,11 @@ class Node(object): fslocation = getattr(self, "location", None) if fslocation is None: fslocation = getattr(self, "fspath", None) - self.ihook.pytest_logwarning.call_historic(kwargs=dict( - code=code, message=message, - nodeid=self.nodeid, fslocation=fslocation)) + self.ihook.pytest_logwarning.call_historic( + kwargs=dict( + code=code, message=message, nodeid=self.nodeid, fslocation=fslocation + ) + ) # methods for ordering nodes @property @@ -176,6 +179,7 @@ class Node(object): ``marker`` can be a string or pytest.mark.* instance. """ from _pytest.mark import MarkDecorator, MARK_GEN + if isinstance(marker, six.string_types): marker = getattr(MARK_GEN, marker) elif not isinstance(marker, MarkDecorator): @@ -200,7 +204,7 @@ class Node(object): """ for node in reversed(self.listchain()): for mark in node.own_markers: - if name is None or getattr(mark, 'name', None) == name: + if name is None or getattr(mark, "name", None) == name: yield node, mark def get_closest_marker(self, name, default=None): @@ -283,9 +287,13 @@ class Node(object): except OSError: abspath = True - return excinfo.getrepr(funcargs=True, abspath=abspath, - showlocals=self.config.option.showlocals, - style=style, tbfilter=tbfilter) + return excinfo.getrepr( + funcargs=True, + abspath=abspath, + showlocals=self.config.option.showlocals, + style=style, + tbfilter=tbfilter, + ) repr_failure = _repr_failure_py @@ -312,7 +320,7 @@ class Collector(Node): return self._repr_failure_py(excinfo, style="short") def _prunetraceback(self, excinfo): - if hasattr(self, 'fspath'): + if hasattr(self, "fspath"): traceback = excinfo.traceback ntraceback = traceback.cut(path=self.fspath) if ntraceback == traceback: @@ -327,6 +335,7 @@ def _check_initialpaths_for_relpath(session, fspath): class FSCollector(Collector): + def __init__(self, fspath, parent=None, config=None, session=None, nodeid=None): fspath = py.path.local(fspath) # xxx only for test_resultlog.py? 
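The marker accessors above (``iter_markers``, ``iter_markers_with_node``, ``get_closest_marker``) walk from the current node up through its parents. A hedged ``conftest.py`` sketch consuming them; the ``slow`` marker name is illustrative::

    # conftest.py -- "slow" is an illustrative marker name
    import pytest

    def pytest_runtest_setup(item):
        # Skip any test that carries the marker anywhere in its chain.
        if item.get_closest_marker("slow") is not None:
            pytest.skip("skipping slow tests in this sketch")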
name = fspath.basename @@ -347,7 +356,9 @@ class FSCollector(Collector): if os.sep != SEP: nodeid = nodeid.replace(os.sep, SEP) - super(FSCollector, self).__init__(name, parent, config, session, nodeid=nodeid, fspath=fspath) + super(FSCollector, self).__init__( + name, parent, config, session, nodeid=nodeid, fspath=fspath + ) class File(FSCollector): diff --git a/_pytest/nose.py b/_pytest/nose.py index c81542ead..bb2e4277d 100644 --- a/_pytest/nose.py +++ b/_pytest/nose.py @@ -9,9 +9,9 @@ from _pytest.config import hookimpl def get_skip_exceptions(): skip_classes = set() - for module_name in ('unittest', 'unittest2', 'nose'): + for module_name in ("unittest", "unittest2", "nose"): mod = sys.modules.get(module_name) - if hasattr(mod, 'SkipTest'): + if hasattr(mod, "SkipTest"): skip_classes.add(mod.SkipTest) return tuple(skip_classes) @@ -19,8 +19,7 @@ def get_skip_exceptions(): def pytest_runtest_makereport(item, call): if call.excinfo and call.excinfo.errisinstance(get_skip_exceptions()): # let's substitute the excinfo with a pytest.skip one - call2 = call.__class__( - lambda: runner.skip(str(call.excinfo.value)), call.when) + call2 = call.__class__(lambda: runner.skip(str(call.excinfo.value)), call.when) call.excinfo = call2.excinfo @@ -29,22 +28,22 @@ def pytest_runtest_setup(item): if is_potential_nosetest(item): if isinstance(item.parent, python.Generator): gen = item.parent - if not hasattr(gen, '_nosegensetup'): - call_optional(gen.obj, 'setup') + if not hasattr(gen, "_nosegensetup"): + call_optional(gen.obj, "setup") if isinstance(gen.parent, python.Instance): - call_optional(gen.parent.obj, 'setup') + call_optional(gen.parent.obj, "setup") gen._nosegensetup = True - if not call_optional(item.obj, 'setup'): + if not call_optional(item.obj, "setup"): # call module level setup if there is no object level one - call_optional(item.parent.obj, 'setup') + call_optional(item.parent.obj, "setup") # XXX this implies we only call teardown when setup worked item.session._setupstate.addfinalizer((lambda: teardown_nose(item)), item) def teardown_nose(item): if is_potential_nosetest(item): - if not call_optional(item.obj, 'teardown'): - call_optional(item.parent.obj, 'teardown') + if not call_optional(item.obj, "teardown"): + call_optional(item.parent.obj, "teardown") # if hasattr(item.parent, '_nosegensetup'): # #call_optional(item._nosegensetup, 'teardown') # del item.parent._nosegensetup @@ -52,14 +51,15 @@ def teardown_nose(item): def pytest_make_collect_report(collector): if isinstance(collector, python.Generator): - call_optional(collector.obj, 'setup') + call_optional(collector.obj, "setup") def is_potential_nosetest(item): # extra check needed since we do not do nose style setup/teardown # on direct unittest style classes - return isinstance(item, python.Function) and \ - not isinstance(item, unittest.TestCaseFunction) + return isinstance(item, python.Function) and not isinstance( + item, unittest.TestCaseFunction + ) def call_optional(obj, name): diff --git a/_pytest/outcomes.py b/_pytest/outcomes.py index 640c5773a..8a3662e1b 100644 --- a/_pytest/outcomes.py +++ b/_pytest/outcomes.py @@ -11,6 +11,7 @@ class OutcomeException(BaseException): """ OutcomeException and its subclass instances indicate and contain info about test and collection outcomes. 
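The nose compatibility shims above invoke plain ``setup``/``teardown`` callables when present (via ``call_optional``, falling back from the test object to the module). A minimal module exercising the module-level fallback::

    # test_nose_style.py -- module-level setup/teardown, nose convention
    events = []

    def setup():
        events.append("setup")

    def teardown():
        events.append("teardown")

    def test_setup_ran():
        assert events == ["setup"]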
""" + def __init__(self, msg=None, pytrace=True): BaseException.__init__(self, msg) self.msg = msg @@ -20,9 +21,10 @@ class OutcomeException(BaseException): if self.msg: val = self.msg if isinstance(val, bytes): - val = py._builtin._totext(val, errors='replace') + val = py._builtin._totext(val, errors="replace") return val return "<%s instance>" % (self.__class__.__name__,) + __str__ = __repr__ @@ -32,7 +34,7 @@ TEST_OUTCOME = (OutcomeException, Exception) class Skipped(OutcomeException): # XXX hackish: on 3k we fake to live in the builtins # in order to have Skipped exception printing shorter/nicer - __module__ = 'builtins' + __module__ = "builtins" def __init__(self, msg=None, pytrace=True, allow_module_level=False): OutcomeException.__init__(self, msg=msg, pytrace=pytrace) @@ -41,15 +43,17 @@ class Skipped(OutcomeException): class Failed(OutcomeException): """ raised from an explicit call to pytest.fail() """ - __module__ = 'builtins' + __module__ = "builtins" class Exit(KeyboardInterrupt): """ raised for immediate program exits (no tracebacks/summaries)""" + def __init__(self, msg="unknown reason"): self.msg = msg KeyboardInterrupt.__init__(self, msg) + # exposed helper methods @@ -72,10 +76,10 @@ def skip(msg="", **kwargs): module level, skipping the rest of the module. Default to False. """ __tracebackhide__ = True - allow_module_level = kwargs.pop('allow_module_level', False) + allow_module_level = kwargs.pop("allow_module_level", False) if kwargs: keys = [k for k in kwargs.keys()] - raise TypeError('unexpected keyword arguments: {0}'.format(keys)) + raise TypeError("unexpected keyword arguments: {}".format(keys)) raise Skipped(msg=msg, allow_module_level=allow_module_level) @@ -114,15 +118,16 @@ def importorskip(modname, minversion=None): is only triggered if the module can not be imported. """ import warnings + __tracebackhide__ = True - compile(modname, '', 'eval') # to catch syntaxerrors + compile(modname, "", "eval") # to catch syntaxerrors should_skip = False with warnings.catch_warnings(): # make sure to ignore ImportWarnings that might happen because # of existing directories with the same name we're trying to # import but without a __init__.py file - warnings.simplefilter('ignore') + warnings.simplefilter("ignore") try: __import__(modname) except ImportError: @@ -133,15 +138,20 @@ def importorskip(modname, minversion=None): mod = sys.modules[modname] if minversion is None: return mod - verattr = getattr(mod, '__version__', None) + verattr = getattr(mod, "__version__", None) if minversion is not None: try: from pkg_resources import parse_version as pv except ImportError: - raise Skipped("we have a required version for %r but can not import " - "pkg_resources to parse version strings." % (modname,), - allow_module_level=True) + raise Skipped( + "we have a required version for %r but can not import " + "pkg_resources to parse version strings." 
% (modname,), + allow_module_level=True, + ) if verattr is None or pv(verattr) < pv(minversion): - raise Skipped("module %r has __version__ %r, required is: %r" % ( - modname, verattr, minversion), allow_module_level=True) + raise Skipped( + "module %r has __version__ %r, required is: %r" + % (modname, verattr, minversion), + allow_module_level=True, + ) return mod diff --git a/_pytest/pastebin.py b/_pytest/pastebin.py index b588b021b..6af202d1f 100644 --- a/_pytest/pastebin.py +++ b/_pytest/pastebin.py @@ -9,43 +9,48 @@ import tempfile def pytest_addoption(parser): group = parser.getgroup("terminal reporting") - group._addoption('--pastebin', metavar="mode", - action='store', dest="pastebin", default=None, - choices=['failed', 'all'], - help="send failed|all info to bpaste.net pastebin service.") + group._addoption( + "--pastebin", + metavar="mode", + action="store", + dest="pastebin", + default=None, + choices=["failed", "all"], + help="send failed|all info to bpaste.net pastebin service.", + ) @pytest.hookimpl(trylast=True) def pytest_configure(config): if config.option.pastebin == "all": - tr = config.pluginmanager.getplugin('terminalreporter') + tr = config.pluginmanager.getplugin("terminalreporter") # if no terminal reporter plugin is present, nothing we can do here; # this can happen when this function executes in a slave node # when using pytest-xdist, for example if tr is not None: # pastebin file will be utf-8 encoded binary file - config._pastebinfile = tempfile.TemporaryFile('w+b') + config._pastebinfile = tempfile.TemporaryFile("w+b") oldwrite = tr._tw.write def tee_write(s, **kwargs): oldwrite(s, **kwargs) if isinstance(s, six.text_type): - s = s.encode('utf-8') + s = s.encode("utf-8") config._pastebinfile.write(s) tr._tw.write = tee_write def pytest_unconfigure(config): - if hasattr(config, '_pastebinfile'): + if hasattr(config, "_pastebinfile"): # get terminal contents and delete file config._pastebinfile.seek(0) sessionlog = config._pastebinfile.read() config._pastebinfile.close() del config._pastebinfile # undo our patching in the terminal reporter - tr = config.pluginmanager.getplugin('terminalreporter') - del tr._tw.__dict__['write'] + tr = config.pluginmanager.getplugin("terminalreporter") + del tr._tw.__dict__["write"] # write summary tr.write_sep("=", "Sending information to Paste Service") pastebinurl = create_new_paste(sessionlog) @@ -60,6 +65,7 @@ def create_new_paste(contents): :returns: url to the pasted contents """ import re + if sys.version_info < (3, 0): from urllib import urlopen, urlencode else: @@ -67,32 +73,35 @@ def create_new_paste(contents): from urllib.parse import urlencode params = { - 'code': contents, - 'lexer': 'python3' if sys.version_info[0] == 3 else 'python', - 'expiry': '1week', + "code": contents, + "lexer": "python3" if sys.version_info[0] == 3 else "python", + "expiry": "1week", } - url = 'https://bpaste.net' - response = urlopen(url, data=urlencode(params).encode('ascii')).read() - m = re.search(r'href="/raw/(\w+)"', response.decode('utf-8')) + url = "https://bpaste.net" + response = urlopen(url, data=urlencode(params).encode("ascii")).read() + m = re.search(r'href="/raw/(\w+)"', response.decode("utf-8")) if m: - return '%s/show/%s' % (url, m.group(1)) + return "%s/show/%s" % (url, m.group(1)) else: - return 'bad response: ' + response + return "bad response: " + response def pytest_terminal_summary(terminalreporter): import _pytest.config + if terminalreporter.config.option.pastebin != "failed": return tr = terminalreporter - if 
'failed' in tr.stats: + if "failed" in tr.stats: terminalreporter.write_sep("=", "Sending information to Paste Service") - for rep in terminalreporter.stats.get('failed'): + for rep in terminalreporter.stats.get("failed"): try: msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc except AttributeError: msg = tr._getfailureheadline(rep) - tw = _pytest.config.create_terminal_writer(terminalreporter.config, stringio=True) + tw = _pytest.config.create_terminal_writer( + terminalreporter.config, stringio=True + ) rep.toterminal(tw) s = tw.stringio.getvalue() assert len(s) diff --git a/_pytest/pytester.py b/_pytest/pytester.py index 27dd8289d..c9defe03a 100644 --- a/_pytest/pytester.py +++ b/_pytest/pytester.py @@ -23,23 +23,35 @@ from _pytest.main import Session, EXIT_OK from _pytest.assertion.rewrite import AssertionRewritingHook -PYTEST_FULLPATH = os.path.abspath(pytest.__file__.rstrip("oc")).replace("$py.class", ".py") +PYTEST_FULLPATH = os.path.abspath(pytest.__file__.rstrip("oc")).replace( + "$py.class", ".py" +) IGNORE_PAM = [ # filenames added when obtaining details about the current user - u'/var/lib/sss/mc/passwd' + u"/var/lib/sss/mc/passwd" ] def pytest_addoption(parser): - parser.addoption('--lsof', - action="store_true", dest="lsof", default=False, - help=("run FD checks if lsof is available")) + parser.addoption( + "--lsof", + action="store_true", + dest="lsof", + default=False, + help=("run FD checks if lsof is available"), + ) - parser.addoption('--runpytest', default="inprocess", dest="runpytest", - choices=("inprocess", "subprocess"), - help=("run pytest sub runs in tests using an 'inprocess' " - "or 'subprocess' (python -m main) method")) + parser.addoption( + "--runpytest", + default="inprocess", + dest="runpytest", + choices=("inprocess", "subprocess"), + help=( + "run pytest sub runs in tests using an 'inprocess' " + "or 'subprocess' (python -m main) method" + ), + ) def pytest_configure(config): @@ -50,6 +62,7 @@ def pytest_configure(config): class LsofFdLeakChecker(object): + def get_open_files(self): out = self._exec_lsof() open_files = self._parse_lsof_output(out) @@ -60,20 +73,25 @@ class LsofFdLeakChecker(object): return py.process.cmdexec("lsof -Ffn0 -p %d" % pid) def _parse_lsof_output(self, out): + def isopen(line): - return line.startswith('f') and ("deleted" not in line and - 'mem' not in line and "txt" not in line and 'cwd' not in line) + return line.startswith("f") and ( + "deleted" not in line + and "mem" not in line + and "txt" not in line + and "cwd" not in line + ) open_files = [] for line in out.split("\n"): if isopen(line): - fields = line.split('\0') + fields = line.split("\0") fd = fields[0][1:] filename = fields[1][1:] if filename in IGNORE_PAM: continue - if filename.startswith('/'): + if filename.startswith("/"): open_files.append((fd, filename)) return open_files @@ -97,7 +115,7 @@ class LsofFdLeakChecker(object): gc.collect() lines2 = self.get_open_files() - new_fds = set([t[0] for t in lines2]) - set([t[0] for t in lines1]) + new_fds = {t[0] for t in lines2} - {t[0] for t in lines1} leaked_files = [t for t in lines2 if t[0] in new_fds] if leaked_files: error = [] @@ -110,15 +128,15 @@ class LsofFdLeakChecker(object): error.append(error[0]) error.append("*** function %s:%s: %s " % item.location) error.append("See issue #2366") - item.warn('', "\n".join(error)) + item.warn("", "\n".join(error)) # XXX copied from execnet's conftest.py - needs to be merged winpymap = { - 'python2.7': r'C:\Python27\python.exe', - 'python3.4': 
r'C:\Python34\python.exe', - 'python3.5': r'C:\Python35\python.exe', - 'python3.6': r'C:\Python36\python.exe', + "python2.7": r"C:\Python27\python.exe", + "python3.4": r"C:\Python34\python.exe", + "python3.5": r"C:\Python35\python.exe", + "python3.6": r"C:\Python36\python.exe", } @@ -129,8 +147,12 @@ def getexecutable(name, cache={}): executable = py.path.local.sysfind(name) if executable: import subprocess - popen = subprocess.Popen([str(executable), "--version"], - universal_newlines=True, stderr=subprocess.PIPE) + + popen = subprocess.Popen( + [str(executable), "--version"], + universal_newlines=True, + stderr=subprocess.PIPE, + ) out, err = popen.communicate() if name == "jython": if not err or "2.5" not in err: @@ -144,7 +166,7 @@ def getexecutable(name, cache={}): return executable -@pytest.fixture(params=['python2.7', 'python3.4', 'pypy', 'pypy3']) +@pytest.fixture(params=["python2.7", "python3.4", "pypy", "pypy3"]) def anypython(request): name = request.param executable = getexecutable(name) @@ -158,6 +180,7 @@ def anypython(request): pytest.skip("no suitable %s found" % (name,)) return executable + # used at least by pytest-xdist plugin @@ -172,6 +195,7 @@ def _pytest(request): class PytestArg(object): + def __init__(self, request): self.request = request @@ -187,13 +211,14 @@ def get_public_names(values): class ParsedCall(object): + def __init__(self, name, kwargs): self.__dict__.update(kwargs) self._name = name def __repr__(self): d = self.__dict__.copy() - del d['_name'] + del d["_name"] return "" % (self._name, d) @@ -263,12 +288,15 @@ class HookRecorder(object): # functionality for test reports - def getreports(self, - names="pytest_runtest_logreport pytest_collectreport"): + def getreports(self, names="pytest_runtest_logreport pytest_collectreport"): return [x.report for x in self.getcalls(names)] - def matchreport(self, inamepart="", - names="pytest_runtest_logreport pytest_collectreport", when=None): + def matchreport( + self, + inamepart="", + names="pytest_runtest_logreport pytest_collectreport", + when=None, + ): """return a testreport whose dotted import path matches""" values = [] for rep in self.getreports(names=names): @@ -278,31 +306,32 @@ class HookRecorder(object): continue except AttributeError: pass - if when and getattr(rep, 'when', None) != when: + if when and getattr(rep, "when", None) != when: continue if not inamepart or inamepart in rep.nodeid.split("::"): values.append(rep) if not values: - raise ValueError("could not find test report matching %r: " - "no test reports at all!" % (inamepart,)) + raise ValueError( + "could not find test report matching %r: " + "no test reports at all!" 
% (inamepart,) + ) if len(values) > 1: raise ValueError( - "found 2 or more testreports matching %r: %s" % (inamepart, values)) + "found 2 or more testreports matching %r: %s" % (inamepart, values) + ) return values[0] - def getfailures(self, - names='pytest_runtest_logreport pytest_collectreport'): + def getfailures(self, names="pytest_runtest_logreport pytest_collectreport"): return [rep for rep in self.getreports(names) if rep.failed] def getfailedcollections(self): - return self.getfailures('pytest_collectreport') + return self.getfailures("pytest_collectreport") def listoutcomes(self): passed = [] skipped = [] failed = [] - for rep in self.getreports( - "pytest_collectreport pytest_runtest_logreport"): + for rep in self.getreports("pytest_collectreport pytest_runtest_logreport"): if rep.passed: if getattr(rep, "when", None) == "call": passed.append(rep) @@ -330,7 +359,7 @@ def linecomp(request): return LineComp() -@pytest.fixture(name='LineMatcher') +@pytest.fixture(name="LineMatcher") def LineMatcher_fixture(request): return LineMatcher @@ -373,7 +402,7 @@ class RunResult(object): """ for line in reversed(self.outlines): - if 'seconds' in line: + if "seconds" in line: outcomes = rex_outcome.findall(line) if outcomes: d = {} @@ -389,15 +418,18 @@ class RunResult(object): """ d = self.parseoutcomes() obtained = { - 'passed': d.get('passed', 0), - 'skipped': d.get('skipped', 0), - 'failed': d.get('failed', 0), - 'error': d.get('error', 0), + "passed": d.get("passed", 0), + "skipped": d.get("skipped", 0), + "failed": d.get("failed", 0), + "error": d.get("error", 0), } - assert obtained == dict(passed=passed, skipped=skipped, failed=failed, error=error) + assert ( + obtained == dict(passed=passed, skipped=skipped, failed=failed, error=error) + ) class CwdSnapshot(object): + def __init__(self): self.__saved = os.getcwd() @@ -406,6 +438,7 @@ class CwdSnapshot(object): class SysModulesSnapshot(object): + def __init__(self, preserve=None): self.__preserve = preserve self.__saved = dict(sys.modules) @@ -413,12 +446,14 @@ class SysModulesSnapshot(object): def restore(self): if self.__preserve: self.__saved.update( - (k, m) for k, m in sys.modules.items() if self.__preserve(k)) + (k, m) for k, m in sys.modules.items() if self.__preserve(k) + ) sys.modules.clear() sys.modules.update(self.__saved) class SysPathsSnapshot(object): + def __init__(self): self.__saved = list(sys.path), list(sys.meta_path) @@ -482,6 +517,7 @@ class Testdir(object): # `zope.interface` for example def preserve_module(name): return name.startswith("zope") + return SysModulesSnapshot(preserve=preserve_module) def make_hook_recorder(self, pluginmanager): @@ -499,7 +535,7 @@ class Testdir(object): """ self.tmpdir.chdir() - def _makefile(self, ext, args, kwargs, encoding='utf-8'): + def _makefile(self, ext, args, kwargs, encoding="utf-8"): items = list(kwargs.items()) def to_text(s): @@ -544,20 +580,20 @@ class Testdir(object): def makeini(self, source): """Write a tox.ini file with 'source' as contents.""" - return self.makefile('.ini', tox=source) + return self.makefile(".ini", tox=source) def getinicfg(self, source): """Return the pytest section from the tox.ini config file.""" p = self.makeini(source) - return py.iniconfig.IniConfig(p)['pytest'] + return py.iniconfig.IniConfig(p)["pytest"] def makepyfile(self, *args, **kwargs): """Shortcut for .makefile() with a .py extension.""" - return self._makefile('.py', args, kwargs) + return self._makefile(".py", args, kwargs) def maketxtfile(self, *args, **kwargs): """Shortcut for 
.makefile() with a .txt extension.""" - return self._makefile('.txt', args, kwargs) + return self._makefile(".txt", args, kwargs) def syspathinsert(self, path=None): """Prepend a directory to sys.path, defaults to :py:attr:`tmpdir`. @@ -612,7 +648,7 @@ class Testdir(object): """ session = Session(config) - assert '::' not in str(arg) + assert "::" not in str(arg) p = py.path.local(arg) config.hook.pytest_sessionstart(session=session) res = session.perform_collect([str(p)], genitems=False)[0] @@ -722,6 +758,7 @@ class Testdir(object): def revert_warn_already_imported(): AssertionRewritingHook._warn_already_imported = orig_warn + finalizers.append(revert_warn_already_imported) AssertionRewritingHook._warn_already_imported = lambda *a: None @@ -741,6 +778,7 @@ class Testdir(object): rec = [] class Collect(object): + def pytest_configure(x, config): rec.append(self.make_hook_recorder(config.pluginmanager)) @@ -750,8 +788,10 @@ class Testdir(object): if len(rec) == 1: reprec = rec.pop() else: + class reprec(object): pass + reprec.ret = ret # typically we reraise keyboard interrupts from the child run @@ -788,15 +828,14 @@ class Testdir(object): class reprec(object): ret = 3 + finally: out, err = capture.readouterr() capture.stop_capturing() sys.stdout.write(out) sys.stderr.write(err) - res = RunResult(reprec.ret, - out.split("\n"), err.split("\n"), - time.time() - now) + res = RunResult(reprec.ret, out.split("\n"), err.split("\n"), time.time() - now) res.reprec = reprec return res @@ -811,11 +850,11 @@ class Testdir(object): def _ensure_basetemp(self, args): args = [str(x) for x in args] for x in args: - if str(x).startswith('--basetemp'): + if str(x).startswith("--basetemp"): # print("basedtemp exists: %s" %(args,)) break else: - args.append("--basetemp=%s" % self.tmpdir.dirpath('basetemp')) + args.append("--basetemp=%s" % self.tmpdir.dirpath("basetemp")) # print("added basetemp: %s" %(args,)) return args @@ -834,6 +873,7 @@ class Testdir(object): args = self._ensure_basetemp(args) import _pytest.config + config = _pytest.config._prepareconfig(args, self.plugins) # we don't know what the test will do with this half-setup config # object and thus we make sure it gets unconfigured properly in any @@ -870,8 +910,9 @@ class Testdir(object): for item in items: if item.name == funcname: return item - assert 0, "%r item not found in module:\n%s\nitems: %s" % ( - funcname, source, items) + assert 0, ( + "%r item not found in module:\n%s\nitems: %s" % (funcname, source, items) + ) def getitems(self, source): """Return all test items collected from the module. 
@@ -935,11 +976,14 @@ class Testdir(object): """ env = os.environ.copy() - env['PYTHONPATH'] = os.pathsep.join(filter(None, [ - str(os.getcwd()), env.get('PYTHONPATH', '')])) - kw['env'] = env + env["PYTHONPATH"] = os.pathsep.join( + filter(None, [str(os.getcwd()), env.get("PYTHONPATH", "")]) + ) + kw["env"] = env - popen = subprocess.Popen(cmdargs, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr, **kw) + popen = subprocess.Popen( + cmdargs, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr, **kw + ) popen.stdin.close() return popen @@ -958,14 +1002,15 @@ class Testdir(object): cmdargs = [str(x) for x in cmdargs] p1 = self.tmpdir.join("stdout") p2 = self.tmpdir.join("stderr") - print("running:", ' '.join(cmdargs)) + print("running:", " ".join(cmdargs)) print(" in:", str(py.path.local())) f1 = codecs.open(str(p1), "w", encoding="utf8") f2 = codecs.open(str(p2), "w", encoding="utf8") try: now = time.time() - popen = self.popen(cmdargs, stdout=f1, stderr=f2, - close_fds=(sys.platform != "win32")) + popen = self.popen( + cmdargs, stdout=f1, stderr=f2, close_fds=(sys.platform != "win32") + ) ret = popen.wait() finally: f1.close() @@ -992,7 +1037,7 @@ class Testdir(object): def _getpytestargs(self): # we cannot use `(sys.executable, script)` because on Windows the # script is e.g. `pytest.exe` - return (sys.executable, PYTEST_FULLPATH) # noqa + return (sys.executable, PYTEST_FULLPATH) # noqa def runpython(self, script): """Run a python script using sys.executable as interpreter. @@ -1018,12 +1063,13 @@ class Testdir(object): Returns a :py:class:`RunResult`. """ - p = py.path.local.make_numbered_dir(prefix="runpytest-", - keep=None, rootdir=self.tmpdir) - args = ('--basetemp=%s' % p,) + args + p = py.path.local.make_numbered_dir( + prefix="runpytest-", keep=None, rootdir=self.tmpdir + ) + args = ("--basetemp=%s" % p,) + args plugins = [x for x in self.plugins if isinstance(x, str)] if plugins: - args = ('-p', plugins[0]) + args + args = ("-p", plugins[0]) + args args = self._getpytestargs() + args return self.run(*args) @@ -1048,7 +1094,7 @@ class Testdir(object): """ pexpect = pytest.importorskip("pexpect", "3.0") - if hasattr(sys, 'pypy_version_info') and '64' in platform.machine(): + if hasattr(sys, "pypy_version_info") and "64" in platform.machine(): pytest.skip("pypy-64 bit not supported") if sys.platform.startswith("freebsd"): pytest.xfail("pexpect does not work reliably on freebsd") @@ -1064,10 +1110,12 @@ def getdecoded(out): return out.decode("utf-8") except UnicodeDecodeError: return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % ( - py.io.saferepr(out),) + py.io.saferepr(out), + ) class LineComp(object): + def __init__(self): self.stringio = py.io.TextIO() @@ -1158,11 +1206,11 @@ class LineMatcher(object): raise ValueError("line %r not found in output" % fnline) def _log(self, *args): - self._log_output.append(' '.join((str(x) for x in args))) + self._log_output.append(" ".join((str(x) for x in args))) @property def _log_text(self): - return '\n'.join(self._log_output) + return "\n".join(self._log_output) def fnmatch_lines(self, lines2): """Search captured text for matching lines using ``fnmatch.fnmatch``. @@ -1172,7 +1220,7 @@ class LineMatcher(object): matches and non-matches are also printed on stdout. """ - self._match_lines(lines2, fnmatch, 'fnmatch') + self._match_lines(lines2, fnmatch, "fnmatch") def re_match_lines(self, lines2): """Search captured text for matching lines using ``re.match``. 
@@ -1183,7 +1231,7 @@ class LineMatcher(object): The matches and non-matches are also printed on stdout. """ - self._match_lines(lines2, lambda name, pat: re.match(pat, name), 're.match') + self._match_lines(lines2, lambda name, pat: re.match(pat, name), "re.match") def _match_lines(self, lines2, match_func, match_nickname): """Underlying implementation of ``fnmatch_lines`` and ``re_match_lines``. diff --git a/_pytest/python.py b/_pytest/python.py index 2b37067d5..48516199f 100644 --- a/_pytest/python.py +++ b/_pytest/python.py @@ -22,10 +22,21 @@ from _pytest import fixtures from _pytest import nodes from _pytest import deprecated from _pytest.compat import ( - isclass, isfunction, is_generator, ascii_escaped, - REGEX_TYPE, STRING_TYPES, NoneType, NOTSET, - get_real_func, getfslineno, safe_getattr, - safe_str, getlocation, enum, get_default_arg_names + isclass, + isfunction, + is_generator, + ascii_escaped, + REGEX_TYPE, + STRING_TYPES, + NoneType, + NOTSET, + get_real_func, + getfslineno, + safe_getattr, + safe_str, + getlocation, + enum, + get_default_arg_names, ) from _pytest.outcomes import fail from _pytest.mark.structures import transfer_markers, get_unpacked_marks @@ -37,7 +48,7 @@ from _pytest.mark.structures import transfer_markers, get_unpacked_marks # for better maintenance _pluggy_dir = py.path.local(pluggy.__file__.rstrip("oc")) # pluggy is either a package or a single module depending on the version -if _pluggy_dir.basename == '__init__.py': +if _pluggy_dir.basename == "__init__.py": _pluggy_dir = _pluggy_dir.dirpath() _pytest_dir = py.path.local(_pytest.__file__).dirpath() _py_dir = py.path.local(py.__file__).dirpath() @@ -52,53 +63,81 @@ def filter_traceback(entry): # points to dynamically generated code # see https://bitbucket.org/pytest-dev/py/issues/71 raw_filename = entry.frame.code.raw.co_filename - is_generated = '<' in raw_filename and '>' in raw_filename + is_generated = "<" in raw_filename and ">" in raw_filename if is_generated: return False # entry.path might point to a non-existing file, in which case it will # also return a str object. see #1133 p = py.path.local(entry.path) - return not p.relto(_pluggy_dir) and not p.relto(_pytest_dir) and not p.relto(_py_dir) + return not p.relto(_pluggy_dir) and not p.relto(_pytest_dir) and not p.relto( + _py_dir + ) def pyobj_property(name): + def get(self): - node = self.getparent(getattr(__import__('pytest'), name)) + node = self.getparent(getattr(__import__("pytest"), name)) if node is not None: return node.obj + doc = "python %s object this node was collected from (can be None)." 
% ( - name.lower(),) + name.lower(), + ) return property(get, None, None, doc) def pytest_addoption(parser): group = parser.getgroup("general") - group.addoption('--fixtures', '--funcargs', - action="store_true", dest="showfixtures", default=False, - help="show available fixtures, sorted by plugin appearance " - "(fixtures with leading '_' are only shown with '-v')") group.addoption( - '--fixtures-per-test', + "--fixtures", + "--funcargs", + action="store_true", + dest="showfixtures", + default=False, + help="show available fixtures, sorted by plugin appearance " + "(fixtures with leading '_' are only shown with '-v')", + ) + group.addoption( + "--fixtures-per-test", action="store_true", dest="show_fixtures_per_test", default=False, help="show fixtures per test", ) - parser.addini("usefixtures", type="args", default=[], - help="list of default fixtures to be used with this project") - parser.addini("python_files", type="args", - default=['test_*.py', '*_test.py'], - help="glob-style file patterns for Python test module discovery") - parser.addini("python_classes", type="args", default=["Test", ], - help="prefixes or glob names for Python test class discovery") - parser.addini("python_functions", type="args", default=["test", ], - help="prefixes or glob names for Python test function and " - "method discovery") + parser.addini( + "usefixtures", + type="args", + default=[], + help="list of default fixtures to be used with this project", + ) + parser.addini( + "python_files", + type="args", + default=["test_*.py", "*_test.py"], + help="glob-style file patterns for Python test module discovery", + ) + parser.addini( + "python_classes", + type="args", + default=["Test"], + help="prefixes or glob names for Python test class discovery", + ) + parser.addini( + "python_functions", + type="args", + default=["test"], + help="prefixes or glob names for Python test function and " "method discovery", + ) - group.addoption("--import-mode", default="prepend", - choices=["prepend", "append"], dest="importmode", - help="prepend/append to sys.path when importing test modules, " - "default is to prepend.") + group.addoption( + "--import-mode", + default="prepend", + choices=["prepend", "append"], + dest="importmode", + help="prepend/append to sys.path when importing test modules, " + "default is to prepend.", + ) def pytest_cmdline_main(config): @@ -113,30 +152,32 @@ def pytest_cmdline_main(config): def pytest_generate_tests(metafunc): # those alternative spellings are common - raise a specific error to alert # the user - alt_spellings = ['parameterize', 'parametrise', 'parameterise'] + alt_spellings = ["parameterize", "parametrise", "parameterise"] for attr in alt_spellings: if hasattr(metafunc.function, attr): msg = "{0} has '{1}', spelling should be 'parametrize'" raise MarkerError(msg.format(metafunc.function.__name__, attr)) - for marker in metafunc.definition.iter_markers(name='parametrize'): + for marker in metafunc.definition.iter_markers(name="parametrize"): metafunc.parametrize(*marker.args, **marker.kwargs) def pytest_configure(config): - config.addinivalue_line("markers", - "parametrize(argnames, argvalues): call a test function multiple " - "times passing in different arguments in turn. argvalues generally " - "needs to be a list of values if argnames specifies only one name " - "or a list of tuples of values if argnames specifies multiple names. " - "Example: @parametrize('arg1', [1,2]) would lead to two calls of the " - "decorated test function, one with arg1=1 and another with arg1=2." 
- "see http://pytest.org/latest/parametrize.html for more info and " - "examples." - ) - config.addinivalue_line("markers", - "usefixtures(fixturename1, fixturename2, ...): mark tests as needing " - "all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures " - ) + config.addinivalue_line( + "markers", + "parametrize(argnames, argvalues): call a test function multiple " + "times passing in different arguments in turn. argvalues generally " + "needs to be a list of values if argnames specifies only one name " + "or a list of tuples of values if argnames specifies multiple names. " + "Example: @parametrize('arg1', [1,2]) would lead to two calls of the " + "decorated test function, one with arg1=1 and another with arg1=2." + "see http://pytest.org/latest/parametrize.html for more info and " + "examples.", + ) + config.addinivalue_line( + "markers", + "usefixtures(fixturename1, fixturename2, ...): mark tests as needing " + "all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures ", + ) @hookimpl(trylast=True) @@ -157,7 +198,7 @@ def pytest_collect_file(path, parent): ext = path.ext if ext == ".py": if not parent.session.isinitpath(path): - for pat in parent.config.getini('python_files'): + for pat in parent.config.getini("python_files"): if path.fnmatch(pat): break else: @@ -188,8 +229,10 @@ def pytest_pycollect_makeitem(collector, name, obj): # or a funtools.wrapped. # We musn't if it's been wrapped with mock.patch (python 2 only) if not (isfunction(obj) or isfunction(get_real_func(obj))): - collector.warn(code="C2", message="cannot collect %r because it is not a function." - % name, ) + collector.warn( + code="C2", + message="cannot collect %r because it is not a function." % name, + ) elif getattr(obj, "__test__", True): if is_generator(obj): res = Generator(name, parent=collector) @@ -215,8 +258,9 @@ class PyobjMixin(PyobjContext): super(PyobjMixin, self).__init__(*k, **kw) def obj(): + def fget(self): - obj = getattr(self, '_obj', None) + obj = getattr(self, "_obj", None) if obj is None: self._obj = obj = self._getobj() # XXX evil hack @@ -261,7 +305,7 @@ class PyobjMixin(PyobjContext): def reportinfo(self): # XXX caching? obj = self.obj - compat_co_firstlineno = getattr(obj, 'compat_co_firstlineno', None) + compat_co_firstlineno = getattr(obj, "compat_co_firstlineno", None) if isinstance(compat_co_firstlineno, int): # nose compatibility fspath = sys.modules[obj.__module__].__file__ @@ -278,7 +322,7 @@ class PyobjMixin(PyobjContext): class PyCollector(PyobjMixin, nodes.Collector): def funcnamefilter(self, name): - return self._matches_prefix_or_glob_option('python_functions', name) + return self._matches_prefix_or_glob_option("python_functions", name) def isnosetest(self, obj): """ Look for the __test__ attribute, which is applied by the @@ -287,25 +331,24 @@ class PyCollector(PyobjMixin, nodes.Collector): # We explicitly check for "is True" here to not mistakenly treat # classes with a custom __getattr__ returning something truthy (like a # function) as test classes. 
- return safe_getattr(obj, '__test__', False) is True + return safe_getattr(obj, "__test__", False) is True def classnamefilter(self, name): - return self._matches_prefix_or_glob_option('python_classes', name) + return self._matches_prefix_or_glob_option("python_classes", name) def istestfunction(self, obj, name): if self.funcnamefilter(name) or self.isnosetest(obj): if isinstance(obj, staticmethod): # static methods need to be unwrapped - obj = safe_getattr(obj, '__func__', False) + obj = safe_getattr(obj, "__func__", False) if obj is False: # Python 2.6 wraps in a different way that we won't try to handle - msg = "cannot collect static method %r because " \ - "it is not a function (always the case in Python 2.6)" - self.warn( - code="C2", message=msg % name) + msg = "cannot collect static method %r because it is not a function" + self.warn(code="C2", message=msg % name) return False return ( - safe_getattr(obj, "__call__", False) and fixtures.getfixturemarker(obj) is None + safe_getattr(obj, "__call__", False) + and fixtures.getfixturemarker(obj) is None ) else: return False @@ -324,8 +367,9 @@ class PyCollector(PyobjMixin, nodes.Collector): # check that name looks like a glob-string before calling fnmatch # because this is called for every name in each collected module, # and fnmatch is somewhat expensive to call - elif ('*' in option or '?' in option or '[' in option) and \ - fnmatch.fnmatch(name, option): + elif ("*" in option or "?" in option or "[" in option) and fnmatch.fnmatch( + name, option + ): return True return False @@ -335,7 +379,7 @@ class PyCollector(PyobjMixin, nodes.Collector): # NB. we avoid random getattrs and peek in the __dict__ instead # (XXX originally introduced from a PyPy need, still true?) - dicts = [getattr(self.obj, '__dict__', {})] + dicts = [getattr(self.obj, "__dict__", {})] for basecls in inspect.getmro(self.obj.__class__): dicts.append(basecls.__dict__) seen = {} @@ -360,8 +404,7 @@ class PyCollector(PyobjMixin, nodes.Collector): def _makeitem(self, name, obj): # assert self.ihook.fspath == self.fspath, self - return self.ihook.pytest_pycollect_makeitem( - collector=self, name=name, obj=obj) + return self.ihook.pytest_pycollect_makeitem(collector=self, name=name, obj=obj) def _genfunctions(self, name, funcobj): module = self.getparent(Module).obj @@ -370,22 +413,21 @@ class PyCollector(PyobjMixin, nodes.Collector): transfer_markers(funcobj, cls, module) fm = self.session._fixturemanager - definition = FunctionDefinition( - name=name, - parent=self, - callobj=funcobj, - ) + definition = FunctionDefinition(name=name, parent=self, callobj=funcobj) fixtureinfo = fm.getfixtureinfo(definition, funcobj, cls) - metafunc = Metafunc(definition, fixtureinfo, self.config, cls=cls, module=module) + metafunc = Metafunc( + definition, fixtureinfo, self.config, cls=cls, module=module + ) methods = [] if hasattr(module, "pytest_generate_tests"): methods.append(module.pytest_generate_tests) if hasattr(cls, "pytest_generate_tests"): methods.append(cls().pytest_generate_tests) if methods: - self.ihook.pytest_generate_tests.call_extra(methods, - dict(metafunc=metafunc)) + self.ihook.pytest_generate_tests.call_extra( + methods, dict(metafunc=metafunc) + ) else: self.ihook.pytest_generate_tests(metafunc=metafunc) @@ -398,12 +440,15 @@ class PyCollector(PyobjMixin, nodes.Collector): for callspec in metafunc._calls: subname = "%s[%s]" % (name, callspec.id) - yield Function(name=subname, parent=self, - callspec=callspec, callobj=funcobj, - fixtureinfo=fixtureinfo, - 
keywords={callspec.id: True}, - originalname=name, - ) + yield Function( + name=subname, + parent=self, + callspec=callspec, + callobj=funcobj, + fixtureinfo=fixtureinfo, + keywords={callspec.id: True}, + originalname=name, + ) class Module(nodes.File, PyCollector): @@ -423,7 +468,8 @@ class Module(nodes.File, PyCollector): mod = self.fspath.pyimport(ensuresyspath=importmode) except SyntaxError: raise self.CollectError( - _pytest._code.ExceptionInfo().getrepr(style="short")) + _pytest._code.ExceptionInfo().getrepr(style="short") + ) except self.fspath.ImportMismatchError: e = sys.exc_info()[1] raise self.CollectError( @@ -433,15 +479,17 @@ class Module(nodes.File, PyCollector): "which is not the same as the test file we want to collect:\n" " %s\n" "HINT: remove __pycache__ / .pyc files and/or use a " - "unique basename for your test file modules" - % e.args + "unique basename for your test file modules" % e.args ) except ImportError: from _pytest._code.code import ExceptionInfo + exc_info = ExceptionInfo() - if self.config.getoption('verbose') < 2: + if self.config.getoption("verbose") < 2: exc_info.traceback = exc_info.traceback.filter(filter_traceback) - exc_repr = exc_info.getrepr(style='short') if exc_info.traceback else exc_info.exconly() + exc_repr = exc_info.getrepr( + style="short" + ) if exc_info.traceback else exc_info.exconly() formatted_tb = safe_str(exc_repr) raise self.CollectError( "ImportError while importing test module '{fspath}'.\n" @@ -468,9 +516,9 @@ class Module(nodes.File, PyCollector): if setup_module is not None: setup_module() - teardown_module = _get_xunit_setup_teardown(self.obj, 'tearDownModule') + teardown_module = _get_xunit_setup_teardown(self.obj, "tearDownModule") if teardown_module is None: - teardown_module = _get_xunit_setup_teardown(self.obj, 'teardown_module') + teardown_module = _get_xunit_setup_teardown(self.obj, "teardown_module") if teardown_module is not None: self.addfinalizer(teardown_module) @@ -512,26 +560,32 @@ class Class(PyCollector): if not safe_getattr(self.obj, "__test__", True): return [] if hasinit(self.obj): - self.warn("C1", "cannot collect test class %r because it has a " - "__init__ constructor" % self.obj.__name__) + self.warn( + "C1", + "cannot collect test class %r because it has a " + "__init__ constructor" % self.obj.__name__, + ) return [] elif hasnew(self.obj): - self.warn("C1", "cannot collect test class %r because it has a " - "__new__ constructor" % self.obj.__name__) + self.warn( + "C1", + "cannot collect test class %r because it has a " + "__new__ constructor" % self.obj.__name__, + ) return [] return [self._getcustomclass("Instance")(name="()", parent=self)] def setup(self): - setup_class = _get_xunit_func(self.obj, 'setup_class') + setup_class = _get_xunit_func(self.obj, "setup_class") if setup_class is not None: - setup_class = getattr(setup_class, 'im_func', setup_class) - setup_class = getattr(setup_class, '__func__', setup_class) + setup_class = getattr(setup_class, "im_func", setup_class) + setup_class = getattr(setup_class, "__func__", setup_class) setup_class(self.obj) - fin_class = getattr(self.obj, 'teardown_class', None) + fin_class = getattr(self.obj, "teardown_class", None) if fin_class is not None: - fin_class = getattr(fin_class, 'im_func', fin_class) - fin_class = getattr(fin_class, '__func__', fin_class) + fin_class = getattr(fin_class, "im_func", fin_class) + fin_class = getattr(fin_class, "__func__", fin_class) self.addfinalizer(lambda: fin_class(self.obj)) @@ -559,7 +613,7 @@ class 
FunctionMixin(PyobjMixin): def setup(self): """ perform setup for this test function. """ - if hasattr(self, '_preservedparent'): + if hasattr(self, "_preservedparent"): obj = self._preservedparent elif isinstance(self.parent, Instance): obj = self.parent.newinstance() @@ -567,20 +621,24 @@ class FunctionMixin(PyobjMixin): else: obj = self.parent.obj if inspect.ismethod(self.obj): - setup_name = 'setup_method' - teardown_name = 'teardown_method' + setup_name = "setup_method" + teardown_name = "teardown_method" else: - setup_name = 'setup_function' - teardown_name = 'teardown_function' - setup_func_or_method = _get_xunit_setup_teardown(obj, setup_name, param_obj=self.obj) + setup_name = "setup_function" + teardown_name = "teardown_function" + setup_func_or_method = _get_xunit_setup_teardown( + obj, setup_name, param_obj=self.obj + ) if setup_func_or_method is not None: setup_func_or_method() - teardown_func_or_method = _get_xunit_setup_teardown(obj, teardown_name, param_obj=self.obj) + teardown_func_or_method = _get_xunit_setup_teardown( + obj, teardown_name, param_obj=self.obj + ) if teardown_func_or_method is not None: self.addfinalizer(teardown_func_or_method) def _prunetraceback(self, excinfo): - if hasattr(self, '_obj') and not self.config.option.fulltrace: + if hasattr(self, "_obj") and not self.config.option.fulltrace: code = _pytest._code.Code(get_real_func(self.obj)) path, firstlineno = code.path, code.firstlineno traceback = excinfo.traceback @@ -598,14 +656,13 @@ class FunctionMixin(PyobjMixin): if self.config.option.tbstyle == "auto": if len(excinfo.traceback) > 2: for entry in excinfo.traceback[1:-1]: - entry.set_repr_style('short') + entry.set_repr_style("short") def _repr_failure_py(self, excinfo, style="long"): if excinfo.errisinstance(fail.Exception): if not excinfo.value.pytrace: return py._builtin._totext(excinfo.value) - return super(FunctionMixin, self)._repr_failure_py(excinfo, - style=style) + return super(FunctionMixin, self)._repr_failure_py(excinfo, style=style) def repr_failure(self, excinfo, outerr=None): assert outerr is None, "XXX outerr usage is deprecated" @@ -616,11 +673,13 @@ class FunctionMixin(PyobjMixin): class Generator(FunctionMixin, PyCollector): + def collect(self): # test generators are seen as collectors but they also # invoke setup/teardown on popular request # (induced by the common "test_*" naming shared with normal tests) from _pytest import deprecated + self.session._setupstate.prepare(self) # see FunctionMixin.setup and test_setupstate_is_preserved_134 self._preservedparent = self.parent.obj @@ -629,16 +688,18 @@ class Generator(FunctionMixin, PyCollector): for i, x in enumerate(self.obj()): name, call, args = self.getcallargs(x) if not callable(call): - raise TypeError("%r yielded non callable test %r" % (self.obj, call,)) + raise TypeError("%r yielded non callable test %r" % (self.obj, call)) if name is None: name = "[%d]" % i else: name = "['%s']" % name if name in seen: - raise ValueError("%r generated tests with non-unique name %r" % (self, name)) + raise ValueError( + "%r generated tests with non-unique name %r" % (self, name) + ) seen[name] = True values.append(self.Function(name, self, args=args, callobj=call)) - self.warn('C1', deprecated.YIELD_TESTS) + self.warn("C1", deprecated.YIELD_TESTS) return values def getcallargs(self, obj): @@ -655,18 +716,19 @@ class Generator(FunctionMixin, PyCollector): def hasinit(obj): - init = getattr(obj, '__init__', None) + init = getattr(obj, "__init__", None) if init: return init != 
object.__init__ def hasnew(obj): - new = getattr(obj, '__new__', None) + new = getattr(obj, "__new__", None) if new: return new != object.__new__ class CallSpec2(object): + def __init__(self, metafunc): self.metafunc = metafunc self.funcargs = {} @@ -708,8 +770,7 @@ class CallSpec2(object): def id(self): return "-".join(map(str, filter(None, self._idlist))) - def setmulti2(self, valtypes, argnames, valset, id, marks, scopenum, - param_index): + def setmulti2(self, valtypes, argnames, valset, id, marks, scopenum, param_index): for arg, val in zip(argnames, valset): self._checkargnotcontained(arg) valtype_for_arg = valtypes[arg] @@ -742,7 +803,10 @@ class Metafunc(fixtures.FuncargnamesCompatAttr): def __init__(self, definition, fixtureinfo, config, cls=None, module=None): #: access to the :class:`_pytest.config.Config` object for the test session - assert isinstance(definition, FunctionDefinition) or type(definition).__name__ == "DefinitionMock" + assert ( + isinstance(definition, FunctionDefinition) + or type(definition).__name__ == "DefinitionMock" + ) self.definition = definition self.config = config @@ -762,8 +826,7 @@ class Metafunc(fixtures.FuncargnamesCompatAttr): self._ids = set() self._arg2fixturedefs = fixtureinfo.name2fixturedefs - def parametrize(self, argnames, argvalues, indirect=False, ids=None, - scope=None): + def parametrize(self, argnames, argvalues, indirect=False, ids=None, scope=None): """ Add new invocations to the underlying test function using the list of argvalues for the given argnames. Parametrization is performed during the collection phase. If you need to setup expensive resources @@ -806,27 +869,29 @@ class Metafunc(fixtures.FuncargnamesCompatAttr): from py.io import saferepr argnames, parameters = ParameterSet._for_parametrize( - argnames, argvalues, self.function, self.config) + argnames, argvalues, self.function, self.config + ) del argvalues default_arg_names = set(get_default_arg_names(self.function)) if scope is None: scope = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect) - scopenum = scope2index(scope, descr='call to {0}'.format(self.parametrize)) + scopenum = scope2index(scope, descr="call to {}".format(self.parametrize)) valtypes = {} for arg in argnames: if arg not in self.fixturenames: if arg in default_arg_names: - raise ValueError("%r already takes an argument %r with a default value" % (self.function, arg)) + raise ValueError( + "%r already takes an argument %r with a default value" + % (self.function, arg) + ) else: if isinstance(indirect, (tuple, list)): - name = 'fixture' if arg in indirect else 'argument' + name = "fixture" if arg in indirect else "argument" else: - name = 'fixture' if indirect else 'argument' - raise ValueError( - "%r uses no %s %r" % ( - self.function, name, arg)) + name = "fixture" if indirect else "argument" + raise ValueError("%r uses no %s %r" % (self.function, name, arg)) if indirect is True: valtypes = dict.fromkeys(argnames, "params") @@ -836,8 +901,10 @@ class Metafunc(fixtures.FuncargnamesCompatAttr): valtypes = dict.fromkeys(argnames, "funcargs") for arg in indirect: if arg not in argnames: - raise ValueError("indirect given to %r: fixture %r doesn't exist" % ( - self.function, arg)) + raise ValueError( + "indirect given to %r: fixture %r doesn't exist" + % (self.function, arg) + ) valtypes[arg] = "params" idfn = None if callable(ids): @@ -845,12 +912,15 @@ class Metafunc(fixtures.FuncargnamesCompatAttr): ids = None if ids: if len(ids) != len(parameters): - raise ValueError('%d tests specified 
with %d ids' % ( - len(parameters), len(ids))) + raise ValueError( + "%d tests specified with %d ids" % (len(parameters), len(ids)) + ) for id_value in ids: if id_value is not None and not isinstance(id_value, six.string_types): - msg = 'ids must be list of strings, found: %s (type: %s)' - raise ValueError(msg % (saferepr(id_value), type(id_value).__name__)) + msg = "ids must be list of strings, found: %s (type: %s)" + raise ValueError( + msg % (saferepr(id_value), type(id_value).__name__) + ) ids = idmaker(argnames, parameters, idfn, ids, self.config) newcalls = [] for callspec in self._calls or [CallSpec2(self)]: @@ -858,12 +928,21 @@ class Metafunc(fixtures.FuncargnamesCompatAttr): for a_id, param, param_index in elements: if len(param.values) != len(argnames): raise ValueError( - 'In "parametrize" the number of values ({0}) must be ' - 'equal to the number of names ({1})'.format( - param.values, argnames)) + 'In "parametrize" the number of values ({}) must be ' + "equal to the number of names ({})".format( + param.values, argnames + ) + ) newcallspec = callspec.copy(self) - newcallspec.setmulti2(valtypes, argnames, param.values, a_id, - param.marks, scopenum, param_index) + newcallspec.setmulti2( + valtypes, + argnames, + param.values, + a_id, + param.marks, + scopenum, + param_index, + ) newcalls.append(newcallspec) self._calls = newcalls @@ -888,7 +967,9 @@ class Metafunc(fixtures.FuncargnamesCompatAttr): invocation through the ``request.param`` attribute. """ if self.config: - self.config.warn('C1', message=deprecated.METAFUNC_ADD_CALL, fslocation=None) + self.config.warn( + "C1", message=deprecated.METAFUNC_ADD_CALL, fslocation=None + ) assert funcargs is None or isinstance(funcargs, dict) if funcargs is not None: for name in funcargs: @@ -921,9 +1002,11 @@ def _find_parametrized_scope(argnames, arg2fixturedefs, indirect): Related to issue #1832, based on code posted by @Kingdread. """ from _pytest.fixtures import scopes + indirect_as_list = isinstance(indirect, (list, tuple)) - all_arguments_are_fixtures = indirect is True or \ - indirect_as_list and len(indirect) == argnames + all_arguments_are_fixtures = indirect is True or indirect_as_list and len( + indirect + ) == argnames if all_arguments_are_fixtures: fixturedefs = arg2fixturedefs or {} used_scopes = [fixturedef[0].scope for name, fixturedef in fixturedefs.items()] @@ -933,7 +1016,7 @@ def _find_parametrized_scope(argnames, arg2fixturedefs, indirect): if scope in used_scopes: return scope - return 'function' + return "function" def _idval(val, argname, idx, idfn, config=None): @@ -944,15 +1027,19 @@ def _idval(val, argname, idx, idfn, config=None): except Exception: # See issue https://github.com/pytest-dev/pytest/issues/2169 import warnings - msg = "Raised while trying to determine id of parameter %s at position %d." % (argname, idx) - msg += '\nUpdate your code as this will raise an error in pytest-4.0.' + + msg = "Raised while trying to determine id of parameter %s at position %d." % ( + argname, idx + ) + msg += "\nUpdate your code as this will raise an error in pytest-4.0." 
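# Rough sketch of this fallback path (the idfn and values are illustrative):
# given @pytest.mark.parametrize("n", [1, 2], ids=lambda v: v.name), the
# lambda raises AttributeError for plain ints, the DeprecationWarning built
# above is emitted, and the affected parameter falls back to an
# auto-generated id (the "argname + index" style returned at the end of
# _idval, e.g. "n0", "n1").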
warnings.warn(msg, DeprecationWarning) if s: return ascii_escaped(s) if config: hook_id = config.hook.pytest_make_parametrize_id( - config=config, val=val, argname=argname) + config=config, val=val, argname=argname + ) if hook_id: return hook_id @@ -964,7 +1051,7 @@ def _idval(val, argname, idx, idfn, config=None): return ascii_escaped(val.pattern) elif enum is not None and isinstance(val, enum.Enum): return str(val) - elif (isclass(val) or isfunction(val)) and hasattr(val, '__name__'): + elif (isclass(val) or isfunction(val)) and hasattr(val, "__name__"): return val.__name__ return str(argname) + str(idx) @@ -973,16 +1060,20 @@ def _idvalset(idx, parameterset, argnames, idfn, ids, config=None): if parameterset.id is not None: return parameterset.id if ids is None or (idx >= len(ids) or ids[idx] is None): - this_id = [_idval(val, argname, idx, idfn, config) - for val, argname in zip(parameterset.values, argnames)] + this_id = [ + _idval(val, argname, idx, idfn, config) + for val, argname in zip(parameterset.values, argnames) + ] return "-".join(this_id) else: return ascii_escaped(ids[idx]) def idmaker(argnames, parametersets, idfn=None, ids=None, config=None): - ids = [_idvalset(valindex, parameterset, argnames, idfn, ids, config) - for valindex, parameterset in enumerate(parametersets)] + ids = [ + _idvalset(valindex, parameterset, argnames, idfn, ids, config) + for valindex, parameterset in enumerate(parametersets) + ] if len(set(ids)) != len(ids): # The ids are not unique duplicates = [testid for testid in ids if ids.count(testid) > 1] @@ -996,11 +1087,13 @@ def idmaker(argnames, parametersets, idfn=None, ids=None, config=None): def show_fixtures_per_test(config): from _pytest.main import wrap_session + return wrap_session(config, _show_fixtures_per_test) def _show_fixtures_per_test(config, session): import _pytest.config + session.perform_collect() curdir = py.path.local() tw = _pytest.config.create_terminal_writer(config) @@ -1016,7 +1109,7 @@ def _show_fixtures_per_test(config, session): return if verbose > 0: bestrel = get_best_relpath(fixture_def.func) - funcargspec = "{0} -- {1}".format(argname, bestrel) + funcargspec = "{} -- {}".format(argname, bestrel) else: funcargspec = argname tw.line(funcargspec, green=True) @@ -1024,7 +1117,7 @@ def _show_fixtures_per_test(config, session): if fixture_doc: write_docstring(tw, fixture_doc) else: - tw.line(' no docstring available', red=True) + tw.line(" no docstring available", red=True) def write_item(item): try: @@ -1036,8 +1129,8 @@ def _show_fixtures_per_test(config, session): # this test item does not use any fixtures return tw.line() - tw.sep('-', 'fixtures used by {0}'.format(item.name)) - tw.sep('-', '({0})'.format(get_best_relpath(item.function))) + tw.sep("-", "fixtures used by {}".format(item.name)) + tw.sep("-", "({})".format(get_best_relpath(item.function))) # dict key not used in loop but needed for sorting for _, fixturedefs in sorted(info.name2fixturedefs.items()): assert fixturedefs is not None @@ -1052,11 +1145,13 @@ def _show_fixtures_per_test(config, session): def showfixtures(config): from _pytest.main import wrap_session + return wrap_session(config, _showfixtures_main) def _showfixtures_main(config, session): import _pytest.config + session.perform_collect() curdir = py.path.local() tw = _pytest.config.create_terminal_writer(config) @@ -1076,10 +1171,15 @@ def _showfixtures_main(config, session): if (fixturedef.argname, loc) in seen: continue seen.add((fixturedef.argname, loc)) - 
available.append((len(fixturedef.baseid), - fixturedef.func.__module__, - curdir.bestrelpath(loc), - fixturedef.argname, fixturedef)) + available.append( + ( + len(fixturedef.baseid), + fixturedef.func.__module__, + curdir.bestrelpath(loc), + fixturedef.argname, + fixturedef, + ) + ) available.sort() currentmodule = None @@ -1092,7 +1192,7 @@ def _showfixtures_main(config, session): if verbose <= 0 and argname[0] == "_": continue if verbose > 0: - funcargspec = "%s -- %s" % (argname, bestrel,) + funcargspec = "%s -- %s" % (argname, bestrel) else: funcargspec = argname tw.line(funcargspec, green=True) @@ -1101,8 +1201,7 @@ def _showfixtures_main(config, session): if doc: write_docstring(tw, doc) else: - tw.line(" %s: no docstring available" % (loc,), - red=True) + tw.line(" %s: no docstring available" % (loc,), red=True) def write_docstring(tw, doc): @@ -1129,11 +1228,20 @@ class Function(FunctionMixin, nodes.Item, fixtures.FuncargnamesCompatAttr): # disable since functions handle it themselfes _ALLOW_MARKERS = False - def __init__(self, name, parent, args=None, config=None, - callspec=None, callobj=NOTSET, keywords=None, session=None, - fixtureinfo=None, originalname=None): - super(Function, self).__init__(name, parent, config=config, - session=session) + def __init__( + self, + name, + parent, + args=None, + config=None, + callspec=None, + callobj=NOTSET, + keywords=None, + session=None, + fixtureinfo=None, + originalname=None, + ): + super(Function, self).__init__(name, parent, config=config, session=session) self._args = args if callobj is not NOTSET: self.obj = callobj @@ -1155,8 +1263,8 @@ class Function(FunctionMixin, nodes.Item, fixtures.FuncargnamesCompatAttr): if fixtureinfo is None: fixtureinfo = self.session._fixturemanager.getfixtureinfo( - self, self.obj, self.cls, - funcargs=not self._isyieldedfunction()) + self, self.obj, self.cls, funcargs=not self._isyieldedfunction() + ) self._fixtureinfo = fixtureinfo self.fixturenames = fixtureinfo.names_closure self._initrequest() @@ -1170,8 +1278,9 @@ class Function(FunctionMixin, nodes.Item, fixtures.FuncargnamesCompatAttr): def _initrequest(self): self.funcargs = {} if self._isyieldedfunction(): - assert not hasattr(self, "callspec"), ( - "yielded functions (deprecated) cannot have funcargs") + assert not hasattr( + self, "callspec" + ), "yielded functions (deprecated) cannot have funcargs" else: if hasattr(self, "callspec"): callspec = self.callspec @@ -1184,7 +1293,7 @@ class Function(FunctionMixin, nodes.Item, fixtures.FuncargnamesCompatAttr): @property def function(self): "underlying python 'function' object" - return getattr(self.obj, 'im_func', self.obj) + return getattr(self.obj, "im_func", self.obj) def _getobj(self): name = self.name diff --git a/_pytest/python_api.py b/_pytest/python_api.py index 838a4a50c..955fb4165 100644 --- a/_pytest/python_api.py +++ b/_pytest/python_api.py @@ -2,14 +2,19 @@ import math import sys import py -from six import binary_type, text_type from six.moves import zip, filterfalse from more_itertools.more import always_iterable from _pytest.compat import isclass + +from _pytest.compat import Mapping, Sequence +from _pytest.compat import STRING_TYPES + from _pytest.outcomes import fail import _pytest._code +BASE_TYPE = (type, STRING_TYPES) + def _cmp_raises_type_error(self, other): """__cmp__ implementation which raises TypeError. Used @@ -20,7 +25,9 @@ def _cmp_raises_type_error(self, other): other operators at all. 
""" __tracebackhide__ = True - raise TypeError('Comparison operators other than == and != not supported by approx objects') + raise TypeError( + "Comparison operators other than == and != not supported by approx objects" + ) # builtin pytest.approx helper @@ -47,8 +54,8 @@ class ApproxBase(object): def __eq__(self, actual): return all( - a == self._approx_scalar(x) - for a, x in self._yield_comparisons(actual)) + a == self._approx_scalar(x) for a, x in self._yield_comparisons(actual) + ) __hash__ = None @@ -79,8 +86,9 @@ class ApproxNumpy(ApproxBase): # shape of the array... import numpy as np - return "approx({0!r})".format(list( - self._approx_scalar(x) for x in np.asarray(self.expected))) + return "approx({!r})".format( + list(self._approx_scalar(x) for x in np.asarray(self.expected)) + ) if sys.version_info[0] == 2: __cmp__ = _cmp_raises_type_error @@ -94,7 +102,7 @@ class ApproxNumpy(ApproxBase): try: actual = np.asarray(actual) except: # noqa - raise TypeError("cannot compare '{0}' to numpy.ndarray".format(actual)) + raise TypeError("cannot compare '{}' to numpy.ndarray".format(actual)) if not np.isscalar(actual) and actual.shape != self.expected.shape: return False @@ -123,9 +131,9 @@ class ApproxMapping(ApproxBase): """ def __repr__(self): - return "approx({0!r})".format(dict( - (k, self._approx_scalar(v)) - for k, v in self.expected.items())) + return "approx({!r})".format( + {k: self._approx_scalar(v) for k, v in self.expected.items()} + ) def __eq__(self, actual): if set(actual.keys()) != set(self.expected.keys()): @@ -147,8 +155,9 @@ class ApproxSequence(ApproxBase): seq_type = type(self.expected) if seq_type not in (tuple, list, set): seq_type = list - return "approx({0!r})".format(seq_type( - self._approx_scalar(x) for x in self.expected)) + return "approx({!r})".format( + seq_type(self._approx_scalar(x) for x in self.expected) + ) def __eq__(self, actual): if len(actual) != len(self.expected): @@ -184,14 +193,14 @@ class ApproxScalar(ApproxBase): # If a sensible tolerance can't be calculated, self.tolerance will # raise a ValueError. In this case, display '???'. try: - vetted_tolerance = '{:.1e}'.format(self.tolerance) + vetted_tolerance = "{:.1e}".format(self.tolerance) except ValueError: - vetted_tolerance = '???' + vetted_tolerance = "???" if sys.version_info[0] == 2: - return '{0} +- {1}'.format(self.expected, vetted_tolerance) + return "{} +- {}".format(self.expected, vetted_tolerance) else: - return u'{0} \u00b1 {1}'.format(self.expected, vetted_tolerance) + return u"{} \u00b1 {}".format(self.expected, vetted_tolerance) def __eq__(self, actual): """ @@ -232,6 +241,7 @@ class ApproxScalar(ApproxBase): absolute tolerance or a relative tolerance, depending on what the user specified or which would be larger. """ + def set_default(x, default): return x if x is not None else default @@ -240,7 +250,9 @@ class ApproxScalar(ApproxBase): absolute_tolerance = set_default(self.abs, self.DEFAULT_ABSOLUTE_TOLERANCE) if absolute_tolerance < 0: - raise ValueError("absolute tolerance can't be negative: {}".format(absolute_tolerance)) + raise ValueError( + "absolute tolerance can't be negative: {}".format(absolute_tolerance) + ) if math.isnan(absolute_tolerance): raise ValueError("absolute tolerance can't be NaN.") @@ -255,10 +267,16 @@ class ApproxScalar(ApproxBase): # we've made sure the user didn't ask for an absolute tolerance only, # because we don't want to raise errors about the relative tolerance if # we aren't even going to use it. 
- relative_tolerance = set_default(self.rel, self.DEFAULT_RELATIVE_TOLERANCE) * abs(self.expected) + relative_tolerance = set_default( + self.rel, self.DEFAULT_RELATIVE_TOLERANCE + ) * abs( + self.expected + ) if relative_tolerance < 0: - raise ValueError("relative tolerance can't be negative: {}".format(absolute_tolerance)) + raise ValueError( + "relative tolerance can't be negative: {}".format(absolute_tolerance) + ) if math.isnan(relative_tolerance): raise ValueError("relative tolerance can't be NaN.") @@ -269,8 +287,8 @@ class ApproxScalar(ApproxBase): class ApproxDecimal(ApproxScalar): from decimal import Decimal - DEFAULT_ABSOLUTE_TOLERANCE = Decimal('1e-12') - DEFAULT_RELATIVE_TOLERANCE = Decimal('1e-6') + DEFAULT_ABSOLUTE_TOLERANCE = Decimal("1e-12") + DEFAULT_RELATIVE_TOLERANCE = Decimal("1e-6") def approx(expected, rel=None, abs=None, nan_ok=False): @@ -426,8 +444,6 @@ def approx(expected, rel=None, abs=None, nan_ok=False): __ https://docs.python.org/3/reference/datamodel.html#object.__ge__ """ - from _pytest.compat import Mapping, Sequence - from _pytest.compat import STRING_TYPES as String from decimal import Decimal # Delegate the comparison to a class that knows how to deal with the type @@ -448,7 +464,7 @@ def approx(expected, rel=None, abs=None, nan_ok=False): cls = ApproxNumpy elif isinstance(expected, Mapping): cls = ApproxMapping - elif isinstance(expected, Sequence) and not isinstance(expected, String): + elif isinstance(expected, Sequence) and not isinstance(expected, STRING_TYPES): cls = ApproxSequence elif isinstance(expected, Decimal): cls = ApproxDecimal @@ -466,9 +482,10 @@ def _is_numpy_array(obj): import inspect for cls in inspect.getmro(type(obj)): - if cls.__module__ == 'numpy': + if cls.__module__ == "numpy": try: import numpy as np + return isinstance(obj, np.ndarray) except ImportError: pass @@ -478,6 +495,7 @@ def _is_numpy_array(obj): # builtin pytest.raises helper + def raises(expected_exception, *args, **kwargs): """ Assert that a code block/function call raises ``expected_exception`` @@ -585,13 +603,14 @@ def raises(expected_exception, *args, **kwargs): """ __tracebackhide__ = True - base_type = (type, text_type, binary_type) - for exc in filterfalse(isclass, always_iterable(expected_exception, base_type)): - msg = ("exceptions must be old-style classes or" - " derived from BaseException, not %s") + for exc in filterfalse(isclass, always_iterable(expected_exception, BASE_TYPE)): + msg = ( + "exceptions must be old-style classes or" + " derived from BaseException, not %s" + ) raise TypeError(msg % type(exc)) - message = "DID NOT RAISE {0}".format(expected_exception) + message = "DID NOT RAISE {}".format(expected_exception) match_expr = None if not args: @@ -600,8 +619,8 @@ def raises(expected_exception, *args, **kwargs): if "match" in kwargs: match_expr = kwargs.pop("match") if kwargs: - msg = 'Unexpected keyword arguments passed to pytest.raises: ' - msg += ', '.join(kwargs.keys()) + msg = "Unexpected keyword arguments passed to pytest.raises: " + msg += ", ".join(kwargs.keys()) raise TypeError(msg) return RaisesContext(expected_exception, message, match_expr) elif isinstance(args[0], str): @@ -631,6 +650,7 @@ raises.Exception = fail.Exception class RaisesContext(object): + def __init__(self, expected_exception, message, match_expr): self.expected_exception = expected_exception self.message = message diff --git a/_pytest/recwarn.py b/_pytest/recwarn.py index ab0f79c75..565af8a3f 100644 --- a/_pytest/recwarn.py +++ b/_pytest/recwarn.py @@ -23,7 +23,7 @@ 
def recwarn(): """ wrec = WarningsRecorder() with wrec: - warnings.simplefilter('default') + warnings.simplefilter("default") yield wrec @@ -76,7 +76,9 @@ class _DeprecatedCallContext(object): if exc_type is None: deprecation_categories = (DeprecationWarning, PendingDeprecationWarning) - if not any(issubclass(c, deprecation_categories) for c in self._captured_categories): + if not any( + issubclass(c, deprecation_categories) for c in self._captured_categories + ): __tracebackhide__ = True msg = "Did not produce DeprecationWarning or PendingDeprecationWarning" raise AssertionError(msg) @@ -180,7 +182,7 @@ class WarningsRecorder(warnings.catch_warnings): __tracebackhide__ = True raise RuntimeError("Cannot enter %r twice" % self) self._list = super(WarningsRecorder, self).__enter__() - warnings.simplefilter('always') + warnings.simplefilter("always") return self def __exit__(self, *exc_info): @@ -191,11 +193,13 @@ class WarningsRecorder(warnings.catch_warnings): class WarningsChecker(WarningsRecorder): + def __init__(self, expected_warning=None, match_expr=None): super(WarningsChecker, self).__init__() - msg = ("exceptions must be old-style classes or " - "derived from Warning, not %s") + msg = ( + "exceptions must be old-style classes or " "derived from Warning, not %s" + ) if isinstance(expected_warning, tuple): for exc in expected_warning: if not inspect.isclass(exc): @@ -214,20 +218,26 @@ class WarningsChecker(WarningsRecorder): # only check if we're not currently handling an exception if all(a is None for a in exc_info): if self.expected_warning is not None: - if not any(issubclass(r.category, self.expected_warning) - for r in self): + if not any(issubclass(r.category, self.expected_warning) for r in self): __tracebackhide__ = True - fail("DID NOT WARN. No warnings of type {0} was emitted. " - "The list of emitted warnings is: {1}.".format( - self.expected_warning, - [each.message for each in self])) + fail( + "DID NOT WARN. No warnings of type {} was emitted. " + "The list of emitted warnings is: {}.".format( + self.expected_warning, [each.message for each in self] + ) + ) elif self.match_expr is not None: for r in self: if issubclass(r.category, self.expected_warning): if re.compile(self.match_expr).search(str(r.message)): break else: - fail("DID NOT WARN. No warnings of type {0} matching" - " ('{1}') was emitted. The list of emitted warnings" - " is: {2}.".format(self.expected_warning, self.match_expr, - [each.message for each in self])) + fail( + "DID NOT WARN. No warnings of type {} matching" + " ('{}') was emitted. 
The list of emitted warnings" + " is: {}.".format( + self.expected_warning, + self.match_expr, + [each.message for each in self], + ) + ) diff --git a/_pytest/resultlog.py b/_pytest/resultlog.py index 9f9c2d1f6..8f300c983 100644 --- a/_pytest/resultlog.py +++ b/_pytest/resultlog.py @@ -9,28 +9,34 @@ import os def pytest_addoption(parser): group = parser.getgroup("terminal reporting", "resultlog plugin options") - group.addoption('--resultlog', '--result-log', action="store", - metavar="path", default=None, - help="DEPRECATED path for machine-readable result log.") + group.addoption( + "--resultlog", + "--result-log", + action="store", + metavar="path", + default=None, + help="DEPRECATED path for machine-readable result log.", + ) def pytest_configure(config): resultlog = config.option.resultlog # prevent opening resultlog on slave nodes (xdist) - if resultlog and not hasattr(config, 'slaveinput'): + if resultlog and not hasattr(config, "slaveinput"): dirname = os.path.dirname(os.path.abspath(resultlog)) if not os.path.isdir(dirname): os.makedirs(dirname) - logfile = open(resultlog, 'w', 1) # line buffered + logfile = open(resultlog, "w", 1) # line buffered config._resultlog = ResultLog(config, logfile) config.pluginmanager.register(config._resultlog) from _pytest.deprecated import RESULT_LOG - config.warn('C1', RESULT_LOG) + + config.warn("C1", RESULT_LOG) def pytest_unconfigure(config): - resultlog = getattr(config, '_resultlog', None) + resultlog = getattr(config, "_resultlog", None) if resultlog: resultlog.logfile.close() del config._resultlog @@ -46,22 +52,23 @@ def generic_path(item): newfspath = node.fspath if newfspath == fspath: if fspart: - gpath.append(':') + gpath.append(":") fspart = False else: - gpath.append('.') + gpath.append(".") else: - gpath.append('/') + gpath.append("/") fspart = True name = node.name - if name[0] in '([': + if name[0] in "([": gpath.pop() gpath.append(name) fspath = newfspath - return ''.join(gpath) + return "".join(gpath) class ResultLog(object): + def __init__(self, config, logfile): self.config = config self.logfile = logfile # preferably line buffered @@ -72,7 +79,7 @@ class ResultLog(object): print(" %s" % line, file=self.logfile) def log_outcome(self, report, lettercode, longrepr): - testpath = getattr(report, 'nodeid', None) + testpath = getattr(report, "nodeid", None) if testpath is None: testpath = report.fspath self.write_log_entry(testpath, lettercode, longrepr) @@ -82,10 +89,10 @@ class ResultLog(object): return res = self.config.hook.pytest_report_teststatus(report=report) code = res[1] - if code == 'x': + if code == "x": longrepr = str(report.longrepr) - elif code == 'X': - longrepr = '' + elif code == "X": + longrepr = "" elif report.passed: longrepr = "" elif report.failed: @@ -106,8 +113,8 @@ class ResultLog(object): self.log_outcome(report, code, longrepr) def pytest_internalerror(self, excrepr): - reprcrash = getattr(excrepr, 'reprcrash', None) + reprcrash = getattr(excrepr, "reprcrash", None) path = getattr(reprcrash, "path", None) if path is None: path = "cwd:%s" % py.path.local() - self.write_log_entry(path, '!', str(excrepr)) + self.write_log_entry(path, "!", str(excrepr)) diff --git a/_pytest/runner.py b/_pytest/runner.py index 6df558a7f..ef1a0e694 100644 --- a/_pytest/runner.py +++ b/_pytest/runner.py @@ -16,9 +16,14 @@ from _pytest.outcomes import skip, Skipped, TEST_OUTCOME def pytest_addoption(parser): group = parser.getgroup("terminal reporting", "reporting", after="general") - group.addoption('--durations', - 
action="store", type=int, default=None, metavar="N", - help="show N slowest setup/test durations (N=0 for all)."), + group.addoption( + "--durations", + action="store", + type=int, + default=None, + metavar="N", + help="show N slowest setup/test durations (N=0 for all).", + ), def pytest_terminal_summary(terminalreporter): @@ -29,7 +34,7 @@ def pytest_terminal_summary(terminalreporter): dlist = [] for replist in tr.stats.values(): for rep in replist: - if hasattr(rep, 'duration'): + if hasattr(rep, "duration"): dlist.append(rep) if not dlist: return @@ -43,8 +48,7 @@ def pytest_terminal_summary(terminalreporter): for rep in dlist: nodeid = rep.nodeid.replace("::()::", "::") - tr.write_line("%02.2fs %-8s %s" % - (rep.duration, rep.when, nodeid)) + tr.write_line("%02.2fs %-8s %s" % (rep.duration, rep.when, nodeid)) def pytest_sessionstart(session): @@ -56,13 +60,9 @@ def pytest_sessionfinish(session): def pytest_runtest_protocol(item, nextitem): - item.ihook.pytest_runtest_logstart( - nodeid=item.nodeid, location=item.location, - ) + item.ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location) runtestprotocol(item, nextitem=nextitem) - item.ihook.pytest_runtest_logfinish( - nodeid=item.nodeid, location=item.location, - ) + item.ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location) return True @@ -77,8 +77,7 @@ def runtestprotocol(item, log=True, nextitem=None): show_test_item(item) if not item.config.option.setuponly: reports.append(call_and_report(item, "call", log)) - reports.append(call_and_report(item, "teardown", log, - nextitem=nextitem)) + reports.append(call_and_report(item, "teardown", log, nextitem=nextitem)) # after all teardown hooks have been called # want funcargs and request info to go away if hasrequest: @@ -91,20 +90,20 @@ def show_test_item(item): """Show test function, parameters and the fixtures of the test item.""" tw = item.config.get_terminal_writer() tw.line() - tw.write(' ' * 8) + tw.write(" " * 8) tw.write(item._nodeid) used_fixtures = sorted(item._fixtureinfo.name2fixturedefs.keys()) if used_fixtures: - tw.write(' (fixtures used: {0})'.format(', '.join(used_fixtures))) + tw.write(" (fixtures used: {})".format(", ".join(used_fixtures))) def pytest_runtest_setup(item): - _update_current_test_var(item, 'setup') + _update_current_test_var(item, "setup") item.session._setupstate.prepare(item) def pytest_runtest_call(item): - _update_current_test_var(item, 'call') + _update_current_test_var(item, "call") sys.last_type, sys.last_value, sys.last_traceback = (None, None, None) try: item.runtest() @@ -120,7 +119,7 @@ def pytest_runtest_call(item): def pytest_runtest_teardown(item, nextitem): - _update_current_test_var(item, 'teardown') + _update_current_test_var(item, "teardown") item.session._setupstate.teardown_exact(item, nextitem) _update_current_test_var(item, None) @@ -131,11 +130,11 @@ def _update_current_test_var(item, when): If ``when`` is None, delete PYTEST_CURRENT_TEST from the environment. 
""" - var_name = 'PYTEST_CURRENT_TEST' + var_name = "PYTEST_CURRENT_TEST" if when: - value = '{0} ({1})'.format(item.nodeid, when) + value = "{} ({})".format(item.nodeid, when) # don't allow null bytes on environment variables (see #2644, #2957) - value = value.replace('\x00', '(null)') + value = value.replace("\x00", "(null)") os.environ[var_name] = value else: os.environ.pop(var_name) @@ -155,6 +154,7 @@ def pytest_report_teststatus(report): # # Implementation + def call_and_report(item, when, log=True, **kwds): call = call_runtest_hook(item, when, **kwds) hook = item.ihook @@ -168,16 +168,20 @@ def call_and_report(item, when, log=True, **kwds): def check_interactive_exception(call, report): return call.excinfo and not ( - hasattr(report, "wasxfail") or - call.excinfo.errisinstance(skip.Exception) or - call.excinfo.errisinstance(bdb.BdbQuit)) + hasattr(report, "wasxfail") + or call.excinfo.errisinstance(skip.Exception) + or call.excinfo.errisinstance(bdb.BdbQuit) + ) def call_runtest_hook(item, when, **kwds): hookname = "pytest_runtest_" + when ihook = getattr(item.ihook, hookname) - return CallInfo(lambda: ihook(item=item, **kwds), when=when, - treat_keyboard_interrupt_as_exception=item.config.getvalue("usepdb")) + return CallInfo( + lambda: ihook(item=item, **kwds), + when=when, + treat_keyboard_interrupt_as_exception=item.config.getvalue("usepdb"), + ) class CallInfo(object): @@ -215,9 +219,10 @@ def getslaveinfoline(node): return node._slaveinfocache except AttributeError: d = node.slaveinfo - ver = "%s.%s.%s" % d['version_info'][:3] + ver = "%s.%s.%s" % d["version_info"][:3] node._slaveinfocache = s = "[%s] %s -- Python %s %s" % ( - d['id'], d['sysplatform'], ver, d['executable']) + d["id"], d["sysplatform"], ver, d["executable"] + ) return s @@ -227,14 +232,14 @@ class BaseReport(object): self.__dict__.update(kw) def toterminal(self, out): - if hasattr(self, 'node'): + if hasattr(self, "node"): out.line(getslaveinfoline(self.node)) longrepr = self.longrepr if longrepr is None: return - if hasattr(longrepr, 'toterminal'): + if hasattr(longrepr, "toterminal"): longrepr.toterminal(out) else: try: @@ -267,7 +272,9 @@ class BaseReport(object): .. versionadded:: 3.5 """ - return '\n'.join(content for (prefix, content) in self.get_sections('Captured log')) + return "\n".join( + content for (prefix, content) in self.get_sections("Captured log") + ) @property def capstdout(self): @@ -275,7 +282,9 @@ class BaseReport(object): .. versionadded:: 3.0 """ - return ''.join(content for (prefix, content) in self.get_sections('Captured stdout')) + return "".join( + content for (prefix, content) in self.get_sections("Captured stdout") + ) @property def capstderr(self): @@ -283,7 +292,9 @@ class BaseReport(object): .. 
versionadded:: 3.0 """ - return ''.join(content for (prefix, content) in self.get_sections('Captured stderr')) + return "".join( + content for (prefix, content) in self.get_sections("Captured stderr") + ) passed = property(lambda x: x.outcome == "passed") failed = property(lambda x: x.outcome == "failed") @@ -297,7 +308,7 @@ class BaseReport(object): def pytest_runtest_makereport(item, call): when = call.when duration = call.stop - call.start - keywords = dict([(x, 1) for x in item.keywords]) + keywords = {x: 1 for x in item.keywords} excinfo = call.excinfo sections = [] if not call.excinfo: @@ -316,13 +327,22 @@ def pytest_runtest_makereport(item, call): if call.when == "call": longrepr = item.repr_failure(excinfo) else: # exception in setup or teardown - longrepr = item._repr_failure_py(excinfo, - style=item.config.option.tbstyle) + longrepr = item._repr_failure_py( + excinfo, style=item.config.option.tbstyle + ) for rwhen, key, content in item._report_sections: sections.append(("Captured %s %s" % (key, rwhen), content)) - return TestReport(item.nodeid, item.location, - keywords, outcome, longrepr, when, - sections, duration, user_properties=item.user_properties) + return TestReport( + item.nodeid, + item.location, + keywords, + outcome, + longrepr, + when, + sections, + duration, + user_properties=item.user_properties, + ) class TestReport(BaseReport): @@ -330,8 +350,19 @@ class TestReport(BaseReport): they fail). """ - def __init__(self, nodeid, location, keywords, outcome, - longrepr, when, sections=(), duration=0, user_properties=(), **extra): + def __init__( + self, + nodeid, + location, + keywords, + outcome, + longrepr, + when, + sections=(), + duration=0, + user_properties=(), + **extra + ): #: normalized collection node id self.nodeid = nodeid @@ -370,7 +401,8 @@ class TestReport(BaseReport): def __repr__(self): return "" % ( - self.nodeid, self.when, self.outcome) + self.nodeid, self.when, self.outcome + ) class TeardownErrorReport(BaseReport): @@ -384,14 +416,13 @@ class TeardownErrorReport(BaseReport): def pytest_make_collect_report(collector): - call = CallInfo( - lambda: list(collector.collect()), - 'collect') + call = CallInfo(lambda: list(collector.collect()), "collect") longrepr = None if not call.excinfo: outcome = "passed" else: from _pytest import nose + skip_exceptions = (Skipped,) + nose.get_skip_exceptions() if call.excinfo.errisinstance(skip_exceptions): outcome = "skipped" @@ -403,15 +434,16 @@ def pytest_make_collect_report(collector): if not hasattr(errorinfo, "toterminal"): errorinfo = CollectErrorRepr(errorinfo) longrepr = errorinfo - rep = CollectReport(collector.nodeid, outcome, longrepr, - getattr(call, 'result', None)) + rep = CollectReport( + collector.nodeid, outcome, longrepr, getattr(call, "result", None) + ) rep.call = call # see collect_one_node return rep class CollectReport(BaseReport): - def __init__(self, nodeid, outcome, longrepr, result, - sections=(), **extra): + + def __init__(self, nodeid, outcome, longrepr, result, sections=(), **extra): self.nodeid = nodeid self.outcome = outcome self.longrepr = longrepr @@ -425,10 +457,12 @@ class CollectReport(BaseReport): def __repr__(self): return "" % ( - self.nodeid, len(self.result), self.outcome) + self.nodeid, len(self.result), self.outcome + ) class CollectErrorRepr(TerminalRepr): + def __init__(self, msg): self.longrepr = msg @@ -477,8 +511,9 @@ class SetupState(object): if hasattr(colitem, "teardown"): colitem.teardown() for colitem in self._finalizers: - assert colitem is None or colitem in 
self.stack \ - or isinstance(colitem, tuple) + assert ( + colitem is None or colitem in self.stack or isinstance(colitem, tuple) + ) def teardown_all(self): while self.stack: @@ -505,7 +540,7 @@ class SetupState(object): # check if the last collection node has raised an error for col in self.stack: - if hasattr(col, '_prepare_exc'): + if hasattr(col, "_prepare_exc"): py.builtin._reraise(*col._prepare_exc) for col in needed_collectors[len(self.stack):]: self.stack.append(col) diff --git a/_pytest/setuponly.py b/_pytest/setuponly.py index a1c7457d7..81240d9d0 100644 --- a/_pytest/setuponly.py +++ b/_pytest/setuponly.py @@ -6,10 +6,18 @@ import sys def pytest_addoption(parser): group = parser.getgroup("debugconfig") - group.addoption('--setuponly', '--setup-only', action="store_true", - help="only setup fixtures, do not execute tests.") - group.addoption('--setupshow', '--setup-show', action="store_true", - help="show setup of fixtures while executing tests.") + group.addoption( + "--setuponly", + "--setup-only", + action="store_true", + help="only setup fixtures, do not execute tests.", + ) + group.addoption( + "--setupshow", + "--setup-show", + action="store_true", + help="show setup of fixtures while executing tests.", + ) @pytest.hookimpl(hookwrapper=True) @@ -17,50 +25,52 @@ def pytest_fixture_setup(fixturedef, request): yield config = request.config if config.option.setupshow: - if hasattr(request, 'param'): + if hasattr(request, "param"): # Save the fixture parameter so ._show_fixture_action() can # display it now and during the teardown (in .finish()). if fixturedef.ids: if callable(fixturedef.ids): fixturedef.cached_param = fixturedef.ids(request.param) else: - fixturedef.cached_param = fixturedef.ids[ - request.param_index] + fixturedef.cached_param = fixturedef.ids[request.param_index] else: fixturedef.cached_param = request.param - _show_fixture_action(fixturedef, 'SETUP') + _show_fixture_action(fixturedef, "SETUP") def pytest_fixture_post_finalizer(fixturedef): if hasattr(fixturedef, "cached_result"): config = fixturedef._fixturemanager.config if config.option.setupshow: - _show_fixture_action(fixturedef, 'TEARDOWN') + _show_fixture_action(fixturedef, "TEARDOWN") if hasattr(fixturedef, "cached_param"): del fixturedef.cached_param def _show_fixture_action(fixturedef, msg): config = fixturedef._fixturemanager.config - capman = config.pluginmanager.getplugin('capturemanager') + capman = config.pluginmanager.getplugin("capturemanager") if capman: out, err = capman.suspend_global_capture() tw = config.get_terminal_writer() tw.line() - tw.write(' ' * 2 * fixturedef.scopenum) - tw.write('{step} {scope} {fixture}'.format( - step=msg.ljust(8), # align the output to TEARDOWN - scope=fixturedef.scope[0].upper(), - fixture=fixturedef.argname)) + tw.write(" " * 2 * fixturedef.scopenum) + tw.write( + "{step} {scope} {fixture}".format( + step=msg.ljust(8), # align the output to TEARDOWN + scope=fixturedef.scope[0].upper(), + fixture=fixturedef.argname, + ) + ) - if msg == 'SETUP': - deps = sorted(arg for arg in fixturedef.argnames if arg != 'request') + if msg == "SETUP": + deps = sorted(arg for arg in fixturedef.argnames if arg != "request") if deps: - tw.write(' (fixtures used: {0})'.format(', '.join(deps))) + tw.write(" (fixtures used: {})".format(", ".join(deps))) - if hasattr(fixturedef, 'cached_param'): - tw.write('[{0}]'.format(fixturedef.cached_param)) + if hasattr(fixturedef, "cached_param"): + tw.write("[{}]".format(fixturedef.cached_param)) if capman: capman.resume_global_capture() 
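The ``--setuponly``/``--setupshow`` hunks above change formatting only, but they show the option-registration pattern that black enforces throughout this diff: a single ``group.addoption(...)`` call with one argument per line. As a minimal sketch of the same pattern in a plugin's ``conftest.py`` (the ``--demo-flag`` name is hypothetical, not an option this patch touches)::

    # conftest.py -- illustrative sketch only; "--demo-flag" is an invented name.
    def pytest_addoption(parser):
        group = parser.getgroup("debugconfig")
        group.addoption(
            "--demo-flag",
            action="store_true",
            default=False,
            help="hypothetical example flag, registered one argument per line.",
        )

The real options keep the behavior their help strings describe: ``pytest --setup-only`` performs only fixture setup and teardown without running tests, while ``pytest --setup-show`` runs the tests and prints the per-fixture SETUP/TEARDOWN lines that ``_show_fixture_action`` above writes.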
diff --git a/_pytest/setupplan.py b/_pytest/setupplan.py index e11bd4069..23f4f97e6 100644 --- a/_pytest/setupplan.py +++ b/_pytest/setupplan.py @@ -5,9 +5,13 @@ import pytest def pytest_addoption(parser): group = parser.getgroup("debugconfig") - group.addoption('--setupplan', '--setup-plan', action="store_true", - help="show what fixtures and tests would be executed but " - "don't execute anything.") + group.addoption( + "--setupplan", + "--setup-plan", + action="store_true", + help="show what fixtures and tests would be executed but " + "don't execute anything.", + ) @pytest.hookimpl(tryfirst=True) diff --git a/_pytest/skipping.py b/_pytest/skipping.py index 36eb4a337..a348d5484 100644 --- a/_pytest/skipping.py +++ b/_pytest/skipping.py @@ -8,21 +8,28 @@ from _pytest.outcomes import fail, skip, xfail def pytest_addoption(parser): group = parser.getgroup("general") - group.addoption('--runxfail', - action="store_true", dest="runxfail", default=False, - help="run tests even if they are marked xfail") + group.addoption( + "--runxfail", + action="store_true", + dest="runxfail", + default=False, + help="run tests even if they are marked xfail", + ) - parser.addini("xfail_strict", - "default for the strict parameter of xfail " - "markers when not given explicitly (default: False)", - default=False, - type="bool") + parser.addini( + "xfail_strict", + "default for the strict parameter of xfail " + "markers when not given explicitly (default: False)", + default=False, + type="bool", + ) def pytest_configure(config): if config.option.runxfail: # yay a hack import pytest + old = pytest.xfail config._cleanup.append(lambda: setattr(pytest, "xfail", old)) @@ -32,48 +39,51 @@ def pytest_configure(config): nop.Exception = xfail.Exception setattr(pytest, "xfail", nop) - config.addinivalue_line("markers", - "skip(reason=None): skip the given test function with an optional reason. " - "Example: skip(reason=\"no way of currently testing this\") skips the " - "test." - ) - config.addinivalue_line("markers", - "skipif(condition): skip the given test function if eval(condition) " - "results in a True value. Evaluation happens within the " - "module global context. Example: skipif('sys.platform == \"win32\"') " - "skips the test if we are on the win32 platform. see " - "http://pytest.org/latest/skipping.html" - ) - config.addinivalue_line("markers", - "xfail(condition, reason=None, run=True, raises=None, strict=False): " - "mark the test function as an expected failure if eval(condition) " - "has a True value. Optionally specify a reason for better reporting " - "and run=False if you don't even want to execute the test function. " - "If only specific exception(s) are expected, you can list them in " - "raises, and if the test fails in other ways, it will be reported as " - "a true failure. See http://pytest.org/latest/skipping.html" - ) + config.addinivalue_line( + "markers", + "skip(reason=None): skip the given test function with an optional reason. " + 'Example: skip(reason="no way of currently testing this") skips the ' + "test.", + ) + config.addinivalue_line( + "markers", + "skipif(condition): skip the given test function if eval(condition) " + "results in a True value. Evaluation happens within the " + "module global context. Example: skipif('sys.platform == \"win32\"') " + "skips the test if we are on the win32 platform. 
see " + "http://pytest.org/latest/skipping.html", + ) + config.addinivalue_line( + "markers", + "xfail(condition, reason=None, run=True, raises=None, strict=False): " + "mark the test function as an expected failure if eval(condition) " + "has a True value. Optionally specify a reason for better reporting " + "and run=False if you don't even want to execute the test function. " + "If only specific exception(s) are expected, you can list them in " + "raises, and if the test fails in other ways, it will be reported as " + "a true failure. See http://pytest.org/latest/skipping.html", + ) @hookimpl(tryfirst=True) def pytest_runtest_setup(item): # Check if skip or skipif are specified as pytest marks item._skipped_by_mark = False - eval_skipif = MarkEvaluator(item, 'skipif') + eval_skipif = MarkEvaluator(item, "skipif") if eval_skipif.istrue(): item._skipped_by_mark = True skip(eval_skipif.getexplanation()) - for skip_info in item.iter_markers(name='skip'): + for skip_info in item.iter_markers(name="skip"): item._skipped_by_mark = True - if 'reason' in skip_info.kwargs: - skip(skip_info.kwargs['reason']) + if "reason" in skip_info.kwargs: + skip(skip_info.kwargs["reason"]) elif skip_info.args: skip(skip_info.args[0]) else: skip("unconditional skip") - item._evalxfail = MarkEvaluator(item, 'xfail') + item._evalxfail = MarkEvaluator(item, "xfail") check_xfail_no_run(item) @@ -91,7 +101,7 @@ def check_xfail_no_run(item): if not item.config.option.runxfail: evalxfail = item._evalxfail if evalxfail.istrue(): - if not evalxfail.get('run', True): + if not evalxfail.get("run", True): xfail("[NOTRUN] " + evalxfail.getexplanation()) @@ -99,24 +109,25 @@ def check_strict_xfail(pyfuncitem): """check xfail(strict=True) for the given PASSING test""" evalxfail = pyfuncitem._evalxfail if evalxfail.istrue(): - strict_default = pyfuncitem.config.getini('xfail_strict') - is_strict_xfail = evalxfail.get('strict', strict_default) + strict_default = pyfuncitem.config.getini("xfail_strict") + is_strict_xfail = evalxfail.get("strict", strict_default) if is_strict_xfail: del pyfuncitem._evalxfail explanation = evalxfail.getexplanation() - fail('[XPASS(strict)] ' + explanation, pytrace=False) + fail("[XPASS(strict)] " + explanation, pytrace=False) @hookimpl(hookwrapper=True) def pytest_runtest_makereport(item, call): outcome = yield rep = outcome.get_result() - evalxfail = getattr(item, '_evalxfail', None) + evalxfail = getattr(item, "_evalxfail", None) # unitttest special case, see setting of _unexpectedsuccess - if hasattr(item, '_unexpectedsuccess') and rep.when == "call": + if hasattr(item, "_unexpectedsuccess") and rep.when == "call": from _pytest.compat import _is_unittest_unexpected_success_a_failure + if item._unexpectedsuccess: - rep.longrepr = "Unexpected success: {0}".format(item._unexpectedsuccess) + rep.longrepr = "Unexpected success: {}".format(item._unexpectedsuccess) else: rep.longrepr = "Unexpected success" if _is_unittest_unexpected_success_a_failure(): @@ -129,8 +140,7 @@ def pytest_runtest_makereport(item, call): elif call.excinfo and call.excinfo.errisinstance(xfail.Exception): rep.wasxfail = "reason: " + call.excinfo.value.msg rep.outcome = "skipped" - elif evalxfail and not rep.skipped and evalxfail.wasvalid() and \ - evalxfail.istrue(): + elif evalxfail and not rep.skipped and evalxfail.wasvalid() and evalxfail.istrue(): if call.excinfo: if evalxfail.invalidraise(call.excinfo.value): rep.outcome = "failed" @@ -138,16 +148,18 @@ def pytest_runtest_makereport(item, call): rep.outcome = "skipped" 
rep.wasxfail = evalxfail.getexplanation() elif call.when == "call": - strict_default = item.config.getini('xfail_strict') - is_strict_xfail = evalxfail.get('strict', strict_default) + strict_default = item.config.getini("xfail_strict") + is_strict_xfail = evalxfail.get("strict", strict_default) explanation = evalxfail.getexplanation() if is_strict_xfail: rep.outcome = "failed" - rep.longrepr = "[XPASS(strict)] {0}".format(explanation) + rep.longrepr = "[XPASS(strict)] {}".format(explanation) else: rep.outcome = "passed" rep.wasxfail = explanation - elif getattr(item, '_skipped_by_mark', False) and rep.skipped and type(rep.longrepr) is tuple: + elif getattr(item, "_skipped_by_mark", False) and rep.skipped and type( + rep.longrepr + ) is tuple: # skipped by mark.skipif; change the location of the failure # to point to the item definition, otherwise it will display # the location of where the skip exception was raised within pytest @@ -164,7 +176,7 @@ def pytest_report_teststatus(report): if report.skipped: return "xfailed", "x", "xfail" elif report.passed: - return "xpassed", "X", ("XPASS", {'yellow': True}) + return "xpassed", "X", ("XPASS", {"yellow": True}) # called by the terminalreporter instance/plugin @@ -224,12 +236,12 @@ def folded_skips(skipped): for event in skipped: key = event.longrepr assert len(key) == 3, (event, key) - keywords = getattr(event, 'keywords', {}) + keywords = getattr(event, "keywords", {}) # folding reports with global pytestmark variable # this is workaround, because for now we cannot identify the scope of a skip marker # TODO: revisit after marks scope would be fixed - when = getattr(event, 'when', None) - if when == 'setup' and 'skip' in keywords and 'pytestmark' not in keywords: + when = getattr(event, "when", None) + if when == "setup" and "skip" in keywords and "pytestmark" not in keywords: key = (key[0], None, key[2]) d.setdefault(key, []).append(event) values = [] @@ -240,7 +252,7 @@ def folded_skips(skipped): def show_skipped(terminalreporter, lines): tr = terminalreporter - skipped = tr.stats.get('skipped', []) + skipped = tr.stats.get("skipped", []) if skipped: # if not tr.hasopt('skipped'): # tr.write_line( @@ -255,15 +267,14 @@ def show_skipped(terminalreporter, lines): reason = reason[9:] if lineno is not None: lines.append( - "SKIP [%d] %s:%d: %s" % - (num, fspath, lineno + 1, reason)) + "SKIP [%d] %s:%d: %s" % (num, fspath, lineno + 1, reason) + ) else: - lines.append( - "SKIP [%d] %s: %s" % - (num, fspath, reason)) + lines.append("SKIP [%d] %s: %s" % (num, fspath, reason)) def shower(stat, format): + def show_(terminalreporter, lines): return show_simple(terminalreporter, lines, stat, format) @@ -271,13 +282,12 @@ def shower(stat, format): REPORTCHAR_ACTIONS = { - 'x': show_xfailed, - 'X': show_xpassed, - 'f': shower('failed', "FAIL %s"), - 'F': shower('failed', "FAIL %s"), - 's': show_skipped, - 'S': show_skipped, - 'p': shower('passed', "PASSED %s"), - 'E': shower('error', "ERROR %s") - + "x": show_xfailed, + "X": show_xpassed, + "f": shower("failed", "FAIL %s"), + "F": shower("failed", "FAIL %s"), + "s": show_skipped, + "S": show_skipped, + "p": shower("passed", "PASSED %s"), + "E": shower("error", "ERROR %s"), } diff --git a/_pytest/terminal.py b/_pytest/terminal.py index f8ad33c10..94b416556 100644 --- a/_pytest/terminal.py +++ b/_pytest/terminal.py @@ -16,8 +16,13 @@ from more_itertools import collapse import pytest from _pytest import nodes -from _pytest.main import EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, \ - EXIT_USAGEERROR, 
EXIT_NOTESTSCOLLECTED +from _pytest.main import ( + EXIT_OK, + EXIT_TESTSFAILED, + EXIT_INTERRUPTED, + EXIT_USAGEERROR, + EXIT_NOTESTSCOLLECTED, +) import argparse @@ -30,93 +35,140 @@ class MoreQuietAction(argparse.Action): used to unify verbosity handling """ - def __init__(self, - option_strings, - dest, - default=None, - required=False, - help=None): + + def __init__(self, option_strings, dest, default=None, required=False, help=None): super(MoreQuietAction, self).__init__( option_strings=option_strings, dest=dest, nargs=0, default=default, required=required, - help=help) + help=help, + ) def __call__(self, parser, namespace, values, option_string=None): new_count = getattr(namespace, self.dest, 0) - 1 setattr(namespace, self.dest, new_count) # todo Deprecate config.quiet - namespace.quiet = getattr(namespace, 'quiet', 0) + 1 + namespace.quiet = getattr(namespace, "quiet", 0) + 1 def pytest_addoption(parser): group = parser.getgroup("terminal reporting", "reporting", after="general") - group._addoption('-v', '--verbose', action="count", default=0, - dest="verbose", help="increase verbosity."), - group._addoption('-q', '--quiet', action=MoreQuietAction, default=0, - dest="verbose", help="decrease verbosity."), - group._addoption("--verbosity", dest='verbose', type=int, default=0, - help="set verbosity") - group._addoption('-r', - action="store", dest="reportchars", default='', metavar="chars", - help="show extra test summary info as specified by chars (f)ailed, " - "(E)error, (s)skipped, (x)failed, (X)passed, " - "(p)passed, (P)passed with output, (a)all except pP. " - "Warnings are displayed at all times except when " - "--disable-warnings is set") - group._addoption('--disable-warnings', '--disable-pytest-warnings', default=False, - dest='disable_warnings', action='store_true', - help='disable warnings summary') - group._addoption('-l', '--showlocals', - action="store_true", dest="showlocals", default=False, - help="show locals in tracebacks (disabled by default).") - group._addoption('--tb', metavar="style", - action="store", dest="tbstyle", default='auto', - choices=['auto', 'long', 'short', 'no', 'line', 'native'], - help="traceback print mode (auto/long/short/line/native/no).") - group._addoption('--show-capture', - action="store", dest="showcapture", - choices=['no', 'stdout', 'stderr', 'log', 'all'], default='all', - help="Controls how captured stdout/stderr/log is shown on failed tests. " - "Default is 'all'.") - group._addoption('--fulltrace', '--full-trace', - action="store_true", default=False, - help="don't cut any tracebacks (default is to cut).") - group._addoption('--color', metavar="color", - action="store", dest="color", default='auto', - choices=['yes', 'no', 'auto'], - help="color terminal output (yes/no/auto).") + group._addoption( + "-v", + "--verbose", + action="count", + default=0, + dest="verbose", + help="increase verbosity.", + ), + group._addoption( + "-q", + "--quiet", + action=MoreQuietAction, + default=0, + dest="verbose", + help="decrease verbosity.", + ), + group._addoption( + "--verbosity", dest="verbose", type=int, default=0, help="set verbosity" + ) + group._addoption( + "-r", + action="store", + dest="reportchars", + default="", + metavar="chars", + help="show extra test summary info as specified by chars (f)ailed, " + "(E)error, (s)skipped, (x)failed, (X)passed, " + "(p)passed, (P)passed with output, (a)all except pP. 
" + "Warnings are displayed at all times except when " + "--disable-warnings is set", + ) + group._addoption( + "--disable-warnings", + "--disable-pytest-warnings", + default=False, + dest="disable_warnings", + action="store_true", + help="disable warnings summary", + ) + group._addoption( + "-l", + "--showlocals", + action="store_true", + dest="showlocals", + default=False, + help="show locals in tracebacks (disabled by default).", + ) + group._addoption( + "--tb", + metavar="style", + action="store", + dest="tbstyle", + default="auto", + choices=["auto", "long", "short", "no", "line", "native"], + help="traceback print mode (auto/long/short/line/native/no).", + ) + group._addoption( + "--show-capture", + action="store", + dest="showcapture", + choices=["no", "stdout", "stderr", "log", "all"], + default="all", + help="Controls how captured stdout/stderr/log is shown on failed tests. " + "Default is 'all'.", + ) + group._addoption( + "--fulltrace", + "--full-trace", + action="store_true", + default=False, + help="don't cut any tracebacks (default is to cut).", + ) + group._addoption( + "--color", + metavar="color", + action="store", + dest="color", + default="auto", + choices=["yes", "no", "auto"], + help="color terminal output (yes/no/auto).", + ) - parser.addini("console_output_style", - help="console output: classic or with additional progress information (classic|progress).", - default='progress') + parser.addini( + "console_output_style", + help="console output: classic or with additional progress information (classic|progress).", + default="progress", + ) def pytest_configure(config): reporter = TerminalReporter(config, sys.stdout) - config.pluginmanager.register(reporter, 'terminalreporter') + config.pluginmanager.register(reporter, "terminalreporter") if config.option.debug or config.option.traceconfig: + def mywriter(tags, args): msg = " ".join(map(str, args)) reporter.write_line("[traceconfig] " + msg) + config.trace.root.setprocessor("pytest:config", mywriter) def getreportopt(config): reportopts = "" reportchars = config.option.reportchars - if not config.option.disable_warnings and 'w' not in reportchars: - reportchars += 'w' - elif config.option.disable_warnings and 'w' in reportchars: - reportchars = reportchars.replace('w', '') + if not config.option.disable_warnings and "w" not in reportchars: + reportchars += "w" + elif config.option.disable_warnings and "w" in reportchars: + reportchars = reportchars.replace("w", "") if reportchars: for char in reportchars: - if char not in reportopts and char != 'a': + if char not in reportopts and char != "a": reportopts += char - elif char == 'a': - reportopts = 'fEsxXw' + elif char == "a": + reportopts = "fEsxXw" return reportopts @@ -161,15 +213,17 @@ class WarningReport(object): if isinstance(self.fslocation, tuple) and len(self.fslocation) >= 2: filename, linenum = self.fslocation[:2] relpath = py.path.local(filename).relto(config.invocation_dir) - return '%s:%s' % (relpath, linenum) + return "%s:%s" % (relpath, linenum) else: return str(self.fslocation) return None class TerminalReporter(object): + def __init__(self, config, file=None): import _pytest.config + self.config = config self.verbosity = self.config.option.verbose self.showheader = self.verbosity >= 0 @@ -196,15 +250,15 @@ class TerminalReporter(object): def _determine_show_progress_info(self): """Return True if we should display progress information based on the current config""" # do not show progress if we are not capturing output (#3038) - if 
self.config.getoption('capture') == 'no': + if self.config.getoption("capture") == "no": return False # do not show progress if we are showing fixture setup/teardown - if self.config.getoption('setupshow'): + if self.config.getoption("setupshow"): return False - return self.config.getini('console_output_style') == 'progress' + return self.config.getini("console_output_style") == "progress" def hasopt(self, char): - char = {'xfailed': 'x', 'skipped': 's'}.get(char, char) + char = {"xfailed": "x", "skipped": "s"}.get(char, char) return char in self.reportchars def write_fspath_result(self, nodeid, res): @@ -250,12 +304,12 @@ class TerminalReporter(object): The rest of the keyword arguments are markup instructions. """ - erase = markup.pop('erase', False) + erase = markup.pop("erase", False) if erase: fill_count = self._tw.fullwidth - len(line) - 1 - fill = ' ' * fill_count + fill = " " * fill_count else: - fill = '' + fill = "" line = str(line) self._tw.write("\r" + line + fill, **markup) @@ -276,8 +330,9 @@ class TerminalReporter(object): def pytest_logwarning(self, code, fslocation, message, nodeid): warnings = self.stats.setdefault("warnings", []) - warning = WarningReport(code=code, fslocation=fslocation, - message=message, nodeid=nodeid) + warning = WarningReport( + code=code, fslocation=fslocation, message=message, nodeid=nodeid + ) warnings.append(warning) def pytest_plugin_registered(self, plugin): @@ -289,7 +344,7 @@ class TerminalReporter(object): self.write_line(msg) def pytest_deselected(self, items): - self.stats.setdefault('deselected', []).extend(items) + self.stats.setdefault("deselected", []).extend(items) def pytest_runtest_logstart(self, nodeid, location): # ensure that the path is printed before the @@ -314,7 +369,7 @@ class TerminalReporter(object): if not letter and not word: # probably passed setup/teardown return - running_xdist = hasattr(rep, 'node') + running_xdist = hasattr(rep, "node") if self.verbosity <= 0: if not running_xdist and self.showfspath: self.write_fspath_result(rep.nodeid, letter) @@ -324,11 +379,11 @@ class TerminalReporter(object): self._progress_nodeids_reported.add(rep.nodeid) if markup is None: if rep.passed: - markup = {'green': True} + markup = {"green": True} elif rep.failed: - markup = {'red': True} + markup = {"red": True} elif rep.skipped: - markup = {'yellow': True} + markup = {"yellow": True} else: markup = {} line = self._locationline(rep.nodeid, *rep.location) @@ -340,9 +395,11 @@ class TerminalReporter(object): self.ensure_newline() self._tw.write("[%s]" % rep.node.gateway.id) if self._show_progress_info: - self._tw.write(self._get_progress_information_message() + " ", cyan=True) + self._tw.write( + self._get_progress_information_message() + " ", cyan=True + ) else: - self._tw.write(' ') + self._tw.write(" ") self._tw.write(word, **markup) self._tw.write(" " + line) self.currentfspath = -2 @@ -350,29 +407,33 @@ class TerminalReporter(object): def pytest_runtest_logfinish(self, nodeid): if self.verbosity <= 0 and self._show_progress_info: self._progress_nodeids_reported.add(nodeid) - last_item = len(self._progress_nodeids_reported) == self._session.testscollected + last_item = len( + self._progress_nodeids_reported + ) == self._session.testscollected if last_item: self._write_progress_information_filling_space() else: past_edge = self._tw.chars_on_current_line + self._PROGRESS_LENGTH + 1 >= self._screen_width if past_edge: msg = self._get_progress_information_message() - self._tw.write(msg + '\n', cyan=True) + self._tw.write(msg + 
"\n", cyan=True) - _PROGRESS_LENGTH = len(' [100%]') + _PROGRESS_LENGTH = len(" [100%]") def _get_progress_information_message(self): - if self.config.getoption('capture') == 'no': - return '' + if self.config.getoption("capture") == "no": + return "" collected = self._session.testscollected if collected: progress = len(self._progress_nodeids_reported) * 100 // collected - return ' [{:3d}%]'.format(progress) - return ' [100%]' + return " [{:3d}%]".format(progress) + return " [100%]" def _write_progress_information_filling_space(self): msg = self._get_progress_information_message() - fill = ' ' * (self._tw.fullwidth - self._tw.chars_on_current_line - len(msg) - 1) + fill = " " * ( + self._tw.fullwidth - self._tw.chars_on_current_line - len(msg) - 1 + ) self.write(fill + msg, cyan=True) def pytest_collection(self): @@ -394,14 +455,16 @@ class TerminalReporter(object): if self.config.option.verbose < 0: return - errors = len(self.stats.get('error', [])) - skipped = len(self.stats.get('skipped', [])) - deselected = len(self.stats.get('deselected', [])) + errors = len(self.stats.get("error", [])) + skipped = len(self.stats.get("skipped", [])) + deselected = len(self.stats.get("deselected", [])) if final: line = "collected " else: line = "collecting " - line += str(self._numcollected) + " item" + ('' if self._numcollected == 1 else 's') + line += str(self._numcollected) + " item" + ( + "" if self._numcollected == 1 else "s" + ) if errors: line += " / %d errors" % errors if deselected: @@ -411,7 +474,7 @@ class TerminalReporter(object): if self.isatty: self.rewrite(line, bold=True, erase=True) if final: - self.write('\n') + self.write("\n") else: self.write_line(line) @@ -428,17 +491,22 @@ class TerminalReporter(object): self.write_sep("=", "test session starts", bold=True) verinfo = platform.python_version() msg = "platform %s -- Python %s" % (sys.platform, verinfo) - if hasattr(sys, 'pypy_version_info'): + if hasattr(sys, "pypy_version_info"): verinfo = ".".join(map(str, sys.pypy_version_info[:3])) msg += "[pypy-%s-%s]" % (verinfo, sys.pypy_version_info[3]) msg += ", pytest-%s, py-%s, pluggy-%s" % ( - pytest.__version__, py.__version__, pluggy.__version__) - if self.verbosity > 0 or self.config.option.debug or \ - getattr(self.config.option, 'pastebin', None): + pytest.__version__, py.__version__, pluggy.__version__ + ) + if ( + self.verbosity > 0 + or self.config.option.debug + or getattr(self.config.option, "pastebin", None) + ): msg += " -- " + str(sys.executable) self.write_line(msg) lines = self.config.hook.pytest_report_header( - config=self.config, startdir=self.startdir) + config=self.config, startdir=self.startdir + ) self._write_report_lines_from_hooks(lines) def _write_report_lines_from_hooks(self, lines): @@ -455,21 +523,21 @@ class TerminalReporter(object): plugininfo = config.pluginmanager.list_plugin_distinfo() if plugininfo: - lines.append( - "plugins: %s" % ", ".join(_plugin_nameversions(plugininfo))) + lines.append("plugins: %s" % ", ".join(_plugin_nameversions(plugininfo))) return lines def pytest_collection_finish(self, session): if self.config.option.collectonly: self._printcollecteditems(session.items) - if self.stats.get('failed'): + if self.stats.get("failed"): self._tw.sep("!", "collection failures") - for rep in self.stats.get('failed'): + for rep in self.stats.get("failed"): rep.toterminal(self._tw) return 1 return 0 lines = self.config.hook.pytest_report_collectionfinish( - config=self.config, startdir=self.startdir, items=session.items) + config=self.config, 
startdir=self.startdir, items=session.items + ) self._write_report_lines_from_hooks(lines) def _printcollecteditems(self, items): @@ -480,7 +548,7 @@ class TerminalReporter(object): if self.config.option.verbose < -1: counts = {} for item in items: - name = item.nodeid.split('::', 1)[0] + name = item.nodeid.split("::", 1)[0] counts[name] = counts.get(name, 0) + 1 for name, count in sorted(counts.items()): self._tw.line("%s: %d" % (name, count)) @@ -511,11 +579,16 @@ class TerminalReporter(object): outcome.get_result() self._tw.line("") summary_exit_codes = ( - EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, EXIT_USAGEERROR, - EXIT_NOTESTSCOLLECTED) + EXIT_OK, + EXIT_TESTSFAILED, + EXIT_INTERRUPTED, + EXIT_USAGEERROR, + EXIT_NOTESTSCOLLECTED, + ) if exitstatus in summary_exit_codes: - self.config.hook.pytest_terminal_summary(terminalreporter=self, - exitstatus=exitstatus) + self.config.hook.pytest_terminal_summary( + terminalreporter=self, exitstatus=exitstatus + ) if exitstatus == EXIT_INTERRUPTED: self._report_keyboardinterrupt() del self._keyboardinterrupt_memo @@ -533,7 +606,7 @@ class TerminalReporter(object): self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True) def pytest_unconfigure(self): - if hasattr(self, '_keyboardinterrupt_memo'): + if hasattr(self, "_keyboardinterrupt_memo"): self._report_keyboardinterrupt() def _report_keyboardinterrupt(self): @@ -544,18 +617,23 @@ class TerminalReporter(object): if self.config.option.fulltrace: excrepr.toterminal(self._tw) else: - self._tw.line("to show a full traceback on KeyboardInterrupt use --fulltrace", yellow=True) + self._tw.line( + "to show a full traceback on KeyboardInterrupt use --fulltrace", + yellow=True, + ) excrepr.reprcrash.toterminal(self._tw) def _locationline(self, nodeid, fspath, lineno, domain): + def mkrel(nodeid): line = self.config.cwd_relative_nodeid(nodeid) if domain and line.endswith(domain): line = line[:-len(domain)] values = domain.split("[") - values[0] = values[0].replace('.', '::') # don't replace '.' in params + values[0] = values[0].replace(".", "::") # don't replace '.' 
in params line += "[".join(values) return line + # collect_fspath comes from testid which has a "/"-normalized path if fspath: @@ -567,7 +645,7 @@ class TerminalReporter(object): return res + " " def _getfailureheadline(self, rep): - if hasattr(rep, 'location'): + if hasattr(rep, "location"): fspath, lineno, domain = rep.location return domain else: @@ -588,7 +666,7 @@ class TerminalReporter(object): def getreports(self, name): values = [] for x in self.stats.get(name, []): - if not hasattr(x, '_pdbshown'): + if not hasattr(x, "_pdbshown"): values.append(x) return values @@ -598,22 +676,24 @@ class TerminalReporter(object): if not all_warnings: return - grouped = itertools.groupby(all_warnings, key=lambda wr: wr.get_location(self.config)) + grouped = itertools.groupby( + all_warnings, key=lambda wr: wr.get_location(self.config) + ) self.write_sep("=", "warnings summary", yellow=True, bold=False) for location, warning_records in grouped: - self._tw.line(str(location) or '') + self._tw.line(str(location) or "") for w in warning_records: lines = w.message.splitlines() - indented = '\n'.join(' ' + x for x in lines) + indented = "\n".join(" " + x for x in lines) self._tw.line(indented) self._tw.line() - self._tw.line('-- Docs: http://doc.pytest.org/en/latest/warnings.html') + self._tw.line("-- Docs: http://doc.pytest.org/en/latest/warnings.html") def summary_passes(self): if self.config.option.tbstyle != "no": if self.hasopt("P"): - reports = self.getreports('passed') + reports = self.getreports("passed") if not reports: return self.write_sep("=", "PASSES") @@ -624,15 +704,15 @@ class TerminalReporter(object): def print_teardown_sections(self, rep): for secname, content in rep.sections: - if 'teardown' in secname: - self._tw.sep('-', secname) + if "teardown" in secname: + self._tw.sep("-", secname) if content[-1:] == "\n": content = content[:-1] self._tw.line(content) def summary_failures(self): if self.config.option.tbstyle != "no": - reports = self.getreports('failed') + reports = self.getreports("failed") if not reports: return self.write_sep("=", "FAILURES") @@ -642,22 +722,22 @@ class TerminalReporter(object): self.write_line(line) else: msg = self._getfailureheadline(rep) - markup = {'red': True, 'bold': True} + markup = {"red": True, "bold": True} self.write_sep("_", msg, **markup) self._outrep_summary(rep) - for report in self.getreports(''): - if report.nodeid == rep.nodeid and report.when == 'teardown': + for report in self.getreports(""): + if report.nodeid == rep.nodeid and report.when == "teardown": self.print_teardown_sections(report) def summary_errors(self): if self.config.option.tbstyle != "no": - reports = self.getreports('error') + reports = self.getreports("error") if not reports: return self.write_sep("=", "ERRORS") - for rep in self.stats['error']: + for rep in self.stats["error"]: msg = self._getfailureheadline(rep) - if not hasattr(rep, 'when'): + if not hasattr(rep, "when"): # collect msg = "ERROR collecting " + msg elif rep.when == "setup": @@ -670,10 +750,10 @@ class TerminalReporter(object): def _outrep_summary(self, rep): rep.toterminal(self._tw) showcapture = self.config.option.showcapture - if showcapture == 'no': + if showcapture == "no": return for secname, content in rep.sections: - if showcapture != 'all' and showcapture not in secname: + if showcapture != "all" and showcapture not in secname: continue self._tw.sep("-", secname) if content[-1:] == "\n": @@ -684,7 +764,7 @@ class TerminalReporter(object): session_duration = time.time() - self._sessionstarttime 
(line, color) = build_summary_stats_line(self.stats) msg = "%s in %.2f seconds" % (line, session_duration) - markup = {color: True, 'bold': True} + markup = {color: True, "bold": True} if self.verbosity >= 0: self.write_sep("=", msg, **markup) @@ -702,8 +782,9 @@ def repr_pythonversion(v=None): def build_summary_stats_line(stats): - keys = ("failed passed skipped deselected " - "xfailed xpassed warnings error").split() + keys = ( + "failed passed skipped deselected " "xfailed xpassed warnings error" + ).split() unknown_key_seen = False for key in stats.keys(): if key not in keys: @@ -721,14 +802,14 @@ def build_summary_stats_line(stats): else: line = "no tests ran" - if 'failed' in stats or 'error' in stats: - color = 'red' - elif 'warnings' in stats or unknown_key_seen: - color = 'yellow' - elif 'passed' in stats: - color = 'green' + if "failed" in stats or "error" in stats: + color = "red" + elif "warnings" in stats or unknown_key_seen: + color = "yellow" + elif "passed" in stats: + color = "green" else: - color = 'yellow' + color = "yellow" return (line, color) @@ -737,7 +818,7 @@ def _plugin_nameversions(plugininfo): values = [] for plugin, dist in plugininfo: # gets us name and version! - name = '{dist.project_name}-{dist.version}'.format(dist=dist) + name = "{dist.project_name}-{dist.version}".format(dist=dist) # questionable convenience, but it keeps things short if name.startswith("pytest-"): name = name[7:] diff --git a/_pytest/tmpdir.py b/_pytest/tmpdir.py index 315ead302..260d28422 100644 --- a/_pytest/tmpdir.py +++ b/_pytest/tmpdir.py @@ -37,8 +37,9 @@ class TempdirFactory(object): if not numbered: p = basetemp.mkdir(basename) else: - p = py.path.local.make_numbered_dir(prefix=basename, - keep=0, rootdir=basetemp, lock_timeout=None) + p = py.path.local.make_numbered_dir( + prefix=basename, keep=0, rootdir=basetemp, lock_timeout=None + ) self.trace("mktemp", p) return p @@ -59,12 +60,13 @@ class TempdirFactory(object): if user: # use a sub-directory in the temproot to speed-up # make_numbered_dir() call - rootdir = temproot.join('pytest-of-%s' % user) + rootdir = temproot.join("pytest-of-%s" % user) else: rootdir = temproot rootdir.ensure(dir=1) - basetemp = py.path.local.make_numbered_dir(prefix='pytest-', - rootdir=rootdir) + basetemp = py.path.local.make_numbered_dir( + prefix="pytest-", rootdir=rootdir + ) self._basetemp = t = basetemp.realpath() self.trace("new basetemp", t) return t @@ -78,6 +80,7 @@ def get_user(): in the current environment (see #1010). """ import getpass + try: return getpass.getuser() except (ImportError, KeyError): @@ -98,11 +101,11 @@ def pytest_configure(config): mp = MonkeyPatch() t = TempdirFactory(config) config._cleanup.extend([mp.undo, t.finish]) - mp.setattr(config, '_tmpdirhandler', t, raising=False) - mp.setattr(pytest, 'ensuretemp', t.ensuretemp, raising=False) + mp.setattr(config, "_tmpdirhandler", t, raising=False) + mp.setattr(pytest, "ensuretemp", t.ensuretemp, raising=False) -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def tmpdir_factory(request): """Return a TempdirFactory instance for the test session. 
""" diff --git a/_pytest/unittest.py b/_pytest/unittest.py index 17c94bca4..6ad9fda88 100644 --- a/_pytest/unittest.py +++ b/_pytest/unittest.py @@ -29,18 +29,19 @@ class UnitTestCase(Class): def setup(self): cls = self.obj - if getattr(cls, '__unittest_skip__', False): + if getattr(cls, "__unittest_skip__", False): return # skipped - setup = getattr(cls, 'setUpClass', None) + setup = getattr(cls, "setUpClass", None) if setup is not None: setup() - teardown = getattr(cls, 'tearDownClass', None) + teardown = getattr(cls, "tearDownClass", None) if teardown is not None: self.addfinalizer(teardown) super(UnitTestCase, self).setup() def collect(self): from unittest import TestLoader + cls = self.obj if not getattr(cls, "__test__", True): return @@ -50,19 +51,19 @@ class UnitTestCase(Class): foundsomething = False for name in loader.getTestCaseNames(self.obj): x = getattr(self.obj, name) - if not getattr(x, '__test__', True): + if not getattr(x, "__test__", True): continue - funcobj = getattr(x, 'im_func', x) + funcobj = getattr(x, "im_func", x) transfer_markers(funcobj, cls, module) yield TestCaseFunction(name, parent=self, callobj=funcobj) foundsomething = True if not foundsomething: - runtest = getattr(self.obj, 'runTest', None) + runtest = getattr(self.obj, "runTest", None) if runtest is not None: ut = sys.modules.get("twisted.trial.unittest", None) if ut is None or runtest != ut.TestCase.runTest: - yield TestCaseFunction('runTest', parent=self) + yield TestCaseFunction("runTest", parent=self) class TestCaseFunction(Function): @@ -73,7 +74,7 @@ class TestCaseFunction(Function): self._testcase = self.parent.obj(self.name) self._fix_unittest_skip_decorator() self._obj = getattr(self._testcase, self.name) - if hasattr(self._testcase, 'setup_method'): + if hasattr(self._testcase, "setup_method"): self._testcase.setup_method(self._obj) if hasattr(self, "_request"): self._request._fillfixtures() @@ -92,7 +93,7 @@ class TestCaseFunction(Function): setattr(self._testcase, "__name__", self.name) def teardown(self): - if hasattr(self._testcase, 'teardown_method'): + if hasattr(self._testcase, "teardown_method"): self._testcase.teardown_method(self._obj) # Allow garbage collection on TestCase instance attributes. 
self._testcase = None @@ -103,26 +104,32 @@ class TestCaseFunction(Function): def _addexcinfo(self, rawexcinfo): # unwrap potential exception info (see twisted trial support below) - rawexcinfo = getattr(rawexcinfo, '_rawexcinfo', rawexcinfo) + rawexcinfo = getattr(rawexcinfo, "_rawexcinfo", rawexcinfo) try: excinfo = _pytest._code.ExceptionInfo(rawexcinfo) except TypeError: try: try: values = traceback.format_exception(*rawexcinfo) - values.insert(0, "NOTE: Incompatible Exception Representation, " - "displaying natively:\n\n") + values.insert( + 0, + "NOTE: Incompatible Exception Representation, " + "displaying natively:\n\n", + ) fail("".join(values), pytrace=False) except (fail.Exception, KeyboardInterrupt): raise except: # noqa - fail("ERROR: Unknown Incompatible Exception " - "representation:\n%r" % (rawexcinfo,), pytrace=False) + fail( + "ERROR: Unknown Incompatible Exception " + "representation:\n%r" % (rawexcinfo,), + pytrace=False, + ) except KeyboardInterrupt: raise except fail.Exception: excinfo = _pytest._code.ExceptionInfo() - self.__dict__.setdefault('_excinfo', []).append(excinfo) + self.__dict__.setdefault("_excinfo", []).append(excinfo) def addError(self, testcase, rawexcinfo): self._addexcinfo(rawexcinfo) @@ -156,11 +163,15 @@ class TestCaseFunction(Function): # implements the skipping machinery (see #2137) # analog to pythons Lib/unittest/case.py:run testMethod = getattr(self._testcase, self._testcase._testMethodName) - if (getattr(self._testcase.__class__, "__unittest_skip__", False) or - getattr(testMethod, "__unittest_skip__", False)): + if ( + getattr(self._testcase.__class__, "__unittest_skip__", False) + or getattr(testMethod, "__unittest_skip__", False) + ): # If the class or method was skipped. - skip_why = (getattr(self._testcase.__class__, '__unittest_skip_why__', '') or - getattr(testMethod, '__unittest_skip_why__', '')) + skip_why = ( + getattr(self._testcase.__class__, "__unittest_skip_why__", "") + or getattr(testMethod, "__unittest_skip_why__", "") + ) try: # PY3, unittest2 on PY2 self._testcase._addSkip(self, self._testcase, skip_why) except TypeError: # PY2 @@ -182,7 +193,8 @@ class TestCaseFunction(Function): def _prunetraceback(self, excinfo): Function._prunetraceback(self, excinfo) traceback = excinfo.traceback.filter( - lambda x: not x.frame.f_globals.get('__unittest')) + lambda x: not x.frame.f_globals.get("__unittest") + ) if traceback: excinfo.traceback = traceback @@ -197,19 +209,20 @@ def pytest_runtest_makereport(item, call): except AttributeError: pass + # twisted trial support @hookimpl(hookwrapper=True) def pytest_runtest_protocol(item): - if isinstance(item, TestCaseFunction) and \ - 'twisted.trial.unittest' in sys.modules: - ut = sys.modules['twisted.python.failure'] + if isinstance(item, TestCaseFunction) and "twisted.trial.unittest" in sys.modules: + ut = sys.modules["twisted.python.failure"] Failure__init__ = ut.Failure.__init__ check_testcase_implements_trial_reporter() - def excstore(self, exc_value=None, exc_type=None, exc_tb=None, - captureVars=None): + def excstore( + self, exc_value=None, exc_type=None, exc_tb=None, captureVars=None + ): if exc_value is None: self._rawexcinfo = sys.exc_info() else: @@ -217,8 +230,9 @@ def pytest_runtest_protocol(item): exc_type = type(exc_value) self._rawexcinfo = (exc_type, exc_value, exc_tb) try: - Failure__init__(self, exc_value, exc_type, exc_tb, - captureVars=captureVars) + Failure__init__( + self, exc_value, exc_type, exc_tb, captureVars=captureVars + ) except TypeError: 
Failure__init__(self, exc_value, exc_type, exc_tb) @@ -234,5 +248,6 @@ def check_testcase_implements_trial_reporter(done=[]): return from zope.interface import classImplements from twisted.trial.itrial import IReporter + classImplements(TestCaseFunction, IReporter) done.append(1) diff --git a/_pytest/warnings.py b/_pytest/warnings.py index 2179328dc..e023d0ab4 100644 --- a/_pytest/warnings.py +++ b/_pytest/warnings.py @@ -12,13 +12,12 @@ def _setoption(wmod, arg): """ Copy of the warning._setoption function but does not escape arguments. """ - parts = arg.split(':') + parts = arg.split(":") if len(parts) > 5: raise wmod._OptionError("too many fields (max 5): %r" % (arg,)) while len(parts) < 5: - parts.append('') - action, message, category, module, lineno = [s.strip() - for s in parts] + parts.append("") + action, message, category, module, lineno = [s.strip() for s in parts] action = wmod._getaction(action) category = wmod._getcategory(category) if lineno: @@ -36,12 +35,18 @@ def _setoption(wmod, arg): def pytest_addoption(parser): group = parser.getgroup("pytest-warnings") group.addoption( - '-W', '--pythonwarnings', action='append', - help="set which warnings to report, see -W option of python itself.") - parser.addini("filterwarnings", type="linelist", - help="Each line specifies a pattern for " - "warnings.filterwarnings. " - "Processed after -W and --pythonwarnings.") + "-W", + "--pythonwarnings", + action="append", + help="set which warnings to report, see -W option of python itself.", + ) + parser.addini( + "filterwarnings", + type="linelist", + help="Each line specifies a pattern for " + "warnings.filterwarnings. " + "Processed after -W and --pythonwarnings.", + ) @contextmanager @@ -51,7 +56,7 @@ def catch_warnings_for_item(item): of the given item and after it is done posts them as warnings to this item. 
""" - args = item.config.getoption('pythonwarnings') or [] + args = item.config.getoption("pythonwarnings") or [] inifilters = item.config.getini("filterwarnings") with warnings.catch_warnings(record=True) as log: for arg in args: @@ -60,7 +65,7 @@ def catch_warnings_for_item(item): for arg in inifilters: _setoption(warnings, arg) - for mark in item.iter_markers(name='filterwarnings'): + for mark in item.iter_markers(name="filterwarnings"): for arg in mark.args: warnings._setoption(arg) @@ -70,23 +75,35 @@ def catch_warnings_for_item(item): warn_msg = warning.message unicode_warning = False - if compat._PY2 and any(isinstance(m, compat.UNICODE_TYPES) for m in warn_msg.args): + if ( + compat._PY2 + and any(isinstance(m, compat.UNICODE_TYPES) for m in warn_msg.args) + ): new_args = [] for m in warn_msg.args: - new_args.append(compat.ascii_escaped(m) if isinstance(m, compat.UNICODE_TYPES) else m) + new_args.append( + compat.ascii_escaped(m) + if isinstance(m, compat.UNICODE_TYPES) + else m + ) unicode_warning = list(warn_msg.args) != new_args warn_msg.args = new_args msg = warnings.formatwarning( - warn_msg, warning.category, - warning.filename, warning.lineno, warning.line) + warn_msg, + warning.category, + warning.filename, + warning.lineno, + warning.line, + ) item.warn("unused", msg) if unicode_warning: warnings.warn( "Warning is using unicode non convertible to ascii, " "converting to a safe representation:\n %s" % msg, - UnicodeWarning) + UnicodeWarning, + ) @pytest.hookimpl(hookwrapper=True) diff --git a/bench/bench.py b/bench/bench.py index ce9496417..4e72444e7 100644 --- a/bench/bench.py +++ b/bench/bench.py @@ -1,12 +1,13 @@ import sys -if __name__ == '__main__': +if __name__ == "__main__": import cProfile - import pytest + import pytest # NOQA import pstats + script = sys.argv[1:] if len(sys.argv) > 1 else "empty.py" - stats = cProfile.run('pytest.cmdline.main(%r)' % script, 'prof') + stats = cProfile.run("pytest.cmdline.main(%r)" % script, "prof") p = pstats.Stats("prof") p.strip_dirs() - p.sort_stats('cumulative') + p.sort_stats("cumulative") print(p.print_stats(500)) diff --git a/bench/bench_argcomplete.py b/bench/bench_argcomplete.py index d66c664f3..495e2c4ed 100644 --- a/bench/bench_argcomplete.py +++ b/bench/bench_argcomplete.py @@ -5,15 +5,18 @@ # FilesCompleter 75.1109 69.2116 # FastFilesCompleter 0.7383 1.0760 +import timeit -if __name__ == '__main__': - import sys - import timeit - from argcomplete.completers import FilesCompleter - from _pytest._argcomplete import FastFilesCompleter - count = 1000 # only a few seconds - setup = 'from __main__ import FastFilesCompleter\nfc = FastFilesCompleter()' - run = 'fc("/d")' - sys.stdout.write('%s\n' % (timeit.timeit(run, - setup=setup.replace('Fast', ''), number=count))) - sys.stdout.write('%s\n' % (timeit.timeit(run, setup=setup, number=count))) +imports = [ + "from argcomplete.completers import FilesCompleter as completer", + "from _pytest._argcomplete import FastFilesCompleter as completer", +] + +count = 1000 # only a few seconds +setup = "%s\nfc = completer()" +run = 'fc("/d")' + + +if __name__ == "__main__": + print(timeit.timeit(run, setup=setup % imports[0], number=count)) + print((timeit.timeit(run, setup=setup % imports[1], number=count))) diff --git a/bench/empty.py b/bench/empty.py index ac5e25701..b90319936 100644 --- a/bench/empty.py +++ b/bench/empty.py @@ -1,3 +1,4 @@ import py + for i in range(1000): py.builtin.exec_("def test_func_%d(): pass" % i) diff --git a/bench/manyparam.py b/bench/manyparam.py index 
d2bca0e8a..a25b098de 100644 --- a/bench/manyparam.py +++ b/bench/manyparam.py @@ -1,12 +1,15 @@ import pytest -@pytest.fixture(scope='module', params=range(966)) + +@pytest.fixture(scope="module", params=range(966)) def foo(request): return request.param + def test_it(foo): pass + + def test_it2(foo): pass - diff --git a/bench/skip.py b/bench/skip.py index 960b30864..b105e79f8 100644 --- a/bench/skip.py +++ b/bench/skip.py @@ -1,10 +1,11 @@ - +from six.moves import range import pytest SKIP = True -@pytest.mark.parametrize("x", xrange(5000)) + +@pytest.mark.parametrize("x", range(5000)) def test_foo(x): if SKIP: pytest.skip("heh") diff --git a/changelog/3491.bugfix.rst b/changelog/3491.bugfix.rst index 2ac733cbc..4c2507b85 100644 --- a/changelog/3491.bugfix.rst +++ b/changelog/3491.bugfix.rst @@ -1 +1 @@ -Fixed a bug where stdout and stderr were logged twice by junitxml when a test was marked xfail. \ No newline at end of file +Fixed a bug where stdout and stderr were logged twice by junitxml when a test was marked xfail. diff --git a/changelog/README.rst b/changelog/README.rst index 35d3a40ed..e34bd4da2 100644 --- a/changelog/README.rst +++ b/changelog/README.rst @@ -4,7 +4,7 @@ text that will be added to the next ``CHANGELOG``. The ``CHANGELOG`` will be read by users, so this description should be aimed to pytest users instead of describing internal changes which are only relevant to the developers. -Make sure to use full sentences with correct case and punctuation, for example:: +Make sure to use full sentences with correct case and punctuation, for example:: Fix issue with non-ascii messages from the ``warnings`` module. diff --git a/doc/en/_templates/links.html b/doc/en/_templates/links.html index 3a1a27a26..6f27757a3 100644 --- a/doc/en/_templates/links.html +++ b/doc/en/_templates/links.html @@ -6,4 +6,3 @@
  • Issue Tracker
  • PDF Documentation - diff --git a/doc/en/_themes/flask/theme.conf b/doc/en/_themes/flask/theme.conf index 18c720f80..372b00283 100644 --- a/doc/en/_themes/flask/theme.conf +++ b/doc/en/_themes/flask/theme.conf @@ -6,4 +6,4 @@ pygments_style = flask_theme_support.FlaskyStyle [options] index_logo = '' index_logo_height = 120px -touch_icon = +touch_icon = diff --git a/doc/en/_themes/flask_theme_support.py b/doc/en/_themes/flask_theme_support.py index 33f47449c..0dcf53b75 100644 --- a/doc/en/_themes/flask_theme_support.py +++ b/doc/en/_themes/flask_theme_support.py @@ -1,7 +1,19 @@ # flasky extensions. flasky pygments style based on tango style from pygments.style import Style -from pygments.token import Keyword, Name, Comment, String, Error, \ - Number, Operator, Generic, Whitespace, Punctuation, Other, Literal +from pygments.token import ( + Keyword, + Name, + Comment, + String, + Error, + Number, + Operator, + Generic, + Whitespace, + Punctuation, + Other, + Literal, +) class FlaskyStyle(Style): @@ -10,77 +22,68 @@ class FlaskyStyle(Style): styles = { # No corresponding class for the following: - #Text: "", # class: '' - Whitespace: "underline #f8f8f8", # class: 'w' - Error: "#a40000 border:#ef2929", # class: 'err' - Other: "#000000", # class 'x' - - Comment: "italic #8f5902", # class: 'c' - Comment.Preproc: "noitalic", # class: 'cp' - - Keyword: "bold #004461", # class: 'k' - Keyword.Constant: "bold #004461", # class: 'kc' - Keyword.Declaration: "bold #004461", # class: 'kd' - Keyword.Namespace: "bold #004461", # class: 'kn' - Keyword.Pseudo: "bold #004461", # class: 'kp' - Keyword.Reserved: "bold #004461", # class: 'kr' - Keyword.Type: "bold #004461", # class: 'kt' - - Operator: "#582800", # class: 'o' - Operator.Word: "bold #004461", # class: 'ow' - like keywords - - Punctuation: "bold #000000", # class: 'p' - + # Text: "", # class: '' + Whitespace: "underline #f8f8f8", # class: 'w' + Error: "#a40000 border:#ef2929", # class: 'err' + Other: "#000000", # class 'x' + Comment: "italic #8f5902", # class: 'c' + Comment.Preproc: "noitalic", # class: 'cp' + Keyword: "bold #004461", # class: 'k' + Keyword.Constant: "bold #004461", # class: 'kc' + Keyword.Declaration: "bold #004461", # class: 'kd' + Keyword.Namespace: "bold #004461", # class: 'kn' + Keyword.Pseudo: "bold #004461", # class: 'kp' + Keyword.Reserved: "bold #004461", # class: 'kr' + Keyword.Type: "bold #004461", # class: 'kt' + Operator: "#582800", # class: 'o' + Operator.Word: "bold #004461", # class: 'ow' - like keywords + Punctuation: "bold #000000", # class: 'p' # because special names such as Name.Class, Name.Function, etc. # are not recognized as such later in the parsing, we choose them # to look the same as ordinary variables. 
- Name: "#000000", # class: 'n' - Name.Attribute: "#c4a000", # class: 'na' - to be revised - Name.Builtin: "#004461", # class: 'nb' - Name.Builtin.Pseudo: "#3465a4", # class: 'bp' - Name.Class: "#000000", # class: 'nc' - to be revised - Name.Constant: "#000000", # class: 'no' - to be revised - Name.Decorator: "#888", # class: 'nd' - to be revised - Name.Entity: "#ce5c00", # class: 'ni' - Name.Exception: "bold #cc0000", # class: 'ne' - Name.Function: "#000000", # class: 'nf' - Name.Property: "#000000", # class: 'py' - Name.Label: "#f57900", # class: 'nl' - Name.Namespace: "#000000", # class: 'nn' - to be revised - Name.Other: "#000000", # class: 'nx' - Name.Tag: "bold #004461", # class: 'nt' - like a keyword - Name.Variable: "#000000", # class: 'nv' - to be revised - Name.Variable.Class: "#000000", # class: 'vc' - to be revised - Name.Variable.Global: "#000000", # class: 'vg' - to be revised - Name.Variable.Instance: "#000000", # class: 'vi' - to be revised - - Number: "#990000", # class: 'm' - - Literal: "#000000", # class: 'l' - Literal.Date: "#000000", # class: 'ld' - - String: "#4e9a06", # class: 's' - String.Backtick: "#4e9a06", # class: 'sb' - String.Char: "#4e9a06", # class: 'sc' - String.Doc: "italic #8f5902", # class: 'sd' - like a comment - String.Double: "#4e9a06", # class: 's2' - String.Escape: "#4e9a06", # class: 'se' - String.Heredoc: "#4e9a06", # class: 'sh' - String.Interpol: "#4e9a06", # class: 'si' - String.Other: "#4e9a06", # class: 'sx' - String.Regex: "#4e9a06", # class: 'sr' - String.Single: "#4e9a06", # class: 's1' - String.Symbol: "#4e9a06", # class: 'ss' - - Generic: "#000000", # class: 'g' - Generic.Deleted: "#a40000", # class: 'gd' - Generic.Emph: "italic #000000", # class: 'ge' - Generic.Error: "#ef2929", # class: 'gr' - Generic.Heading: "bold #000080", # class: 'gh' - Generic.Inserted: "#00A000", # class: 'gi' - Generic.Output: "#888", # class: 'go' - Generic.Prompt: "#745334", # class: 'gp' - Generic.Strong: "bold #000000", # class: 'gs' - Generic.Subheading: "bold #800080", # class: 'gu' - Generic.Traceback: "bold #a40000", # class: 'gt' + Name: "#000000", # class: 'n' + Name.Attribute: "#c4a000", # class: 'na' - to be revised + Name.Builtin: "#004461", # class: 'nb' + Name.Builtin.Pseudo: "#3465a4", # class: 'bp' + Name.Class: "#000000", # class: 'nc' - to be revised + Name.Constant: "#000000", # class: 'no' - to be revised + Name.Decorator: "#888", # class: 'nd' - to be revised + Name.Entity: "#ce5c00", # class: 'ni' + Name.Exception: "bold #cc0000", # class: 'ne' + Name.Function: "#000000", # class: 'nf' + Name.Property: "#000000", # class: 'py' + Name.Label: "#f57900", # class: 'nl' + Name.Namespace: "#000000", # class: 'nn' - to be revised + Name.Other: "#000000", # class: 'nx' + Name.Tag: "bold #004461", # class: 'nt' - like a keyword + Name.Variable: "#000000", # class: 'nv' - to be revised + Name.Variable.Class: "#000000", # class: 'vc' - to be revised + Name.Variable.Global: "#000000", # class: 'vg' - to be revised + Name.Variable.Instance: "#000000", # class: 'vi' - to be revised + Number: "#990000", # class: 'm' + Literal: "#000000", # class: 'l' + Literal.Date: "#000000", # class: 'ld' + String: "#4e9a06", # class: 's' + String.Backtick: "#4e9a06", # class: 'sb' + String.Char: "#4e9a06", # class: 'sc' + String.Doc: "italic #8f5902", # class: 'sd' - like a comment + String.Double: "#4e9a06", # class: 's2' + String.Escape: "#4e9a06", # class: 'se' + String.Heredoc: "#4e9a06", # class: 'sh' + String.Interpol: "#4e9a06", # class: 'si' + String.Other: 
"#4e9a06", # class: 'sx' + String.Regex: "#4e9a06", # class: 'sr' + String.Single: "#4e9a06", # class: 's1' + String.Symbol: "#4e9a06", # class: 'ss' + Generic: "#000000", # class: 'g' + Generic.Deleted: "#a40000", # class: 'gd' + Generic.Emph: "italic #000000", # class: 'ge' + Generic.Error: "#ef2929", # class: 'gr' + Generic.Heading: "bold #000080", # class: 'gh' + Generic.Inserted: "#00A000", # class: 'gi' + Generic.Output: "#888", # class: 'go' + Generic.Prompt: "#745334", # class: 'gp' + Generic.Strong: "bold #000000", # class: 'gs' + Generic.Subheading: "bold #800080", # class: 'gu' + Generic.Traceback: "bold #a40000", # class: 'gt' } diff --git a/doc/en/announce/index.rst b/doc/en/announce/index.rst index 98b9de572..8d1c2bd8f 100644 --- a/doc/en/announce/index.rst +++ b/doc/en/announce/index.rst @@ -5,7 +5,7 @@ Release announcements .. toctree:: :maxdepth: 2 - + release-3.6.0 release-3.5.1 release-3.5.0 diff --git a/doc/en/announce/release-2.0.3.rst b/doc/en/announce/release-2.0.3.rst index 9bbfdaab3..81d01eb99 100644 --- a/doc/en/announce/release-2.0.3.rst +++ b/doc/en/announce/release-2.0.3.rst @@ -1,4 +1,4 @@ -py.test 2.0.3: bug fixes and speed ups +py.test 2.0.3: bug fixes and speed ups =========================================================================== Welcome to pytest-2.0.3, a maintenance and bug fix release of pytest, @@ -37,4 +37,3 @@ Changes between 2.0.2 and 2.0.3 internally) - fix issue37: avoid invalid characters in junitxml's output - diff --git a/doc/en/announce/release-2.1.1.rst b/doc/en/announce/release-2.1.1.rst index ecdd69f4d..c2285eba9 100644 --- a/doc/en/announce/release-2.1.1.rst +++ b/doc/en/announce/release-2.1.1.rst @@ -34,4 +34,3 @@ Changes between 2.1.0 and 2.1.1 - fix issue59: provide system-out/err tags for junitxml output - fix issue61: assertion rewriting on boolean operations with 3 or more operands - you can now build a man page with "cd doc ; make man" - diff --git a/doc/en/announce/release-2.1.2.rst b/doc/en/announce/release-2.1.2.rst index 51b7591d3..1975f368a 100644 --- a/doc/en/announce/release-2.1.2.rst +++ b/doc/en/announce/release-2.1.2.rst @@ -30,4 +30,3 @@ Changes between 2.1.1 and 2.1.2 - fix issue68 / packages now work with assertion rewriting - fix issue66: use different assertion rewriting caches when the -O option is passed - don't try assertion rewriting on Jython, use reinterp - diff --git a/doc/en/announce/release-2.1.3.rst b/doc/en/announce/release-2.1.3.rst index f4da60b8b..a43bc058c 100644 --- a/doc/en/announce/release-2.1.3.rst +++ b/doc/en/announce/release-2.1.3.rst @@ -9,7 +9,7 @@ and integration testing. See extensive docs with examples here: The release contains another fix to the perfected assertions introduced with the 2.1 series as well as the new possibility to customize reporting -for assertion expressions on a per-directory level. +for assertion expressions on a per-directory level. If you want to install or upgrade pytest, just type one of:: diff --git a/doc/en/announce/release-2.2.1.rst b/doc/en/announce/release-2.2.1.rst index 5d28bcb01..44281597e 100644 --- a/doc/en/announce/release-2.2.1.rst +++ b/doc/en/announce/release-2.2.1.rst @@ -27,7 +27,7 @@ Changes between 2.2.0 and 2.2.1 ---------------------------------------- - fix issue99 (in pytest and py) internallerrors with resultlog now - produce better output - fixed by normalizing pytest_internalerror + produce better output - fixed by normalizing pytest_internalerror input arguments. 
- fix issue97 / traceback issues (in pytest and py) improve traceback output in conjunction with jinja2 and cython which hack tracebacks @@ -35,7 +35,7 @@ Changes between 2.2.0 and 2.2.1 the final test in a test node will now run its teardown directly instead of waiting for the end of the session. Thanks Dave Hunt for the good reporting and feedback. The pytest_runtest_protocol as well - as the pytest_runtest_teardown hooks now have "nextitem" available + as the pytest_runtest_teardown hooks now have "nextitem" available which will be None indicating the end of the test run. - fix collection crash due to unknown-source collected items, thanks to Ralf Schmitt (fixed by depending on a more recent pylib) diff --git a/doc/en/announce/release-2.2.2.rst b/doc/en/announce/release-2.2.2.rst index 733aedec4..22ef0bc7a 100644 --- a/doc/en/announce/release-2.2.2.rst +++ b/doc/en/announce/release-2.2.2.rst @@ -4,7 +4,7 @@ pytest-2.2.2: bug fixes pytest-2.2.2 (updated to 2.2.3 to fix packaging issues) is a minor backward-compatible release of the versatile py.test testing tool. It contains bug fixes and a few refinements particularly to reporting with -"--collectonly", see below for betails. +"--collectonly", see below for details. For general information see here: @@ -27,7 +27,7 @@ Changes between 2.2.1 and 2.2.2 - fix issue101: wrong args to unittest.TestCase test function now produce better output -- fix issue102: report more useful errors and hints for when a +- fix issue102: report more useful errors and hints for when a test directory was renamed and some pyc/__pycache__ remain - fix issue106: allow parametrize to be applied multiple times e.g. from module, class and at function level. @@ -38,6 +38,6 @@ Changes between 2.2.1 and 2.2.2 - fix issue115: make --collectonly robust against early failure (missing files/directories) - "-qq --collectonly" now shows only files and the number of tests in them -- "-q --collectonly" now shows test ids +- "-q --collectonly" now shows test ids - allow adding of attributes to test reports such that it also works with distributed testing (no upgrade of pytest-xdist needed) diff --git a/doc/en/announce/release-2.2.4.rst b/doc/en/announce/release-2.2.4.rst index 67f0feb27..a8fb9b93c 100644 --- a/doc/en/announce/release-2.2.4.rst +++ b/doc/en/announce/release-2.2.4.rst @@ -36,4 +36,3 @@ Changes between 2.2.3 and 2.2.4 configure/sessionstart where called - fix issue #144: better mangle test ids to junitxml classnames - upgrade distribute_setup.py to 0.6.27 - diff --git a/doc/en/announce/release-2.3.0.rst b/doc/en/announce/release-2.3.0.rst index f863aad0a..061aa025c 100644 --- a/doc/en/announce/release-2.3.0.rst +++ b/doc/en/announce/release-2.3.0.rst @@ -1,7 +1,7 @@ pytest-2.3: improved fixtures / better unittest integration ============================================================================= -pytest-2.3 comes with many major improvements for fixture/funcarg management +pytest-2.3 comes with many major improvements for fixture/funcarg management and parametrized testing in Python. It is now easier, more efficient and more predicatable to re-run the same tests with different fixture instances. Also, you can directly declare the caching "scope" of fixtures so that dependent tests throughout your whole test suite can re-use database or other expensive fixture objects with ease.
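(A minimal sketch of the scoped, dependent fixtures described above; the ``db_connection``/``db_session`` names are illustrative and not part of this patch)::

    import pytest

    @pytest.fixture(scope="module")
    def db_connection():
        # hypothetical expensive resource, created once per module and reused
        return object()

    @pytest.fixture
    def db_session(db_connection):
        # a fixture function may itself use other fixtures
        return (db_connection, "session")

    def test_uses_session(db_session):
        connection, name = db_session
        assert name == "session"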
Lastly, it's possible for fixture functions (formerly known as funcarg factories) to use other fixtures, allowing for a completely modular and -re-useable fixture design. +re-useable fixture design. For detailed info and tutorial-style examples, see: @@ -27,7 +27,7 @@ All changes are backward compatible and you should be able to continue to run your test suites and 3rd party plugins that worked with pytest-2.2.4. -If you are interested in the precise reasoning (including examples) of the +If you are interested in the precise reasoning (including examples) of the pytest-2.3 fixture evolution, please consult http://pytest.org/latest/funcarg_compare.html @@ -43,7 +43,7 @@ and more details for those already in the knowing of pytest can be found in the CHANGELOG below. Particular thanks for this release go to Floris Bruynooghe, Alex Okrushko -Carl Meyer, Ronny Pfannschmidt, Benjamin Peterson and Alex Gaynor for helping +Carl Meyer, Ronny Pfannschmidt, Benjamin Peterson and Alex Gaynor for helping to get the new features right and well integrated. Ronny and Floris also helped to fix a number of bugs and yet more people helped by providing bug reports. @@ -94,7 +94,7 @@ Changes between 2.2.4 and 2.3.0 - pluginmanager.register(...) now raises ValueError if the plugin has been already registered or the name is taken -- fix issue159: improve http://pytest.org/latest/faq.html +- fix issue159: improve http://pytest.org/latest/faq.html especially with respect to the "magic" history, also mention pytest-django, trial and unittest integration. @@ -125,10 +125,9 @@ Changes between 2.2.4 and 2.3.0 you can use startdir.bestrelpath(yourpath) to show nice relative path - - allow plugins to implement both pytest_report_header and + - allow plugins to implement both pytest_report_header and pytest_sessionstart (sessionstart is invoked first). - don't show deselected reason line if there is none - py.test -vv will show all of assert comparisons instead of truncating - diff --git a/doc/en/announce/release-2.3.1.rst b/doc/en/announce/release-2.3.1.rst index b787dc203..6f8770b34 100644 --- a/doc/en/announce/release-2.3.1.rst +++ b/doc/en/announce/release-2.3.1.rst @@ -3,16 +3,16 @@ pytest-2.3.1: fix regression with factory functions pytest-2.3.1 is a quick follow-up release: -- fix issue202 - regression with fixture functions/funcarg factories: - using "self" is now safe again and works as in 2.2.4. Thanks +- fix issue202 - regression with fixture functions/funcarg factories: + using "self" is now safe again and works as in 2.2.4. Thanks to Eduard Schettino for the quick bug report. 
-- disable pexpect pytest self tests on Freebsd - thanks Koob for the +- disable pexpect pytest self tests on Freebsd - thanks Koob for the quick reporting - fix/improve interactive docs with --markers -See +See http://pytest.org/ diff --git a/doc/en/announce/release-2.3.2.rst b/doc/en/announce/release-2.3.2.rst index 75312b429..484feaaa5 100644 --- a/doc/en/announce/release-2.3.2.rst +++ b/doc/en/announce/release-2.3.2.rst @@ -8,9 +8,9 @@ pytest-2.3.2 is another stabilization release: - fix teardown-ordering for parametrized setups - fix unittest and trial compat behaviour with respect to runTest() methods - issue 206 and others: some improvements to packaging -- fix issue127 and others: improve some docs +- fix issue127 and others: improve some docs -See +See http://pytest.org/ @@ -26,7 +26,7 @@ holger krekel Changes between 2.3.1 and 2.3.2 ----------------------------------- -- fix issue208 and fix issue29 use new py version to avoid long pauses +- fix issue208 and fix issue29 use new py version to avoid long pauses when printing tracebacks in long modules - fix issue205 - conftests in subdirs customizing diff --git a/doc/en/announce/release-2.3.3.rst b/doc/en/announce/release-2.3.3.rst index 3a48b6ac4..0cb598a42 100644 --- a/doc/en/announce/release-2.3.3.rst +++ b/doc/en/announce/release-2.3.3.rst @@ -6,7 +6,7 @@ which offers uebersimple assertions, scalable fixture mechanisms and deep customization for testing with Python. Particularly, this release provides: -- integration fixes and improvements related to flask, numpy, nose, +- integration fixes and improvements related to flask, numpy, nose, unittest, mock - makes pytest work on py24 again (yes, people sometimes still need to use it) @@ -16,7 +16,7 @@ this release provides: Thanks to Manuel Jacob, Thomas Waldmann, Ronny Pfannschmidt, Pavel Repin and Andreas Taumoefolau for providing patches and all for the issues. -See +See http://pytest.org/ @@ -59,4 +59,3 @@ Changes between 2.3.2 and 2.3.3 - fix issue127 - improve documentation for pytest_addoption() and add a ``config.getoption(name)`` helper function for consistency. - diff --git a/doc/en/announce/release-2.3.4.rst b/doc/en/announce/release-2.3.4.rst index fc5f361d5..e2e8cb143 100644 --- a/doc/en/announce/release-2.3.4.rst +++ b/doc/en/announce/release-2.3.4.rst @@ -10,10 +10,10 @@ comes with the following fixes and features: can write: -k "name1 or name2" etc. This is a slight usage incompatibility if you used special syntax like "TestClass.test_method" which you now need to write as -k "TestClass and test_method" to match a certain - method in a certain test class. + method in a certain test class. - allow to dynamically define markers via item.keywords[...]=assignment integrating with "-m" option -- yielded test functions will now have autouse-fixtures active but +- yielded test functions will now have autouse-fixtures active but cannot accept fixtures as funcargs - it's anyway recommended to rather use the post-2.0 parametrize features instead of yield, see: http://pytest.org/latest/example/parametrize.html @@ -26,7 +26,7 @@ comes with the following fixes and features: Thanks in particular to Thomas Waldmann for spotting and reporting issues. 
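(A rough illustration of the ``-k`` keyword selection described in the 2.3.4 notes above, with hypothetical test names: ``pytest -k "name1 or name2"`` would select the first two tests, and ``pytest -k "TestClass and test_method"`` the last)::

    # content of test_select.py -- illustrative only
    def test_name1():
        pass

    def test_name2():
        pass

    class TestClass(object):
        def test_method(self):
            pass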
-See +See http://pytest.org/ diff --git a/doc/en/announce/release-2.3.5.rst b/doc/en/announce/release-2.3.5.rst index bb4e84af0..465dd826e 100644 --- a/doc/en/announce/release-2.3.5.rst +++ b/doc/en/announce/release-2.3.5.rst @@ -13,7 +13,7 @@ few interesting new plugins saw the light last month: - pytest-random: randomize test ordering And several others like pytest-django saw maintenance releases. -For a more complete list, check out +For a more complete list, check out https://pypi.org/search/?q=pytest For general information see: @@ -81,7 +81,7 @@ Changes between 2.3.4 and 2.3.5 - fix bug where using capsys with pytest.set_trace() in a test function would break when looking at capsys.readouterr() -- allow to specify prefixes starting with "_" when +- allow to specify prefixes starting with "_" when customizing python_functions test discovery. (thanks Graham Horler) - improve PYTEST_DEBUG tracing output by putting @@ -94,4 +94,3 @@ Changes between 2.3.4 and 2.3.5 - fix issue134 - print the collect errors that prevent running specified test items - fix issue266 - accept unicode in MarkEvaluator expressions - diff --git a/doc/en/announce/release-2.4.0.rst b/doc/en/announce/release-2.4.0.rst index 4fb3d05cd..25f6254c5 100644 --- a/doc/en/announce/release-2.4.0.rst +++ b/doc/en/announce/release-2.4.0.rst @@ -1,9 +1,9 @@ pytest-2.4.0: new fixture features/hooks and bug fixes =========================================================================== -The just released pytest-2.4.0 brings many improvements and numerous +The just released pytest-2.4.0 brings many improvements and numerous bug fixes while remaining plugin- and test-suite compatible apart -from a few supposedly very minor incompatibilities. See below for +from a few supposedly very minor incompatibilities. See below for a full list of details. A few feature highlights: - new yield-style fixtures `pytest.yield_fixture @@ -13,7 +13,7 @@ a full list of details. A few feature highlights: - improved pdb support: ``import pdb ; pdb.set_trace()`` now works without requiring prior disabling of stdout/stderr capturing. Also the ``--pdb`` options works now on collection and internal errors - and we introduced a new experimental hook for IDEs/plugins to + and we introduced a new experimental hook for IDEs/plugins to intercept debugging: ``pytest_exception_interact(node, call, report)``. - shorter monkeypatch variant to allow specifying an import path as @@ -23,7 +23,7 @@ a full list of details. A few feature highlights: called if the corresponding setup method succeeded. - integrate tab-completion on command line options if you - have `argcomplete `_ + have `argcomplete `_ configured. - allow boolean expression directly with skipif/xfail @@ -36,8 +36,8 @@ a full list of details. A few feature highlights: - reporting: color the last line red or green depending if failures/errors occurred or everything passed. 
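(A minimal sketch of the shorter ``monkeypatch.setattr`` variant highlighted above, which accepts a dotted import path as a single string; the patched target and return value are just examples)::

    import os

    def test_fake_cwd(monkeypatch):
        # patch by dotted import path instead of an (object, name) pair
        monkeypatch.setattr("os.getcwd", lambda: "/tmp/example")
        assert os.getcwd() == "/tmp/example"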
-The documentation has been updated to accommodate the changes, -see `http://pytest.org `_ +The documentation has been updated to accommodate the changes, +see `http://pytest.org `_ To install or upgrade pytest:: @@ -45,8 +45,8 @@ To install or upgrade pytest:: easy_install -U pytest -**Many thanks to all who helped, including Floris Bruynooghe, -Brianna Laugher, Andreas Pelme, Anthon van der Neut, Anatoly Bubenkoff, +**Many thanks to all who helped, including Floris Bruynooghe, +Brianna Laugher, Andreas Pelme, Anthon van der Neut, Anatoly Bubenkoff, Vladimir Keleshev, Mathieu Agopian, Ronny Pfannschmidt, Christian Theunert and many others.** @@ -101,12 +101,12 @@ new features: - make "import pdb ; pdb.set_trace()" work natively wrt capturing (no "-s" needed anymore), making ``pytest.set_trace()`` a mere shortcut. -- fix issue181: --pdb now also works on collect errors (and - on internal errors) . This was implemented by a slight internal - refactoring and the introduction of a new hook +- fix issue181: --pdb now also works on collect errors (and + on internal errors) . This was implemented by a slight internal + refactoring and the introduction of a new hook ``pytest_exception_interact`` hook (see next item). -- fix issue341: introduce new experimental hook for IDEs/terminals to +- fix issue341: introduce new experimental hook for IDEs/terminals to intercept debugging: ``pytest_exception_interact(node, call, report)``. - new monkeypatch.setattr() variant to provide a shorter @@ -124,7 +124,7 @@ new features: phase of a node. - simplify pytest.mark.parametrize() signature: allow to pass a - CSV-separated string to specify argnames. For example: + CSV-separated string to specify argnames. For example: ``pytest.mark.parametrize("input,expected", [(1,2), (2,3)])`` works as well as the previous: ``pytest.mark.parametrize(("input", "expected"), ...)``. @@ -149,10 +149,10 @@ new features: Bug fixes: -- fix issue358 - capturing options are now parsed more properly +- fix issue358 - capturing options are now parsed more properly by using a new parser.parse_known_args method. -- pytest now uses argparse instead of optparse (thanks Anthon) which +- pytest now uses argparse instead of optparse (thanks Anthon) which means that "argparse" is added as a dependency if installing into python2.6 environments or below. @@ -193,7 +193,7 @@ Bug fixes: - fix issue323 - sorting of many module-scoped arg parametrizations - make sessionfinish hooks execute with the same cwd-context as at - session start (helps fix plugin behaviour which write output files + session start (helps fix plugin behaviour which write output files with relative path such as pytest-cov) - fix issue316 - properly reference collection hooks in docs @@ -201,7 +201,7 @@ Bug fixes: - fix issue 306 - cleanup of -k/-m options to only match markers/test names/keywords respectively. Thanks Wouter van Ackooy. -- improved doctest counting for doctests in python modules -- +- improved doctest counting for doctests in python modules -- files without any doctest items will not show up anymore and doctest examples are counted as separate test items. thanks Danilo Bellini. @@ -211,7 +211,7 @@ Bug fixes: mode. Thanks Jason R. Coombs. - fix junitxml generation when test output contains control characters, - addressing issue267, thanks Jaap Broekhuizen + addressing issue267, thanks Jaap Broekhuizen - fix issue338: honor --tb style for setup/teardown errors as well. Thanks Maho. 
@@ -220,6 +220,5 @@ Bug fixes: - better parametrize error messages, thanks Brianna Laugher - pytest_terminal_summary(terminalreporter) hooks can now use - ".section(title)" and ".line(msg)" methods to print extra + ".section(title)" and ".line(msg)" methods to print extra information at the end of a test run. - diff --git a/doc/en/announce/release-2.4.1.rst b/doc/en/announce/release-2.4.1.rst index 64ba170f8..308df6bdc 100644 --- a/doc/en/announce/release-2.4.1.rst +++ b/doc/en/announce/release-2.4.1.rst @@ -8,7 +8,7 @@ compared to 2.3.5 before they hit more people: "type" keyword should also be converted to the respective types. thanks Floris Bruynooghe, @dnozay. (fixes issue360 and issue362) -- fix dotted filename completion when using argcomplete +- fix dotted filename completion when using argcomplete thanks Anthon van der Neuth. (fixes issue361) - fix regression when a 1-tuple ("arg",) is used for specifying diff --git a/doc/en/announce/release-2.4.2.rst b/doc/en/announce/release-2.4.2.rst index 3b4aa95ab..ab08b72aa 100644 --- a/doc/en/announce/release-2.4.2.rst +++ b/doc/en/announce/release-2.4.2.rst @@ -26,9 +26,9 @@ pytest-2.4.2 is another bug-fixing release: - remove attempt to "dup" stdout at startup as it's icky. the normal capturing should catch enough possibilities - of tests messing up standard FDs. + of tests messing up standard FDs. -- add pluginmanager.do_configure(config) as a link to +- add pluginmanager.do_configure(config) as a link to config.do_configure() for plugin-compatibility as usual, docs at http://pytest.org and upgrades via:: diff --git a/doc/en/announce/release-2.5.0.rst b/doc/en/announce/release-2.5.0.rst index b04a825cd..29064e05e 100644 --- a/doc/en/announce/release-2.5.0.rst +++ b/doc/en/announce/release-2.5.0.rst @@ -4,7 +4,7 @@ pytest-2.5.0: now down to ZERO reported bugs! pytest-2.5.0 is a big fixing release, the result of two community bug fixing days plus numerous additional works from many people and reporters. The release should be fully compatible to 2.4.2, existing -plugins and test suites. We aim at maintaining this level of ZERO reported +plugins and test suites. We aim at maintaining this level of ZERO reported bugs because it's no fun if your testing tool has bugs, is it? Under a condition, though: when submitting a bug report please provide clear information about the circumstances and a simple example which @@ -17,12 +17,12 @@ help. For those who use older Python versions, please note that pytest is not automatically tested on python2.5 due to virtualenv, setuptools and tox not supporting it anymore. Manual verification shows that it mostly -works fine but it's not going to be part of the automated release +works fine but it's not going to be part of the automated release process and thus likely to break in the future. -As usual, current docs are at +As usual, current docs are at - http://pytest.org + http://pytest.org and you can upgrade from pypi via:: @@ -40,28 +40,28 @@ holger krekel 2.5.0 ----------------------------------- -- dropped python2.5 from automated release testing of pytest itself - which means it's probably going to break soon (but still works +- dropped python2.5 from automated release testing of pytest itself + which means it's probably going to break soon (but still works with this release we believe). - simplified and fixed implementation for calling finalizers when - parametrized fixtures or function arguments are involved. finalization + parametrized fixtures or function arguments are involved. 
finalization is now performed lazily at setup time instead of in the "teardown phase". - While this might sound odd at first, it helps to ensure that we are + While this might sound odd at first, it helps to ensure that we are correctly handling setup/teardown even in complex code. User-level code should not be affected unless it's implementing the pytest_runtest_teardown hook and expecting certain fixture instances are torn down within (very unlikely and would have been unreliable anyway). -- PR90: add --color=yes|no|auto option to force terminal coloring +- PR90: add --color=yes|no|auto option to force terminal coloring mode ("auto" is default). Thanks Marc Abramowitz. - fix issue319 - correctly show unicode in assertion errors. Many thanks to Floris Bruynooghe for the complete PR. Also means we depend on py>=1.4.19 now. -- fix issue396 - correctly sort and finalize class-scoped parametrized - tests independently from number of methods on the class. +- fix issue396 - correctly sort and finalize class-scoped parametrized + tests independently from number of methods on the class. - refix issue323 in a better way -- parametrization should now never cause Runtime Recursion errors because the underlying algorithm @@ -70,18 +70,18 @@ holger krekel to problems for more than >966 non-function scoped parameters). - fix issue290 - there is preliminary support now for parametrizing - with repeated same values (sometimes useful to test if calling + with repeated same values (sometimes useful to test if calling a second time works as with the first time). - close issue240 - document precisely how pytest module importing - works, discuss the two common test directory layouts, and how it + works, discuss the two common test directory layouts, and how it interacts with PEP420-namespace packages. - fix issue246 fix finalizer order to be LIFO on independent fixtures - depending on a parametrized higher-than-function scoped fixture. + depending on a parametrized higher-than-function scoped fixture. (was quite some effort so please bear with the complexity of this sentence :) Thanks Ralph Schmitt for the precise failure example. - + - fix issue244 by implementing special index for parameters to only use indices for paramentrized test ids @@ -99,9 +99,9 @@ holger krekel filtering with simple strings that are not valid python expressions. Examples: "-k 1.3" matches all tests parametrized with 1.3. "-k None" filters all tests that have "None" in their name - and conversely "-k 'not None'". + and conversely "-k 'not None'". Previously these examples would raise syntax errors. - + - fix issue384 by removing the trial support code since the unittest compat enhancements allow trial to handle it on its own @@ -109,7 +109,7 @@ holger krekel - don't hide an ImportError when importing a plugin produces one. fixes issue375. -- fix issue275 - allow usefixtures and autouse fixtures +- fix issue275 - allow usefixtures and autouse fixtures for running doctest text files. - fix issue380 by making --resultlog only rely on longrepr instead @@ -135,20 +135,20 @@ holger krekel (it already did neutralize pytest.mark.xfail markers) - refine pytest / pkg_resources interactions: The AssertionRewritingHook - PEP302 compliant loader now registers itself with setuptools/pkg_resources + PEP302 compliant loader now registers itself with setuptools/pkg_resources properly so that the pkg_resources.resource_stream method works properly. Fixes issue366. Thanks for the investigations and full PR to Jason R. Coombs. 
- pytestconfig fixture is now session-scoped as it is the same object during the - whole test run. Fixes issue370. + whole test run. Fixes issue370. - avoid one surprising case of marker malfunction/confusion:: - + @pytest.mark.some(lambda arg: ...) def test_function(): - would not work correctly because pytest assumes @pytest.mark.some - gets a function to be decorated already. We now at least detect if this + would not work correctly because pytest assumes @pytest.mark.some + gets a function to be decorated already. We now at least detect if this arg is a lambda and thus the example will work. Thanks Alex Gaynor for bringing it up. @@ -159,11 +159,11 @@ holger krekel although it's not needed by pytest itself atm. Also fix caching. Fixes issue376. -- fix issue221 - handle importing of namespace-package with no +- fix issue221 - handle importing of namespace-package with no __init__.py properly. - refactor internal FixtureRequest handling to avoid monkeypatching. - One of the positive user-facing effects is that the "request" object + One of the positive user-facing effects is that the "request" object can now be used in closures. - fixed version comparison in pytest.importskip(modname, minverstring) @@ -172,4 +172,3 @@ holger krekel does not duplicate the unittest-API into the "plain" namespace. - fix verbose reporting for @mock'd test functions - diff --git a/doc/en/announce/release-2.5.1.rst b/doc/en/announce/release-2.5.1.rst index a3a74cec6..22e69a836 100644 --- a/doc/en/announce/release-2.5.1.rst +++ b/doc/en/announce/release-2.5.1.rst @@ -1,8 +1,8 @@ pytest-2.5.1: fixes and new home page styling =========================================================================== -pytest is a mature Python testing tool with more than a 1000 tests -against itself, passing on many different interpreters and platforms. +pytest is a mature Python testing tool with more than a 1000 tests +against itself, passing on many different interpreters and platforms. The 2.5.1 release maintains the "zero-reported-bugs" promise by fixing the three bugs reported since the last release a few days ago. It also @@ -11,12 +11,12 @@ the flask theme from Armin Ronacher: http://pytest.org -If you have anything more to improve styling and docs, +If you have anything more to improve styling and docs, we'd be very happy to merge further pull requests. On the coding side, the release also contains a little enhancement to fixture decorators allowing to directly influence generation of test -ids, thanks to Floris Bruynooghe. Other thanks for helping with +ids, thanks to Floris Bruynooghe. Other thanks for helping with this release go to Anatoly Bubenkoff and Ronny Pfannschmidt. As usual, you can upgrade from pypi via:: @@ -37,11 +37,10 @@ holger krekel - Allow parameterized fixtures to specify the ID of the parameters by adding an ids argument to pytest.fixture() and pytest.yield_fixture(). - Thanks Floris Bruynooghe. + Thanks Floris Bruynooghe. - fix issue404 by always using the binary xml escape in the junitxml plugin. Thanks Ronny Pfannschmidt. - fix issue407: fix addoption docstring to point to argparse instead of optparse. Thanks Daniel D. Wright. 
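(The ``ids`` argument for parameterized fixtures mentioned in the 2.5.1 notes above might be used like this; the params and ids are illustrative)::

    import pytest

    @pytest.fixture(params=[0, 1], ids=["spam", "ham"])
    def a(request):
        return request.param

    def test_a(a):
        # collected as test_a[spam] and test_a[ham] instead of test_a[0], test_a[1]
        assert a in (0, 1)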
- diff --git a/doc/en/announce/release-2.5.2.rst b/doc/en/announce/release-2.5.2.rst index d5cfca2db..c389f5f54 100644 --- a/doc/en/announce/release-2.5.2.rst +++ b/doc/en/announce/release-2.5.2.rst @@ -1,8 +1,8 @@ -pytest-2.5.2: fixes +pytest-2.5.2: fixes =========================================================================== -pytest is a mature Python testing tool with more than a 1000 tests -against itself, passing on many different interpreters and platforms. +pytest is a mature Python testing tool with more than a 1000 tests +against itself, passing on many different interpreters and platforms. The 2.5.2 release fixes a few bugs with two maybe-bugs remaining and actively being worked on (and waiting for the bug reporter's input). @@ -19,18 +19,18 @@ As usual, you can upgrade from pypi via:: Thanks to the following people who contributed to this release: - Anatoly Bubenkov + Anatoly Bubenkov Ronny Pfannschmidt Floris Bruynooghe - Bruno Oliveira - Andreas Pelme + Bruno Oliveira + Andreas Pelme Jurko Gospodnetić - Piotr Banaszkiewicz - Simon Liedtke - lakka - Lukasz Balcerzak - Philippe Muller - Daniel Hahler + Piotr Banaszkiewicz + Simon Liedtke + lakka + Lukasz Balcerzak + Philippe Muller + Daniel Hahler have fun, holger krekel @@ -39,11 +39,11 @@ holger krekel ----------------------------------- - fix issue409 -- better interoperate with cx_freeze by not - trying to import from collections.abc which causes problems + trying to import from collections.abc which causes problems for py27/cx_freeze. Thanks Wolfgang L. for reporting and tracking it down. - fixed docs and code to use "pytest" instead of "py.test" almost everywhere. - Thanks Jurko Gospodnetic for the complete PR. + Thanks Jurko Gospodnetic for the complete PR. - fix issue425: mention at end of "py.test -h" that --markers and --fixtures work according to specified test path (or current dir) @@ -54,11 +54,10 @@ holger krekel - copy, cleanup and integrate py.io capture from pylib 1.4.20.dev2 (rev 13d9af95547e) - + - address issue416: clarify docs as to conftest.py loading semantics - fix issue429: comparing byte strings with non-ascii chars in assert expressions now work better. Thanks Floris Bruynooghe. - make capfd/capsys.capture private, its unused and shouldn't be exposed - diff --git a/doc/en/announce/release-2.6.1.rst b/doc/en/announce/release-2.6.1.rst index 2f3257cd8..fba6f2993 100644 --- a/doc/en/announce/release-2.6.1.rst +++ b/doc/en/announce/release-2.6.1.rst @@ -56,4 +56,3 @@ Changes 2.6.1 - don't use py.std import helper, rather import things directly. Thanks Bruno Oliveira. - diff --git a/doc/en/announce/release-2.6.2.rst b/doc/en/announce/release-2.6.2.rst index 4efc73a4e..f6ce178a1 100644 --- a/doc/en/announce/release-2.6.2.rst +++ b/doc/en/announce/release-2.6.2.rst @@ -49,4 +49,3 @@ holger krekel - Do not mark as universal wheel because Python 2.6 is different from other builds due to the extra argparse dependency. Fixes issue566. Thanks sontek. - diff --git a/doc/en/announce/release-2.6.3.rst b/doc/en/announce/release-2.6.3.rst index ee0d2692c..7353dfee7 100644 --- a/doc/en/announce/release-2.6.3.rst +++ b/doc/en/announce/release-2.6.3.rst @@ -49,4 +49,3 @@ Changes 2.6.3 - check xfail/skip also with non-python function test items. Thanks Floris Bruynooghe. 
- diff --git a/doc/en/announce/release-2.7.0.rst b/doc/en/announce/release-2.7.0.rst index 4e317ff8f..d63081edb 100644 --- a/doc/en/announce/release-2.7.0.rst +++ b/doc/en/announce/release-2.7.0.rst @@ -98,4 +98,3 @@ holger krekel - On failure, the ``sys.last_value``, ``sys.last_type`` and ``sys.last_traceback`` are set, so that a user can inspect the error via postmortem debugging (almarklein). - diff --git a/doc/en/announce/release-2.7.2.rst b/doc/en/announce/release-2.7.2.rst index 69130ad62..1e3950de4 100644 --- a/doc/en/announce/release-2.7.2.rst +++ b/doc/en/announce/release-2.7.2.rst @@ -55,4 +55,3 @@ The py.test Development Team - fix issue756, fix issue752 (and similar issues): depend on py-1.4.29 which has a refined algorithm for traceback generation. - diff --git a/doc/en/announce/release-2.8.3.rst b/doc/en/announce/release-2.8.3.rst index d080ac724..b131a7e1f 100644 --- a/doc/en/announce/release-2.8.3.rst +++ b/doc/en/announce/release-2.8.3.rst @@ -53,7 +53,6 @@ The py.test Development Team Thanks Gabriel Reis for the PR. - add more talks to the documentation -- extend documentation on the --ignore cli option -- use pytest-runner for setuptools integration +- extend documentation on the --ignore cli option +- use pytest-runner for setuptools integration - minor fixes for interaction with OS X El Capitan system integrity protection (thanks Florian) - diff --git a/doc/en/announce/release-2.8.7.rst b/doc/en/announce/release-2.8.7.rst index d98d73106..9005f5636 100644 --- a/doc/en/announce/release-2.8.7.rst +++ b/doc/en/announce/release-2.8.7.rst @@ -28,4 +28,4 @@ The py.test Development Team 2.8.7 (compared to 2.8.6) ------------------------- -- fix #1338: use predictable object resolution for monkeypatch \ No newline at end of file +- fix #1338: use predictable object resolution for monkeypatch diff --git a/doc/en/announce/release-2.9.0.rst b/doc/en/announce/release-2.9.0.rst index 011b1ffb9..8d829996d 100644 --- a/doc/en/announce/release-2.9.0.rst +++ b/doc/en/announce/release-2.9.0.rst @@ -14,25 +14,25 @@ As usual, you can upgrade from pypi via:: Thanks to all who contributed to this release, among them: - Anatoly Bubenkov - Bruno Oliveira - Buck Golemon - David Vierra - Florian Bruhin - Galaczi Endre - Georgy Dyuldin - Lukas Bednar - Luke Murphy - Marcin Biernat - Matt Williams - Michael Aquilina - Raphael Pierzina - Ronny Pfannschmidt - Ryan Wooden - Tiemo Kieft - TomV - holger krekel - jab + Anatoly Bubenkov + Bruno Oliveira + Buck Golemon + David Vierra + Florian Bruhin + Galaczi Endre + Georgy Dyuldin + Lukas Bednar + Luke Murphy + Marcin Biernat + Matt Williams + Michael Aquilina + Raphael Pierzina + Ronny Pfannschmidt + Ryan Wooden + Tiemo Kieft + TomV + holger krekel + jab Happy testing, @@ -76,18 +76,18 @@ The py.test Development Team **Changes** * **Important**: `py.code `_ has been - merged into the ``pytest`` repository as ``pytest._code``. This decision - was made because ``py.code`` had very few uses outside ``pytest`` and the - fact that it was in a different repository made it difficult to fix bugs on + merged into the ``pytest`` repository as ``pytest._code``. This decision + was made because ``py.code`` had very few uses outside ``pytest`` and the + fact that it was in a different repository made it difficult to fix bugs on its code in a timely manner. The team hopes with this to be able to better refactor out and improve that code. This change shouldn't affect users, but it is useful to let users aware if they encounter any strange behavior. 
- - Keep in mind that the code for ``pytest._code`` is **private** and + + Keep in mind that the code for ``pytest._code`` is **private** and **experimental**, so you definitely should not import it explicitly! - Please note that the original ``py.code`` is still available in + Please note that the original ``py.code`` is still available in `pylib `_. * ``pytest_enter_pdb`` now optionally receives the pytest config object. @@ -129,8 +129,8 @@ The py.test Development Team * Fix (`#1422`_): junit record_xml_property doesn't allow multiple records with same name. - - + + .. _`traceback style docs`: https://pytest.org/latest/usage.html#modifying-python-traceback-printing .. _#1422: https://github.com/pytest-dev/pytest/issues/1422 @@ -156,4 +156,4 @@ The py.test Development Team .. _@tomviner: https://github.com/tomviner .. _@RonnyPfannschmidt: https://github.com/RonnyPfannschmidt .. _@rabbbit: https://github.com/rabbbit -.. _@hackebrot: https://github.com/hackebrot \ No newline at end of file +.. _@hackebrot: https://github.com/hackebrot diff --git a/doc/en/announce/release-2.9.1.rst b/doc/en/announce/release-2.9.1.rst index 3277da1e9..c71f38516 100644 --- a/doc/en/announce/release-2.9.1.rst +++ b/doc/en/announce/release-2.9.1.rst @@ -14,17 +14,17 @@ As usual, you can upgrade from pypi via:: Thanks to all who contributed to this release, among them: - Bruno Oliveira - Daniel Hahler - Dmitry Malinovsky - Florian Bruhin - Floris Bruynooghe - Matt Bachmann - Ronny Pfannschmidt - TomV - Vladimir Bolshakov - Zearin - palaviv + Bruno Oliveira + Daniel Hahler + Dmitry Malinovsky + Florian Bruhin + Floris Bruynooghe + Matt Bachmann + Ronny Pfannschmidt + TomV + Vladimir Bolshakov + Zearin + palaviv Happy testing, diff --git a/doc/en/announce/release-3.0.0.rst b/doc/en/announce/release-3.0.0.rst index 4bf1e8534..ca3e9e327 100644 --- a/doc/en/announce/release-3.0.0.rst +++ b/doc/en/announce/release-3.0.0.rst @@ -8,10 +8,10 @@ against itself, passing on many different interpreters and platforms. This release contains a lot of bugs fixes and improvements, and much of the work done on it was possible because of the 2016 Sprint[1], which -was funded by an indiegogo campaign which raised over US$12,000 with -nearly 100 backers. +was funded by an indiegogo campaign which raised over US$12,000 with +nearly 100 backers. -There's a "What's new in pytest 3.0" [2] blog post highlighting the +There's a "What's new in pytest 3.0" [2] blog post highlighting the major features in this release. To see the complete changelog and documentation, please visit: diff --git a/doc/en/announce/release-3.0.1.rst b/doc/en/announce/release-3.0.1.rst index 9fb38047b..eb6f6a50e 100644 --- a/doc/en/announce/release-3.0.1.rst +++ b/doc/en/announce/release-3.0.1.rst @@ -7,7 +7,7 @@ This release fixes some regressions reported in version 3.0.0, being a drop-in replacement. To upgrade: pip install --upgrade pytest - + The changelog is available at http://doc.pytest.org/en/latest/changelog.html. Thanks to all who contributed to this release, among them: diff --git a/doc/en/announce/release-3.0.2.rst b/doc/en/announce/release-3.0.2.rst index 9d1c05f2d..4af412fc5 100644 --- a/doc/en/announce/release-3.0.2.rst +++ b/doc/en/announce/release-3.0.2.rst @@ -7,7 +7,7 @@ This release fixes some regressions and bugs reported in version 3.0.1, being a drop-in replacement. To upgrade:: pip install --upgrade pytest - + The changelog is available at http://doc.pytest.org/en/latest/changelog.html. 
Thanks to all who contributed to this release, among them: diff --git a/doc/en/announce/release-3.0.3.rst b/doc/en/announce/release-3.0.3.rst index f00172195..896d47873 100644 --- a/doc/en/announce/release-3.0.3.rst +++ b/doc/en/announce/release-3.0.3.rst @@ -3,11 +3,11 @@ pytest-3.0.3 pytest 3.0.3 has just been released to PyPI. -This release fixes some regressions and bugs reported in the last version, +This release fixes some regressions and bugs reported in the last version, being a drop-in replacement. To upgrade:: pip install --upgrade pytest - + The changelog is available at http://doc.pytest.org/en/latest/changelog.html. Thanks to all who contributed to this release, among them: diff --git a/doc/en/announce/release-3.0.4.rst b/doc/en/announce/release-3.0.4.rst index 852057037..855bc56d5 100644 --- a/doc/en/announce/release-3.0.4.rst +++ b/doc/en/announce/release-3.0.4.rst @@ -3,11 +3,11 @@ pytest-3.0.4 pytest 3.0.4 has just been released to PyPI. -This release fixes some regressions and bugs reported in the last version, +This release fixes some regressions and bugs reported in the last version, being a drop-in replacement. To upgrade:: pip install --upgrade pytest - + The changelog is available at http://doc.pytest.org/en/latest/changelog.html. Thanks to all who contributed to this release, among them: diff --git a/doc/en/announce/release-3.0.5.rst b/doc/en/announce/release-3.0.5.rst index 3e2419d7e..2f3698275 100644 --- a/doc/en/announce/release-3.0.5.rst +++ b/doc/en/announce/release-3.0.5.rst @@ -6,7 +6,7 @@ pytest 3.0.5 has just been released to PyPI. This is a bug-fix release, being a drop-in replacement. To upgrade:: pip install --upgrade pytest - + The changelog is available at http://doc.pytest.org/en/latest/changelog.html. Thanks to all who contributed to this release, among them: diff --git a/doc/en/announce/release-3.0.6.rst b/doc/en/announce/release-3.0.6.rst index 2988b9cb3..149c2d65e 100644 --- a/doc/en/announce/release-3.0.6.rst +++ b/doc/en/announce/release-3.0.6.rst @@ -6,7 +6,7 @@ pytest 3.0.6 has just been released to PyPI. This is a bug-fix release, being a drop-in replacement. To upgrade:: pip install --upgrade pytest - + The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. diff --git a/doc/en/announce/release-3.0.7.rst b/doc/en/announce/release-3.0.7.rst index 591557aa7..b37e4f61d 100644 --- a/doc/en/announce/release-3.0.7.rst +++ b/doc/en/announce/release-3.0.7.rst @@ -6,7 +6,7 @@ pytest 3.0.7 has just been released to PyPI. This is a bug-fix release, being a drop-in replacement. To upgrade:: pip install --upgrade pytest - + The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. Thanks to all who contributed to this release, among them: diff --git a/doc/en/announce/release-3.1.1.rst b/doc/en/announce/release-3.1.1.rst index 370b8fd73..4ce753197 100644 --- a/doc/en/announce/release-3.1.1.rst +++ b/doc/en/announce/release-3.1.1.rst @@ -6,7 +6,7 @@ pytest 3.1.1 has just been released to PyPI. This is a bug-fix release, being a drop-in replacement. To upgrade:: pip install --upgrade pytest - + The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. Thanks to all who contributed to this release, among them: diff --git a/doc/en/announce/release-3.1.2.rst b/doc/en/announce/release-3.1.2.rst index 60168a857..8ed0c93e9 100644 --- a/doc/en/announce/release-3.1.2.rst +++ b/doc/en/announce/release-3.1.2.rst @@ -6,7 +6,7 @@ pytest 3.1.2 has just been released to PyPI. 
This is a bug-fix release, being a drop-in replacement. To upgrade:: pip install --upgrade pytest - + The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. Thanks to all who contributed to this release, among them: diff --git a/doc/en/announce/release-3.1.3.rst b/doc/en/announce/release-3.1.3.rst index a55280626..d7771f922 100644 --- a/doc/en/announce/release-3.1.3.rst +++ b/doc/en/announce/release-3.1.3.rst @@ -6,7 +6,7 @@ pytest 3.1.3 has just been released to PyPI. This is a bug-fix release, being a drop-in replacement. To upgrade:: pip install --upgrade pytest - + The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. Thanks to all who contributed to this release, among them: diff --git a/doc/en/announce/release-3.2.1.rst b/doc/en/announce/release-3.2.1.rst index 899ffcd4b..afe2c5bfe 100644 --- a/doc/en/announce/release-3.2.1.rst +++ b/doc/en/announce/release-3.2.1.rst @@ -6,7 +6,7 @@ pytest 3.2.1 has just been released to PyPI. This is a bug-fix release, being a drop-in replacement. To upgrade:: pip install --upgrade pytest - + The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. Thanks to all who contributed to this release, among them: diff --git a/doc/en/announce/release-3.2.2.rst b/doc/en/announce/release-3.2.2.rst index 599bf8727..88e32873a 100644 --- a/doc/en/announce/release-3.2.2.rst +++ b/doc/en/announce/release-3.2.2.rst @@ -6,7 +6,7 @@ pytest 3.2.2 has just been released to PyPI. This is a bug-fix release, being a drop-in replacement. To upgrade:: pip install --upgrade pytest - + The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. Thanks to all who contributed to this release, among them: diff --git a/doc/en/announce/release-3.2.3.rst b/doc/en/announce/release-3.2.3.rst index 589374974..ddfda4d13 100644 --- a/doc/en/announce/release-3.2.3.rst +++ b/doc/en/announce/release-3.2.3.rst @@ -6,7 +6,7 @@ pytest 3.2.3 has just been released to PyPI. This is a bug-fix release, being a drop-in replacement. To upgrade:: pip install --upgrade pytest - + The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. Thanks to all who contributed to this release, among them: diff --git a/doc/en/announce/release-3.2.4.rst b/doc/en/announce/release-3.2.4.rst index 44bfcc27e..65e486b7a 100644 --- a/doc/en/announce/release-3.2.4.rst +++ b/doc/en/announce/release-3.2.4.rst @@ -6,7 +6,7 @@ pytest 3.2.4 has just been released to PyPI. This is a bug-fix release, being a drop-in replacement. To upgrade:: pip install --upgrade pytest - + The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. Thanks to all who contributed to this release, among them: diff --git a/doc/en/announce/release-3.2.5.rst b/doc/en/announce/release-3.2.5.rst index a520ce2b3..2e5304c6f 100644 --- a/doc/en/announce/release-3.2.5.rst +++ b/doc/en/announce/release-3.2.5.rst @@ -6,7 +6,7 @@ pytest 3.2.5 has just been released to PyPI. This is a bug-fix release, being a drop-in replacement. To upgrade:: pip install --upgrade pytest - + The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. Thanks to all who contributed to this release, among them: diff --git a/doc/en/announce/release-3.3.1.rst b/doc/en/announce/release-3.3.1.rst index 074c3d5ac..7eed836ae 100644 --- a/doc/en/announce/release-3.3.1.rst +++ b/doc/en/announce/release-3.3.1.rst @@ -6,7 +6,7 @@ pytest 3.3.1 has just been released to PyPI. 
This is a bug-fix release, being a drop-in replacement. To upgrade:: pip install --upgrade pytest - + The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. Thanks to all who contributed to this release, among them: diff --git a/doc/en/announce/release-3.3.2.rst b/doc/en/announce/release-3.3.2.rst index a994aff25..d9acef947 100644 --- a/doc/en/announce/release-3.3.2.rst +++ b/doc/en/announce/release-3.3.2.rst @@ -6,7 +6,7 @@ pytest 3.3.2 has just been released to PyPI. This is a bug-fix release, being a drop-in replacement. To upgrade:: pip install --upgrade pytest - + The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. Thanks to all who contributed to this release, among them: diff --git a/doc/en/announce/release-3.4.1.rst b/doc/en/announce/release-3.4.1.rst index 0c5932e62..e37f5d7e2 100644 --- a/doc/en/announce/release-3.4.1.rst +++ b/doc/en/announce/release-3.4.1.rst @@ -6,7 +6,7 @@ pytest 3.4.1 has just been released to PyPI. This is a bug-fix release, being a drop-in replacement. To upgrade:: pip install --upgrade pytest - + The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. Thanks to all who contributed to this release, among them: diff --git a/doc/en/announce/release-3.4.2.rst b/doc/en/announce/release-3.4.2.rst index 59bbf6191..8e9988228 100644 --- a/doc/en/announce/release-3.4.2.rst +++ b/doc/en/announce/release-3.4.2.rst @@ -6,7 +6,7 @@ pytest 3.4.2 has just been released to PyPI. This is a bug-fix release, being a drop-in replacement. To upgrade:: pip install --upgrade pytest - + The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. Thanks to all who contributed to this release, among them: diff --git a/doc/en/announce/release-3.5.1.rst b/doc/en/announce/release-3.5.1.rst index 8eadcc3ac..91f14390e 100644 --- a/doc/en/announce/release-3.5.1.rst +++ b/doc/en/announce/release-3.5.1.rst @@ -6,7 +6,7 @@ pytest 3.5.1 has just been released to PyPI. This is a bug-fix release, being a drop-in replacement. To upgrade:: pip install --upgrade pytest - + The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. 
Thanks to all who contributed to this release, among them: diff --git a/doc/en/assert.rst b/doc/en/assert.rst index 4a852978e..a2c588d81 100644 --- a/doc/en/assert.rst +++ b/doc/en/assert.rst @@ -29,17 +29,17 @@ you will see the return value of the function call:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - + test_assert1.py F [100%] - + ================================= FAILURES ================================= ______________________________ test_function _______________________________ - + def test_function(): > assert f() == 4 E assert 3 == 4 E + where 3 = f() - + test_assert1.py:5: AssertionError ========================= 1 failed in 0.12 seconds ========================= @@ -172,12 +172,12 @@ if you run this module:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - + test_assert2.py F [100%] - + ================================= FAILURES ================================= ___________________________ test_set_comparison ____________________________ - + def test_set_comparison(): set1 = set("1308") set2 = set("8035") @@ -188,7 +188,7 @@ if you run this module:: E Extra items in the right set: E '5' E Use -v to get the full diff - + test_assert2.py:5: AssertionError ========================= 1 failed in 0.12 seconds ========================= @@ -209,7 +209,7 @@ the ``pytest_assertrepr_compare`` hook. .. autofunction:: _pytest.hookspec.pytest_assertrepr_compare :noindex: -As an example consider adding the following hook in a :ref:`conftest.py ` +As an example consider adding the following hook in a :ref:`conftest.py ` file which provides an alternative explanation for ``Foo`` objects:: # content of conftest.py @@ -241,14 +241,14 @@ the conftest file:: F [100%] ================================= FAILURES ================================= _______________________________ test_compare _______________________________ - + def test_compare(): f1 = Foo(1) f2 = Foo(2) > assert f1 == f2 E assert Comparing Foo instances: E vals: 1 != 2 - + test_foocompare.py:11: AssertionError 1 failed in 0.12 seconds diff --git a/doc/en/bash-completion.rst b/doc/en/bash-completion.rst index 81fe62183..58c3d878d 100644 --- a/doc/en/bash-completion.rst +++ b/doc/en/bash-completion.rst @@ -23,6 +23,3 @@ For permanent (but not global) ``pytest`` activation, use:: For one-time activation of argcomplete for ``pytest`` only, use:: eval "$(register-python-argcomplete pytest)" - - - diff --git a/doc/en/builtin.rst b/doc/en/builtin.rst index 554cf59b2..c2d23469b 100644 --- a/doc/en/builtin.rst +++ b/doc/en/builtin.rst @@ -17,13 +17,13 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a $ pytest -q --fixtures cache Return a cache object that can persist state between testing sessions. - + cache.get(key, default) cache.set(key, value) - + Keys must be a ``/`` separated value, where the first part is usually the name of your plugin or application to avoid clashes with other cache users. - + Values can be any object handled by the json stdlib module. capsys Enable capturing of writes to ``sys.stdout`` and ``sys.stderr`` and make @@ -49,9 +49,9 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a Fixture that returns a :py:class:`dict` that will be injected into the namespace of doctests. pytestconfig Session-scoped fixture that returns the :class:`_pytest.config.Config` object. 
- + Example:: - + def test_foo(pytestconfig): if pytestconfig.getoption("verbose"): ... @@ -61,9 +61,9 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a configured reporters, like JUnit XML. The fixture is callable with ``(name, value)``, with value being automatically xml-encoded. - + Example:: - + def test_function(record_property): record_property("example_key", 1) record_xml_property @@ -74,9 +74,9 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a automatically xml-encoded caplog Access and control log capturing. - + Captured logs are available through the following methods:: - + * caplog.text -> string containing formatted log output * caplog.records -> list of logging.LogRecord instances * caplog.record_tuples -> list of (logger_name, level, message) tuples @@ -84,7 +84,7 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a monkeypatch The returned ``monkeypatch`` fixture provides these helper methods to modify objects, dictionaries or os.environ:: - + monkeypatch.setattr(obj, name, value, raising=True) monkeypatch.delattr(obj, name, raising=True) monkeypatch.setitem(mapping, name, value) @@ -93,14 +93,14 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a monkeypatch.delenv(name, value, raising=True) monkeypatch.syspath_prepend(path) monkeypatch.chdir(path) - + All modifications will be undone after the requesting test function or fixture has finished. The ``raising`` parameter determines if a KeyError or AttributeError will be raised if the set/deletion operation has no target. recwarn Return a :class:`WarningsRecorder` instance that records all warnings emitted by test functions. - + See http://docs.python.org/library/warnings.html for information on warning categories. tmpdir_factory @@ -111,13 +111,12 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a created as a sub directory of the base temporary directory. The returned object is a `py.path.local`_ path object. - + .. _`py.path.local`: https://py.readthedocs.io/en/latest/path.html - + no tests ran in 0.12 seconds You can also interactively ask for help, e.g. by typing on the Python interactive prompt something like:: import pytest help(pytest) - diff --git a/doc/en/cache.rst b/doc/en/cache.rst index 57a091116..37bcf7070 100644 --- a/doc/en/cache.rst +++ b/doc/en/cache.rst @@ -20,7 +20,7 @@ last ``pytest`` invocation: For cleanup (usually not needed), a ``--cache-clear`` option allows to remove all cross-session cache contents ahead of a test run. -Other plugins may access the `config.cache`_ object to set/get +Other plugins may access the `config.cache`_ object to set/get **json encodable** values between ``pytest`` invocations. .. note:: @@ -49,26 +49,26 @@ If you run this for the first time you will see two failures:: .................F.......F........................ 
[100%] ================================= FAILURES ================================= _______________________________ test_num[17] _______________________________ - + i = 17 - + @pytest.mark.parametrize("i", range(50)) def test_num(i): if i in (17, 25): > pytest.fail("bad luck") E Failed: bad luck - + test_50.py:6: Failed _______________________________ test_num[25] _______________________________ - + i = 25 - + @pytest.mark.parametrize("i", range(50)) def test_num(i): if i in (17, 25): > pytest.fail("bad luck") E Failed: bad luck - + test_50.py:6: Failed 2 failed, 48 passed in 0.12 seconds @@ -80,31 +80,31 @@ If you then run it with ``--lf``:: rootdir: $REGENDOC_TMPDIR, inifile: collected 50 items / 48 deselected run-last-failure: rerun previous 2 failures - + test_50.py FF [100%] - + ================================= FAILURES ================================= _______________________________ test_num[17] _______________________________ - + i = 17 - + @pytest.mark.parametrize("i", range(50)) def test_num(i): if i in (17, 25): > pytest.fail("bad luck") E Failed: bad luck - + test_50.py:6: Failed _______________________________ test_num[25] _______________________________ - + i = 25 - + @pytest.mark.parametrize("i", range(50)) def test_num(i): if i in (17, 25): > pytest.fail("bad luck") E Failed: bad luck - + test_50.py:6: Failed ================= 2 failed, 48 deselected in 0.12 seconds ================== @@ -121,31 +121,31 @@ of ``FF`` and dots):: rootdir: $REGENDOC_TMPDIR, inifile: collected 50 items run-last-failure: rerun previous 2 failures first - + test_50.py FF................................................ [100%] - + ================================= FAILURES ================================= _______________________________ test_num[17] _______________________________ - + i = 17 - + @pytest.mark.parametrize("i", range(50)) def test_num(i): if i in (17, 25): > pytest.fail("bad luck") E Failed: bad luck - + test_50.py:6: Failed _______________________________ test_num[25] _______________________________ - + i = 25 - + @pytest.mark.parametrize("i", range(50)) def test_num(i): if i in (17, 25): > pytest.fail("bad luck") E Failed: bad luck - + test_50.py:6: Failed =================== 2 failed, 48 passed in 0.12 seconds ==================== @@ -198,13 +198,13 @@ of the sleep:: F [100%] ================================= FAILURES ================================= ______________________________ test_function _______________________________ - + mydata = 42 - + def test_function(mydata): > assert mydata == 23 E assert 42 == 23 - + test_caching.py:14: AssertionError 1 failed in 0.12 seconds @@ -215,13 +215,13 @@ the cache and this will be quick:: F [100%] ================================= FAILURES ================================= ______________________________ test_function _______________________________ - + mydata = 42 - + def test_function(mydata): > assert mydata == 23 E assert 42 == 23 - + test_caching.py:14: AssertionError 1 failed in 0.12 seconds @@ -246,7 +246,7 @@ You can always peek at the content of the cache using the ['test_caching.py::test_function'] example/value contains: 42 - + ======================= no tests ran in 0.12 seconds ======================= Clearing Cache content @@ -260,5 +260,3 @@ by adding the ``--cache-clear`` option like this:: This is recommended for invocations from Continuous Integration servers where isolation and correctness is more important than speed. 
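(A condensed sketch of the cached fixture behind the example output above, using the documented ``cache.get``/``cache.set`` API; the key name and the sleep stand in for an expensive computation)::

    import time

    import pytest

    @pytest.fixture
    def mydata(request):
        val = request.config.cache.get("example/value", None)
        if val is None:
            time.sleep(9 * 0.6)  # stand-in for an expensive computation
            val = 42
            request.config.cache.set("example/value", val)
        return val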
- - diff --git a/doc/en/capture.rst b/doc/en/capture.rst index 901def602..900fe3fb4 100644 --- a/doc/en/capture.rst +++ b/doc/en/capture.rst @@ -68,16 +68,16 @@ of the failing function and hide the other one:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - + test_module.py .F [100%] - + ================================= FAILURES ================================= ________________________________ test_func2 ________________________________ - + def test_func2(): > assert False E assert False - + test_module.py:9: AssertionError -------------------------- Captured stdout setup --------------------------- setting up diff --git a/doc/en/check_sphinx.py b/doc/en/check_sphinx.py index 0f536ffa6..af609624b 100644 --- a/doc/en/check_sphinx.py +++ b/doc/en/check_sphinx.py @@ -1,17 +1,17 @@ -import py import subprocess + + def test_build_docs(tmpdir): doctrees = tmpdir.join("doctrees") htmldir = tmpdir.join("html") - subprocess.check_call([ - "sphinx-build", "-W", "-bhtml", - "-d", str(doctrees), ".", str(htmldir)]) + subprocess.check_call( + ["sphinx-build", "-W", "-bhtml", "-d", str(doctrees), ".", str(htmldir)] + ) + def test_linkcheck(tmpdir): doctrees = tmpdir.join("doctrees") htmldir = tmpdir.join("html") subprocess.check_call( - ["sphinx-build", "-blinkcheck", - "-d", str(doctrees), ".", str(htmldir)]) - - + ["sphinx-build", "-blinkcheck", "-d", str(doctrees), ".", str(htmldir)] + ) diff --git a/doc/en/conf.py b/doc/en/conf.py index d64b81117..ebff6ff3b 100644 --- a/doc/en/conf.py +++ b/doc/en/conf.py @@ -29,7 +29,7 @@ release = ".".join(version.split(".")[:2]) # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. -#sys.path.insert(0, os.path.abspath('.')) +# sys.path.insert(0, os.path.abspath('.')) autodoc_member_order = "bysource" todo_include_todos = 1 @@ -37,59 +37,68 @@ todo_include_todos = 1 # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' +# needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.autosummary', - 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode', 'sphinxcontrib_trio'] +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.todo", + "sphinx.ext.autosummary", + "sphinx.ext.intersphinx", + "sphinx.ext.viewcode", + "sphinxcontrib_trio", +] # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix of source filenames. -source_suffix = '.rst' +source_suffix = ".rst" # The encoding of source files. -#source_encoding = 'utf-8-sig' +# source_encoding = 'utf-8-sig' # The master toctree document. -master_doc = 'contents' +master_doc = "contents" # General information about the project. -project = u'pytest' +project = u"pytest" year = datetime.datetime.utcnow().year -copyright = u'2015–{} , holger krekel and pytest-dev team'.format(year) - +copyright = u"2015–{} , holger krekel and pytest-dev team".format(year) # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
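# A minimal sketch of the per-test output capturing that capture.rst
# describes above: the built-in ``capsys`` fixture exposes what was
# captured so far (the test name and message are illustrative).
def test_output_is_captured(capsys):
    print("hello")
    out, err = capsys.readouterr()  # snapshot of captured stdout/stderr
    assert out == "hello\n"
    assert err == ""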
-#language = None +# language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: -#today = '' +# today = '' # Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' +# today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ['links.inc', '_build', 'naming20.rst', 'test/*', +exclude_patterns = [ + "links.inc", + "_build", + "naming20.rst", + "test/*", "old_*", - '*attic*', - '*/attic*', - 'funcargs.rst', - 'setup.rst', - 'example/remoteinterp.rst', - ] + "*attic*", + "*/attic*", + "funcargs.rst", + "setup.rst", + "example/remoteinterp.rst", +] # The reST default role (used for this markup: `text`) to use for all documents. -#default_role = None +# default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True +# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). @@ -97,39 +106,36 @@ add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. -#show_authors = False +# show_authors = False # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - +pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] +# modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- -sys.path.append(os.path.abspath('_themes')) -html_theme_path = ['_themes'] +sys.path.append(os.path.abspath("_themes")) +html_theme_path = ["_themes"] # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -html_theme = 'flask' +html_theme = "flask" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. -html_theme_options = { - 'index_logo': None -} +html_theme_options = {"index_logo": None} # Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] +# html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -html_title = 'pytest documentation' +html_title = "pytest documentation" # A shorter title for the navigation bar. Default is the same as html_title. html_short_title = "pytest-%s" % release @@ -150,37 +156,37 @@ html_favicon = "img/pytest1favi.ico" # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' +# html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. -#html_use_smartypants = True +# html_use_smartypants = True # Custom sidebar templates, maps document names to template names. 
-#html_sidebars = {} -#html_sidebars = {'index': 'indexsidebar.html'} +# html_sidebars = {} +# html_sidebars = {'index': 'indexsidebar.html'} html_sidebars = { - 'index': [ - 'sidebarintro.html', - 'globaltoc.html', - 'links.html', - 'sourcelink.html', - 'searchbox.html' + "index": [ + "sidebarintro.html", + "globaltoc.html", + "links.html", + "sourcelink.html", + "searchbox.html", + ], + "**": [ + "globaltoc.html", + "relations.html", + "links.html", + "sourcelink.html", + "searchbox.html", ], - '**': [ - 'globaltoc.html', - 'relations.html', - 'links.html', - 'sourcelink.html', - 'searchbox.html' - ] } # Additional templates that should be rendered to pages, maps page names to # template names. -#html_additional_pages = {} -#html_additional_pages = {'index': 'index.html'} +# html_additional_pages = {} +# html_additional_pages = {'index': 'index.html'} # If false, no module index is generated. @@ -190,63 +196,68 @@ html_domain_indices = True html_use_index = False # If true, the index is split into individual pages for each letter. -#html_split_index = False +# html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = False # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True +# html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True +# html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. -#html_use_opensearch = '' +# html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None +# html_file_suffix = None # Output file base name for HTML help builder. -htmlhelp_basename = 'pytestdoc' +htmlhelp_basename = "pytestdoc" # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). -#latex_paper_size = 'letter' +# latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). -#latex_font_size = '10pt' +# latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ - ('contents', 'pytest.tex', u'pytest Documentation', - u'holger krekel, trainer and consultant, http://merlinux.eu', 'manual'), + ( + "contents", + "pytest.tex", + u"pytest Documentation", + u"holger krekel, trainer and consultant, http://merlinux.eu", + "manual", + ) ] # The name of an image file (relative to this directory) to place at the top of # the title page. -latex_logo = 'img/pytest1.png' +latex_logo = "img/pytest1.png" # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. -#latex_use_parts = False +# latex_use_parts = False # If true, show page references after internal links. -#latex_show_pagerefs = False +# latex_show_pagerefs = False # If true, show URL addresses after external links. -#latex_show_urls = False +# latex_show_urls = False # Additional stuff for the LaTeX preamble. -#latex_preamble = '' +# latex_preamble = '' # Documents to append as an appendix to all manuals. -#latex_appendices = [] +# latex_appendices = [] # If false, no module index is generated. latex_domain_indices = False @@ -255,72 +266,78 @@ latex_domain_indices = False # One entry per manual page. 
List of tuples # (source start file, name, description, authors, manual section). -man_pages = [ - ('usage', 'pytest', u'pytest usage', - [u'holger krekel at merlinux eu'], 1) -] +man_pages = [("usage", "pytest", u"pytest usage", [u"holger krekel at merlinux eu"], 1)] # -- Options for Epub output --------------------------------------------------- # Bibliographic Dublin Core info. -epub_title = u'pytest' -epub_author = u'holger krekel at merlinux eu' -epub_publisher = u'holger krekel at merlinux eu' -epub_copyright = u'2013, holger krekel et alii' +epub_title = u"pytest" +epub_author = u"holger krekel at merlinux eu" +epub_publisher = u"holger krekel at merlinux eu" +epub_copyright = u"2013, holger krekel et alii" # The language of the text. It defaults to the language option # or en if the language is not set. -#epub_language = '' +# epub_language = '' # The scheme of the identifier. Typical schemes are ISBN or URL. -#epub_scheme = '' +# epub_scheme = '' # The unique identifier of the text. This can be a ISBN number # or the project homepage. -#epub_identifier = '' +# epub_identifier = '' # A unique identification for the text. -#epub_uid = '' +# epub_uid = '' # HTML files that should be inserted before the pages created by sphinx. # The format is a list of tuples containing the path and title. -#epub_pre_files = [] +# epub_pre_files = [] # HTML files shat should be inserted after the pages created by sphinx. # The format is a list of tuples containing the path and title. -#epub_post_files = [] +# epub_post_files = [] # A list of files that should not be packed into the epub file. -#epub_exclude_files = [] +# epub_exclude_files = [] # The depth of the table of contents in toc.ncx. -#epub_tocdepth = 3 +# epub_tocdepth = 3 # Allow duplicate toc entries. -#epub_tocdup = True +# epub_tocdup = True # -- Options for texinfo output ------------------------------------------------ texinfo_documents = [ - (master_doc, 'pytest', 'pytest Documentation', - ('Holger Krekel@*Benjamin Peterson@*Ronny Pfannschmidt@*' - 'Floris Bruynooghe@*others'), - 'pytest', - 'simple powerful testing with Python', - 'Programming', - 1), + ( + master_doc, + "pytest", + "pytest Documentation", + ( + "Holger Krekel@*Benjamin Peterson@*Ronny Pfannschmidt@*" + "Floris Bruynooghe@*others" + ), + "pytest", + "simple powerful testing with Python", + "Programming", + 1, + ) ] # Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = {'python': ('http://docs.python.org/3', None)} +intersphinx_mapping = {"python": ("http://docs.python.org/3", None)} def setup(app): - #from sphinx.ext.autodoc import cut_lines - #app.connect('autodoc-process-docstring', cut_lines(4, what=['module'])) - app.add_description_unit('confval', 'confval', - objname='configuration value', - indextemplate='pair: %s; configuration value') + # from sphinx.ext.autodoc import cut_lines + # app.connect('autodoc-process-docstring', cut_lines(4, what=['module'])) + app.add_description_unit( + "confval", + "confval", + objname="configuration value", + indextemplate="pair: %s; configuration value", + ) diff --git a/doc/en/contact.rst b/doc/en/contact.rst index 83d496640..efc6a8f57 100644 --- a/doc/en/contact.rst +++ b/doc/en/contact.rst @@ -8,9 +8,9 @@ Contact channels - `pytest issue tracker`_ to report bugs or suggest features (for version 2.0 and above). -- `pytest on stackoverflow.com `_ - to post questions with the tag ``pytest``. 
New Questions will usually - be seen by pytest users or developers and answered quickly. +- `pytest on stackoverflow.com `_ + to post questions with the tag ``pytest``. New Questions will usually + be seen by pytest users or developers and answered quickly. - `Testing In Python`_: a mailing list for Python testing tools and discussion. @@ -47,4 +47,3 @@ Contact channels .. _`development mailing list`: .. _`pytest-dev at python.org (mailing list)`: http://mail.python.org/mailman/listinfo/pytest-dev .. _`pytest-commit at python.org (mailing list)`: http://mail.python.org/mailman/listinfo/pytest-commit - diff --git a/doc/en/contents.rst b/doc/en/contents.rst index 38a4be941..9f1d8d85a 100644 --- a/doc/en/contents.rst +++ b/doc/en/contents.rst @@ -62,4 +62,3 @@ Full pytest documentation :maxdepth: 1 changelog - diff --git a/doc/en/customize.rst b/doc/en/customize.rst index e89cdc002..c1a3498c7 100644 --- a/doc/en/customize.rst +++ b/doc/en/customize.rst @@ -38,7 +38,7 @@ Here's a summary what ``pytest`` uses ``rootdir`` for: Important to emphasize that ``rootdir`` is **NOT** used to modify ``sys.path``/``PYTHONPATH`` or influence how modules are imported. See :ref:`pythonpath` for more details. -``--rootdir=path`` command-line option can be used to force a specific directory. +``--rootdir=path`` command-line option can be used to force a specific directory. The directory passed may contain environment variables when it is used in conjunction with ``addopts`` in a ``pytest.ini`` file. diff --git a/doc/en/development_guide.rst b/doc/en/development_guide.rst index f69371a5a..69e866943 100644 --- a/doc/en/development_guide.rst +++ b/doc/en/development_guide.rst @@ -40,7 +40,7 @@ avoid creating labels just for the sake of creating them. Each label should include a description in the GitHub's interface stating its purpose. Temporary labels -~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~ To classify issues for a special event it is encouraged to create a temporary label. This helps those involved to find the relevant issues to work on. Examples of that are sprints in Python events or global hacking events. diff --git a/doc/en/doctest.rst b/doc/en/doctest.rst index 2ee7110b3..9488ee826 100644 --- a/doc/en/doctest.rst +++ b/doc/en/doctest.rst @@ -65,9 +65,9 @@ then you can just invoke ``pytest`` without command line options:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini collected 1 item - + mymodule.py . 
[100%] - + ========================= 1 passed in 0.12 seconds ========================= It is possible to use fixtures using the ``getfixture`` helper:: @@ -168,5 +168,3 @@ by using one of standard doctest modules format in options pytest --doctest-modules --doctest-report cdiff pytest --doctest-modules --doctest-report ndiff pytest --doctest-modules --doctest-report only_first_failure - - diff --git a/doc/en/example/assertion/failure_demo.py b/doc/en/example/assertion/failure_demo.py index 3ae0268d3..dc27f5dfb 100644 --- a/doc/en/example/assertion/failure_demo.py +++ b/doc/en/example/assertion/failure_demo.py @@ -2,135 +2,158 @@ from pytest import raises import _pytest._code import py -def otherfunc(a,b): - assert a==b -def somefunc(x,y): - otherfunc(x,y) +def otherfunc(a, b): + assert a == b + + +def somefunc(x, y): + otherfunc(x, y) + + +def otherfunc_multi(a, b): + assert a == b -def otherfunc_multi(a,b): - assert (a == - b) def test_generative(param1, param2): assert param1 * 2 < param2 + def pytest_generate_tests(metafunc): - if 'param1' in metafunc.fixturenames: + if "param1" in metafunc.fixturenames: metafunc.addcall(funcargs=dict(param1=3, param2=6)) + class TestFailing(object): + def test_simple(self): + def f(): return 42 + def g(): return 43 assert f() == g() def test_simple_multiline(self): - otherfunc_multi( - 42, - 6*9) + otherfunc_multi(42, 6 * 9) def test_not(self): + def f(): return 42 + assert not f() + class TestSpecialisedExplanations(object): + def test_eq_text(self): - assert 'spam' == 'eggs' + assert "spam" == "eggs" def test_eq_similar_text(self): - assert 'foo 1 bar' == 'foo 2 bar' + assert "foo 1 bar" == "foo 2 bar" def test_eq_multiline_text(self): - assert 'foo\nspam\nbar' == 'foo\neggs\nbar' + assert "foo\nspam\nbar" == "foo\neggs\nbar" def test_eq_long_text(self): - a = '1'*100 + 'a' + '2'*100 - b = '1'*100 + 'b' + '2'*100 + a = "1" * 100 + "a" + "2" * 100 + b = "1" * 100 + "b" + "2" * 100 assert a == b def test_eq_long_text_multiline(self): - a = '1\n'*100 + 'a' + '2\n'*100 - b = '1\n'*100 + 'b' + '2\n'*100 + a = "1\n" * 100 + "a" + "2\n" * 100 + b = "1\n" * 100 + "b" + "2\n" * 100 assert a == b def test_eq_list(self): assert [0, 1, 2] == [0, 1, 3] def test_eq_list_long(self): - a = [0]*100 + [1] + [3]*100 - b = [0]*100 + [2] + [3]*100 + a = [0] * 100 + [1] + [3] * 100 + b = [0] * 100 + [2] + [3] * 100 assert a == b def test_eq_dict(self): - assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0} + assert {"a": 0, "b": 1, "c": 0} == {"a": 0, "b": 2, "d": 0} def test_eq_set(self): - assert set([0, 10, 11, 12]) == set([0, 20, 21]) + assert {0, 10, 11, 12} == {0, 20, 21} def test_eq_longer_list(self): - assert [1,2] == [1,2,3] + assert [1, 2] == [1, 2, 3] def test_in_list(self): assert 1 in [0, 2, 3, 4, 5] def test_not_in_text_multiline(self): - text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail' - assert 'foo' not in text + text = "some multiline\ntext\nwhich\nincludes foo\nand a\ntail" + assert "foo" not in text def test_not_in_text_single(self): - text = 'single foo line' - assert 'foo' not in text + text = "single foo line" + assert "foo" not in text def test_not_in_text_single_long(self): - text = 'head ' * 50 + 'foo ' + 'tail ' * 20 - assert 'foo' not in text + text = "head " * 50 + "foo " + "tail " * 20 + assert "foo" not in text def test_not_in_text_single_long_term(self): - text = 'head ' * 50 + 'f'*70 + 'tail ' * 20 - assert 'f'*70 not in text + text = "head " * 50 + "f" * 70 + "tail " * 20 + assert "f" * 70 not in text def 
test_attribute(): + class Foo(object): b = 1 + i = Foo() assert i.b == 2 def test_attribute_instance(): + class Foo(object): b = 1 + assert Foo().b == 2 def test_attribute_failure(): + class Foo(object): + def _get_b(self): - raise Exception('Failed to get attrib') + raise Exception("Failed to get attrib") + b = property(_get_b) + i = Foo() assert i.b == 2 def test_attribute_multiple(): + class Foo(object): b = 1 + class Bar(object): b = 2 + assert Foo().b == Bar().b def globf(x): - return x+1 + return x + 1 + class TestRaises(object): + def test_raises(self): - s = 'qwe' + s = "qwe" # NOQA raises(TypeError, "int(s)") def test_raises_doesnt(self): @@ -140,15 +163,15 @@ class TestRaises(object): raise ValueError("demo error") def test_tupleerror(self): - a,b = [1] + a, b = [1] # NOQA def test_reinterpret_fails_with_print_for_the_fun_of_it(self): - l = [1,2,3] - print ("l is %r" % l) - a,b = l.pop() + items = [1, 2, 3] + print("items is %r" % items) + a, b = items.pop() def test_some_error(self): - if namenotexi: + if namenotexi: # NOQA pass def func1(self): @@ -159,31 +182,35 @@ class TestRaises(object): def test_dynamic_compile_shows_nicely(): import imp import sys - src = 'def foo():\n assert 1 == 0\n' - name = 'abc-123' + + src = "def foo():\n assert 1 == 0\n" + name = "abc-123" module = imp.new_module(name) - code = _pytest._code.compile(src, name, 'exec') + code = _pytest._code.compile(src, name, "exec") py.builtin.exec_(code, module.__dict__) sys.modules[name] = module module.foo() - class TestMoreErrors(object): + def test_complex_error(self): + def f(): return 44 + def g(): return 43 + somefunc(f(), g()) def test_z1_unpack_error(self): - l = [] - a,b = l + items = [] + a, b = items def test_z2_type_error(self): - l = 3 - a,b = l + items = 3 + a, b = items def test_startswith(self): s = "123" @@ -191,17 +218,20 @@ class TestMoreErrors(object): assert s.startswith(g) def test_startswith_nested(self): + def f(): return "123" + def g(): return "456" + assert f().startswith(g()) def test_global_func(self): assert isinstance(globf(42), float) def test_instance(self): - self.x = 6*7 + self.x = 6 * 7 assert self.x != 42 def test_compare(self): @@ -218,23 +248,31 @@ class TestMoreErrors(object): class TestCustomAssertMsg(object): def test_single_line(self): + class A(object): a = 1 + b = 2 assert A.a == b, "A.a appears not to be b" def test_multiline(self): + class A(object): a = 1 + b = 2 - assert A.a == b, "A.a appears not to be b\n" \ - "or does not appear to be b\none of those" + assert ( + A.a == b + ), "A.a appears not to be b\n" "or does not appear to be b\none of those" def test_custom_repr(self): + class JSON(object): a = 1 + def __repr__(self): return "This is JSON\n{\n 'foo': 'bar'\n}" + a = JSON() b = 2 assert a.a == b, a diff --git a/doc/en/example/assertion/global_testmodule_config/conftest.py b/doc/en/example/assertion/global_testmodule_config/conftest.py index 71e8c54be..4859bea78 100644 --- a/doc/en/example/assertion/global_testmodule_config/conftest.py +++ b/doc/en/example/assertion/global_testmodule_config/conftest.py @@ -1,10 +1,13 @@ -import pytest, py +import pytest +import py + mydir = py.path.local(__file__).dirpath() + def pytest_runtest_setup(item): if isinstance(item, pytest.Function): if not item.fspath.relto(mydir): return mod = item.getparent(pytest.Module).obj - if hasattr(mod, 'hello'): - print ("mod.hello %r" % (mod.hello,)) + if hasattr(mod, "hello"): + print("mod.hello %r" % (mod.hello,)) diff --git 
a/doc/en/example/assertion/global_testmodule_config/test_hello_world.py b/doc/en/example/assertion/global_testmodule_config/test_hello_world.py index 828e6b9fd..b945afa67 100644 --- a/doc/en/example/assertion/global_testmodule_config/test_hello_world.py +++ b/doc/en/example/assertion/global_testmodule_config/test_hello_world.py @@ -1,5 +1,6 @@ hello = "world" + def test_func(): pass diff --git a/doc/en/example/assertion/test_failures.py b/doc/en/example/assertion/test_failures.py index 2e5cd20b1..1150ec12f 100644 --- a/doc/en/example/assertion/test_failures.py +++ b/doc/en/example/assertion/test_failures.py @@ -1,14 +1,14 @@ import py -failure_demo = py.path.local(__file__).dirpath('failure_demo.py') -pytest_plugins = 'pytester', + +failure_demo = py.path.local(__file__).dirpath("failure_demo.py") +pytest_plugins = "pytester", + def test_failure_demo_fails_properly(testdir): target = testdir.tmpdir.join(failure_demo.basename) failure_demo.copy(target) failure_demo.copy(testdir.tmpdir.join(failure_demo.basename)) result = testdir.runpytest(target, syspathinsert=True) - result.stdout.fnmatch_lines([ - "*42 failed*" - ]) + result.stdout.fnmatch_lines(["*42 failed*"]) assert result.ret != 0 diff --git a/doc/en/example/assertion/test_setup_flow_example.py b/doc/en/example/assertion/test_setup_flow_example.py index 100effa49..c00711dc2 100644 --- a/doc/en/example/assertion/test_setup_flow_example.py +++ b/doc/en/example/assertion/test_setup_flow_example.py @@ -1,7 +1,9 @@ def setup_module(module): module.TestStateFullThing.classcount = 0 + class TestStateFullThing(object): + def setup_class(cls): cls.classcount += 1 @@ -19,9 +21,11 @@ class TestStateFullThing(object): assert self.classcount == 1 assert self.id == 23 + def teardown_module(module): assert module.TestStateFullThing.classcount == 0 + """ For this example the control flow happens as follows:: import test_setup_flow_example setup_module(test_setup_flow_example) @@ -39,4 +43,3 @@ Note that ``setup_class(TestStateFullThing)`` is called and not to insert ``setup_class = classmethod(setup_class)`` to make your setup function callable. 
""" - diff --git a/doc/en/example/costlysetup/conftest.py b/doc/en/example/costlysetup/conftest.py index ea3c1cffb..466c62c06 100644 --- a/doc/en/example/costlysetup/conftest.py +++ b/doc/en/example/costlysetup/conftest.py @@ -1,16 +1,20 @@ import pytest + @pytest.fixture("session") def setup(request): setup = CostlySetup() yield setup setup.finalize() + class CostlySetup(object): + def __init__(self): import time - print ("performing costly setup") + + print("performing costly setup") time.sleep(5) self.timecostly = 1 diff --git a/doc/en/example/costlysetup/sub_a/test_quick.py b/doc/en/example/costlysetup/sub_a/test_quick.py index d97657867..38dda2660 100644 --- a/doc/en/example/costlysetup/sub_a/test_quick.py +++ b/doc/en/example/costlysetup/sub_a/test_quick.py @@ -1,3 +1,2 @@ - def test_quick(setup): pass diff --git a/doc/en/example/costlysetup/sub_b/test_two.py b/doc/en/example/costlysetup/sub_b/test_two.py index 6ed6ee4d8..b1653aaab 100644 --- a/doc/en/example/costlysetup/sub_b/test_two.py +++ b/doc/en/example/costlysetup/sub_b/test_two.py @@ -1,6 +1,6 @@ def test_something(setup): assert setup.timecostly == 1 + def test_something_more(setup): assert setup.timecostly == 1 - diff --git a/doc/en/example/markers.rst b/doc/en/example/markers.rst index 5b049d463..bf352bc81 100644 --- a/doc/en/example/markers.rst +++ b/doc/en/example/markers.rst @@ -35,9 +35,9 @@ You can then restrict a test run to only run tests marked with ``webtest``:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 4 items / 3 deselected - + test_server.py::test_send_http PASSED [100%] - + ================== 1 passed, 3 deselected in 0.12 seconds ================== Or the inverse, running all tests except the webtest ones:: @@ -48,11 +48,11 @@ Or the inverse, running all tests except the webtest ones:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 4 items / 1 deselected - + test_server.py::test_something_quick PASSED [ 33%] test_server.py::test_another PASSED [ 66%] test_server.py::TestClass::test_method PASSED [100%] - + ================== 3 passed, 1 deselected in 0.12 seconds ================== Selecting tests based on their node ID @@ -68,9 +68,9 @@ tests based on their module, class, method, or function name:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 1 item - + test_server.py::TestClass::test_method PASSED [100%] - + ========================= 1 passed in 0.12 seconds ========================= You can also select on the class:: @@ -81,9 +81,9 @@ You can also select on the class:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 1 item - + test_server.py::TestClass::test_method PASSED [100%] - + ========================= 1 passed in 0.12 seconds ========================= Or select multiple nodes:: @@ -94,10 +94,10 @@ Or select multiple nodes:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 2 items - + test_server.py::TestClass::test_method PASSED [ 50%] test_server.py::test_send_http PASSED [100%] - + ========================= 2 passed in 0.12 seconds ========================= .. _node-id: @@ -132,9 +132,9 @@ select tests based on their names:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... 
collected 4 items / 3 deselected - + test_server.py::test_send_http PASSED [100%] - + ================== 1 passed, 3 deselected in 0.12 seconds ================== And you can also run all tests except the ones that match the keyword:: @@ -145,11 +145,11 @@ And you can also run all tests except the ones that match the keyword:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 4 items / 1 deselected - + test_server.py::test_something_quick PASSED [ 33%] test_server.py::test_another PASSED [ 66%] test_server.py::TestClass::test_method PASSED [100%] - + ================== 3 passed, 1 deselected in 0.12 seconds ================== Or to select "http" and "quick" tests:: @@ -160,10 +160,10 @@ Or to select "http" and "quick" tests:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 4 items / 2 deselected - + test_server.py::test_send_http PASSED [ 50%] test_server.py::test_something_quick PASSED [100%] - + ================== 2 passed, 2 deselected in 0.12 seconds ================== .. note:: @@ -199,21 +199,21 @@ You can ask which markers exist for your test suite - the list includes our just $ pytest --markers @pytest.mark.webtest: mark a test as a webtest. - + @pytest.mark.skip(reason=None): skip the given test function with an optional reason. Example: skip(reason="no way of currently testing this") skips the test. - + @pytest.mark.skipif(condition): skip the given test function if eval(condition) results in a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. see http://pytest.org/latest/skipping.html - + @pytest.mark.xfail(condition, reason=None, run=True, raises=None, strict=False): mark the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. If only specific exception(s) are expected, you can list them in raises, and if the test fails in other ways, it will be reported as a true failure. See http://pytest.org/latest/skipping.html - + @pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in different arguments in turn. argvalues generally needs to be a list of values if argnames specifies only one name or a list of tuples of values if argnames specifies multiple names. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.see http://pytest.org/latest/parametrize.html for more info and examples. - - @pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures - + + @pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures + @pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible. - + @pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible. - + For an example on how to add and work with markers from a plugin, see :ref:`adding a custom marker from a plugin`. 
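A minimal sketch of registering such a custom marker from a ``conftest.py``,
so that ``pytest --markers`` lists it alongside the built-in ones shown above
(the hook body is illustrative)::

    # content of conftest.py
    def pytest_configure(config):
        # register the "webtest" marker with the text shown by --markers
        config.addinivalue_line(
            "markers", "webtest: mark a test as a webtest."
        )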
@@ -227,7 +227,7 @@ For an example on how to add and work with markers from a plugin, see * Asking for existing markers via ``pytest --markers`` gives good output * Typos in function markers are treated as an error if you use - the ``--strict`` option. + the ``--strict`` option. .. _`scoped-marking`: @@ -352,9 +352,9 @@ the test needs:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - + test_someenv.py s [100%] - + ======================== 1 skipped in 0.12 seconds ========================= and here is one that specifies exactly the environment needed:: @@ -364,30 +364,30 @@ and here is one that specifies exactly the environment needed:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - + test_someenv.py . [100%] - + ========================= 1 passed in 0.12 seconds ========================= The ``--markers`` option always gives you a list of available markers:: $ pytest --markers @pytest.mark.env(name): mark test to run only on named environment - + @pytest.mark.skip(reason=None): skip the given test function with an optional reason. Example: skip(reason="no way of currently testing this") skips the test. - + @pytest.mark.skipif(condition): skip the given test function if eval(condition) results in a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. see http://pytest.org/latest/skipping.html - + @pytest.mark.xfail(condition, reason=None, run=True, raises=None, strict=False): mark the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. If only specific exception(s) are expected, you can list them in raises, and if the test fails in other ways, it will be reported as a true failure. See http://pytest.org/latest/skipping.html - + @pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in different arguments in turn. argvalues generally needs to be a list of values if argnames specifies only one name or a list of tuples of values if argnames specifies multiple names. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.see http://pytest.org/latest/parametrize.html for more info and examples. - - @pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures - + + @pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures + @pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible. - + @pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible. - + .. _`passing callables to custom markers`: @@ -523,11 +523,11 @@ then you will see two tests skipped and two executed tests as expected:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items - + test_plat.py s.s. 
[100%] ========================= short test summary info ========================== SKIP [2] $REGENDOC_TMPDIR/conftest.py:12: cannot run on platform linux - + =================== 2 passed, 2 skipped in 0.12 seconds ==================== Note that if you specify a platform via the marker-command line option like this:: @@ -537,9 +537,9 @@ Note that if you specify a platform via the marker-command line option like this platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items / 3 deselected - + test_plat.py . [100%] - + ================== 1 passed, 3 deselected in 0.12 seconds ================== then the unmarked-tests will not be run. It is thus a way to restrict the run to the specific tests. @@ -588,9 +588,9 @@ We can now use the ``-m option`` to select one set:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items / 2 deselected - + test_module.py FF [100%] - + ================================= FAILURES ================================= __________________________ test_interface_simple ___________________________ test_module.py:3: in test_interface_simple @@ -609,9 +609,9 @@ or to select both "event" and "interface" tests:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items / 1 deselected - + test_module.py FFF [100%] - + ================================= FAILURES ================================= __________________________ test_interface_simple ___________________________ test_module.py:3: in test_interface_simple diff --git a/doc/en/example/multipython.py b/doc/en/example/multipython.py index 66079be7e..970800c7e 100644 --- a/doc/en/example/multipython.py +++ b/doc/en/example/multipython.py @@ -6,35 +6,48 @@ import py import pytest import _pytest._code -pythonlist = ['python2.7', 'python3.4', 'python3.5'] +pythonlist = ["python2.7", "python3.4", "python3.5"] + + @pytest.fixture(params=pythonlist) def python1(request, tmpdir): picklefile = tmpdir.join("data.pickle") return Python(request.param, picklefile) + @pytest.fixture(params=pythonlist) def python2(request, python1): return Python(request.param, python1.picklefile) + class Python(object): + def __init__(self, version, picklefile): self.pythonpath = py.path.local.sysfind(version) if not self.pythonpath: - pytest.skip("%r not found" %(version,)) + pytest.skip("%r not found" % (version,)) self.picklefile = picklefile + def dumps(self, obj): dumpfile = self.picklefile.dirpath("dump.py") - dumpfile.write(_pytest._code.Source(""" + dumpfile.write( + _pytest._code.Source( + """ import pickle f = open(%r, 'wb') s = pickle.dump(%r, f, protocol=2) f.close() - """ % (str(self.picklefile), obj))) - py.process.cmdexec("%s %s" %(self.pythonpath, dumpfile)) + """ + % (str(self.picklefile), obj) + ) + ) + py.process.cmdexec("%s %s" % (self.pythonpath, dumpfile)) def load_and_is_true(self, expression): loadfile = self.picklefile.dirpath("load.py") - loadfile.write(_pytest._code.Source(""" + loadfile.write( + _pytest._code.Source( + """ import pickle f = open(%r, 'rb') obj = pickle.load(f) @@ -42,11 +55,15 @@ class Python(object): res = eval(%r) if not res: raise SystemExit(1) - """ % (str(self.picklefile), expression))) - print (loadfile) - py.process.cmdexec("%s %s" %(self.pythonpath, loadfile)) + """ + % (str(self.picklefile), expression) + ) + ) + print(loadfile) + py.process.cmdexec("%s %s" % (self.pythonpath, loadfile)) 
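# The fixtures above use ``params`` so that each test requesting them runs
# once per configured interpreter. A self-contained sketch of that pattern
# (fixture name and values are illustrative):
import pytest

@pytest.fixture(params=["python2.7", "python3.4", "python3.5"])
def interpreter_name(request):
    # request.param is the value selected for the current test run
    return request.param

def test_interpreter_name(interpreter_name):
    assert interpreter_name.startswith("python")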
-@pytest.mark.parametrize("obj", [42, {}, {1:3},]) + +@pytest.mark.parametrize("obj", [42, {}, {1: 3}]) def test_basic_objects(python1, python2, obj): python1.dumps(obj) python2.load_and_is_true("obj == %s" % obj) diff --git a/doc/en/example/nonpython.rst b/doc/en/example/nonpython.rst index ca7b2c8df..4f5adf63f 100644 --- a/doc/en/example/nonpython.rst +++ b/doc/en/example/nonpython.rst @@ -30,9 +30,9 @@ now execute the test specification:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR/nonpython, inifile: collected 2 items - + test_simple.yml F. [100%] - + ================================= FAILURES ================================= ______________________________ usecase: hello ______________________________ usecase execution failed @@ -63,10 +63,10 @@ consulted when reporting in ``verbose`` mode:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR/nonpython, inifile: collecting ... collected 2 items - + test_simple.yml::hello FAILED [ 50%] test_simple.yml::ok PASSED [100%] - + ================================= FAILURES ================================= ______________________________ usecase: hello ______________________________ usecase execution failed @@ -87,5 +87,5 @@ interesting to just look at the collection tree:: - + ======================= no tests ran in 0.12 seconds ======================= diff --git a/doc/en/example/nonpython/conftest.py b/doc/en/example/nonpython/conftest.py index baff30015..8429dd114 100644 --- a/doc/en/example/nonpython/conftest.py +++ b/doc/en/example/nonpython/conftest.py @@ -2,18 +2,24 @@ import pytest + def pytest_collect_file(parent, path): if path.ext == ".yml" and path.basename.startswith("test"): return YamlFile(path, parent) + class YamlFile(pytest.File): + def collect(self): - import yaml # we need a yaml parser, e.g. PyYAML + import yaml # we need a yaml parser, e.g. PyYAML + raw = yaml.safe_load(self.fspath.open()) for name, spec in sorted(raw.items()): yield YamlItem(name, self, spec) + class YamlItem(pytest.Item): + def __init__(self, name, parent, spec): super(YamlItem, self).__init__(name, parent) self.spec = spec @@ -27,14 +33,17 @@ class YamlItem(pytest.Item): def repr_failure(self, excinfo): """ called when self.runtest() raises an exception. """ if isinstance(excinfo.value, YamlException): - return "\n".join([ - "usecase execution failed", - " spec failed: %r: %r" % excinfo.value.args[1:3], - " no further details known at this point." - ]) + return "\n".join( + [ + "usecase execution failed", + " spec failed: %r: %r" % excinfo.value.args[1:3], + " no further details known at this point.", + ] + ) def reportinfo(self): return self.fspath, 0, "usecase: %s" % self.name + class YamlException(Exception): """ custom exception for error reporting. 
""" diff --git a/doc/en/example/parametrize.rst b/doc/en/example/parametrize.rst index 2cf3fa1e4..882700fec 100644 --- a/doc/en/example/parametrize.rst +++ b/doc/en/example/parametrize.rst @@ -55,13 +55,13 @@ let's run the full monty:: ....F [100%] ================================= FAILURES ================================= _____________________________ test_compute[4] ______________________________ - + param1 = 4 - + def test_compute(param1): > assert param1 < 4 E assert 4 < 4 - + test_compute.py:3: AssertionError 1 failed, 4 passed in 0.12 seconds @@ -151,7 +151,7 @@ objects, they are still using the default pytest representation:: - + ======================= no tests ran in 0.12 seconds ======================= In ``test_timedistance_v3``, we used ``pytest.param`` to specify the test IDs @@ -198,9 +198,9 @@ this is a fully self-contained example which you can run with:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items - + test_scenarios.py .... [100%] - + ========================= 4 passed in 0.12 seconds ========================= If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function:: @@ -218,7 +218,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia - + ======================= no tests ran in 0.12 seconds ======================= Note that we told ``metafunc.parametrize()`` that your scenario values @@ -279,7 +279,7 @@ Let's first see how it looks like at collection time:: - + ======================= no tests ran in 0.12 seconds ======================= And then when we run the test:: @@ -288,15 +288,15 @@ And then when we run the test:: .F [100%] ================================= FAILURES ================================= _________________________ test_db_initialized[d2] __________________________ - + db = - + def test_db_initialized(db): # a dummy test if db.__class__.__name__ == "DB2": > pytest.fail("deliberately failing for demo purposes") E Failed: deliberately failing for demo purposes - + test_backends.py:6: Failed 1 failed, 1 passed in 0.12 seconds @@ -339,7 +339,7 @@ The result of this test will be successful:: collected 1 item - + ======================= no tests ran in 0.12 seconds ======================= .. regendoc:wipe @@ -384,13 +384,13 @@ argument sets to use for each test function. Let's run it:: F.. [100%] ================================= FAILURES ================================= ________________________ TestClass.test_equals[1-2] ________________________ - + self = , a = 1, b = 2 - + def test_equals(self, a, b): > assert a == b E assert 1 == 2 - + test_parametrize.py:18: AssertionError 1 failed, 2 passed in 0.12 seconds @@ -462,11 +462,11 @@ If you run this with reporting for skips enabled:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - + test_module.py .s [100%] ========================= short test summary info ========================== SKIP [1] $REGENDOC_TMPDIR/conftest.py:11: could not import 'opt2' - + =================== 1 passed, 1 skipped in 0.12 seconds ==================== You'll see that we don't have an ``opt2`` module and thus the second test run @@ -504,10 +504,10 @@ For example:: ]) def test_eval(test_input, expected): assert eval(test_input) == expected - + In this example, we have 4 parametrized tests. 
Except for the first test, we mark the rest three parametrized tests with the custom marker ``basic``, -and for the fourth test we also use the built-in mark ``xfail`` to indicate this +and for the fourth test we also use the built-in mark ``xfail`` to indicate this test is expected to fail. For explicitness, we set test ids for some tests. Then run ``pytest`` with verbose mode and with only the ``basic`` marker:: diff --git a/doc/en/example/py2py3/conftest.py b/doc/en/example/py2py3/conftest.py index 81cd1fb11..5d9a07e3e 100644 --- a/doc/en/example/py2py3/conftest.py +++ b/doc/en/example/py2py3/conftest.py @@ -3,14 +3,14 @@ import pytest py3 = sys.version_info[0] >= 3 + class DummyCollector(pytest.collect.File): + def collect(self): return [] + def pytest_pycollect_makemodule(path, parent): bn = path.basename if "py3" in bn and not py3 or ("py2" in bn and py3): return DummyCollector(path, parent=parent) - - - diff --git a/doc/en/example/py2py3/test_py2.py b/doc/en/example/py2py3/test_py2.py index e09ed9466..664acf178 100644 --- a/doc/en/example/py2py3/test_py2.py +++ b/doc/en/example/py2py3/test_py2.py @@ -4,4 +4,3 @@ def test_exception_syntax(): 0/0 except ZeroDivisionError, e: pass - diff --git a/doc/en/example/py2py3/test_py3.py b/doc/en/example/py2py3/test_py3.py index a811f2bbc..baf0ffbd8 100644 --- a/doc/en/example/py2py3/test_py3.py +++ b/doc/en/example/py2py3/test_py3.py @@ -1,7 +1,5 @@ - def test_exception_syntax(): try: - 0/0 + 0 / 0 except ZeroDivisionError as e: pass - diff --git a/doc/en/example/pythoncollection.py b/doc/en/example/pythoncollection.py index 9c4bd31ce..3603361c3 100644 --- a/doc/en/example/pythoncollection.py +++ b/doc/en/example/pythoncollection.py @@ -1,11 +1,15 @@ - # run this with $ pytest --collect-only test_collectonly.py # + + def test_function(): pass + class TestClass(object): + def test_method(self): pass + def test_anothermethod(self): pass diff --git a/doc/en/example/pythoncollection.rst b/doc/en/example/pythoncollection.rst index 58b4364b5..8e9d3ae62 100644 --- a/doc/en/example/pythoncollection.rst +++ b/doc/en/example/pythoncollection.rst @@ -133,7 +133,7 @@ then the test collection looks like this:: - + ======================= no tests ran in 0.12 seconds ======================= .. note:: @@ -180,7 +180,7 @@ You can always peek at the collection tree without running tests like this:: - + ======================= no tests ran in 0.12 seconds ======================= .. 
_customizing-test-collection: @@ -243,5 +243,5 @@ file will be left out:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini collected 0 items - + ======================= no tests ran in 0.12 seconds ======================= diff --git a/doc/en/example/reportingdemo.rst b/doc/en/example/reportingdemo.rst index b7b6ffaf3..010d9c143 100644 --- a/doc/en/example/reportingdemo.rst +++ b/doc/en/example/reportingdemo.rst @@ -14,82 +14,82 @@ get on the terminal - we are working on that):: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR/assertion, inifile: collected 42 items - + failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF [100%] - + ================================= FAILURES ================================= ____________________________ test_generative[0] ____________________________ - + param1 = 3, param2 = 6 - + def test_generative(param1, param2): > assert param1 * 2 < param2 E assert (3 * 2) < 6 - + failure_demo.py:16: AssertionError _________________________ TestFailing.test_simple __________________________ - + self = - + def test_simple(self): def f(): return 42 def g(): return 43 - + > assert f() == g() E assert 42 == 43 E + where 42 = .f at 0xdeadbeef>() E + and 43 = .g at 0xdeadbeef>() - + failure_demo.py:29: AssertionError ____________________ TestFailing.test_simple_multiline _____________________ - + self = - + def test_simple_multiline(self): otherfunc_multi( 42, > 6*9) - - failure_demo.py:34: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - + + failure_demo.py:34: + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + a = 42, b = 54 - + def otherfunc_multi(a,b): > assert (a == b) E assert 42 == 54 - + failure_demo.py:12: AssertionError ___________________________ TestFailing.test_not ___________________________ - + self = - + def test_not(self): def f(): return 42 > assert not f() E assert not 42 E + where 42 = .f at 0xdeadbeef>() - + failure_demo.py:39: AssertionError _________________ TestSpecialisedExplanations.test_eq_text _________________ - + self = - + def test_eq_text(self): > assert 'spam' == 'eggs' E AssertionError: assert 'spam' == 'eggs' E - spam E + eggs - + failure_demo.py:43: AssertionError _____________ TestSpecialisedExplanations.test_eq_similar_text _____________ - + self = - + def test_eq_similar_text(self): > assert 'foo 1 bar' == 'foo 2 bar' E AssertionError: assert 'foo 1 bar' == 'foo 2 bar' @@ -97,12 +97,12 @@ get on the terminal - we are working on that):: E ? ^ E + foo 2 bar E ? ^ - + failure_demo.py:46: AssertionError ____________ TestSpecialisedExplanations.test_eq_multiline_text ____________ - + self = - + def test_eq_multiline_text(self): > assert 'foo\nspam\nbar' == 'foo\neggs\nbar' E AssertionError: assert 'foo\nspam\nbar' == 'foo\neggs\nbar' @@ -110,12 +110,12 @@ get on the terminal - we are working on that):: E - spam E + eggs E bar - + failure_demo.py:49: AssertionError ______________ TestSpecialisedExplanations.test_eq_long_text _______________ - + self = - + def test_eq_long_text(self): a = '1'*100 + 'a' + '2'*100 b = '1'*100 + 'b' + '2'*100 @@ -127,12 +127,12 @@ get on the terminal - we are working on that):: E ? ^ E + 1111111111b222222222 E ? 
^ - + failure_demo.py:54: AssertionError _________ TestSpecialisedExplanations.test_eq_long_text_multiline __________ - + self = - + def test_eq_long_text_multiline(self): a = '1\n'*100 + 'a' + '2\n'*100 b = '1\n'*100 + 'b' + '2\n'*100 @@ -145,25 +145,25 @@ get on the terminal - we are working on that):: E 1 E 1 E 1... - E + E E ...Full output truncated (7 lines hidden), use '-vv' to show - + failure_demo.py:59: AssertionError _________________ TestSpecialisedExplanations.test_eq_list _________________ - + self = - + def test_eq_list(self): > assert [0, 1, 2] == [0, 1, 3] E assert [0, 1, 2] == [0, 1, 3] E At index 2 diff: 2 != 3 E Use -v to get the full diff - + failure_demo.py:62: AssertionError ______________ TestSpecialisedExplanations.test_eq_list_long _______________ - + self = - + def test_eq_list_long(self): a = [0]*100 + [1] + [3]*100 b = [0]*100 + [2] + [3]*100 @@ -171,12 +171,12 @@ get on the terminal - we are working on that):: E assert [0, 0, 0, 0, 0, 0, ...] == [0, 0, 0, 0, 0, 0, ...] E At index 100 diff: 1 != 2 E Use -v to get the full diff - + failure_demo.py:67: AssertionError _________________ TestSpecialisedExplanations.test_eq_dict _________________ - + self = - + def test_eq_dict(self): > assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0} E AssertionError: assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0} @@ -187,14 +187,14 @@ get on the terminal - we are working on that):: E {'c': 0} E Right contains more items: E {'d': 0}... - E + E E ...Full output truncated (2 lines hidden), use '-vv' to show - + failure_demo.py:70: AssertionError _________________ TestSpecialisedExplanations.test_eq_set __________________ - + self = - + def test_eq_set(self): > assert set([0, 10, 11, 12]) == set([0, 20, 21]) E AssertionError: assert {0, 10, 11, 12} == {0, 20, 21} @@ -205,34 +205,34 @@ get on the terminal - we are working on that):: E Extra items in the right set: E 20 E 21... - E + E E ...Full output truncated (2 lines hidden), use '-vv' to show - + failure_demo.py:73: AssertionError _____________ TestSpecialisedExplanations.test_eq_longer_list ______________ - + self = - + def test_eq_longer_list(self): > assert [1,2] == [1,2,3] E assert [1, 2] == [1, 2, 3] E Right contains more items, first extra item: 3 E Use -v to get the full diff - + failure_demo.py:76: AssertionError _________________ TestSpecialisedExplanations.test_in_list _________________ - + self = - + def test_in_list(self): > assert 1 in [0, 2, 3, 4, 5] E assert 1 in [0, 2, 3, 4, 5] - + failure_demo.py:79: AssertionError __________ TestSpecialisedExplanations.test_not_in_text_multiline __________ - + self = - + def test_not_in_text_multiline(self): text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail' > assert 'foo' not in text @@ -244,14 +244,14 @@ get on the terminal - we are working on that):: E includes foo E ? +++ E and a... - E + E E ...Full output truncated (2 lines hidden), use '-vv' to show - + failure_demo.py:83: AssertionError ___________ TestSpecialisedExplanations.test_not_in_text_single ____________ - + self = - + def test_not_in_text_single(self): text = 'single foo line' > assert 'foo' not in text @@ -259,36 +259,36 @@ get on the terminal - we are working on that):: E 'foo' is contained here: E single foo line E ? 
+++ - + failure_demo.py:87: AssertionError _________ TestSpecialisedExplanations.test_not_in_text_single_long _________ - + self = - + def test_not_in_text_single_long(self): text = 'head ' * 50 + 'foo ' + 'tail ' * 20 > assert 'foo' not in text E AssertionError: assert 'foo' not in 'head head head head hea...ail tail tail tail tail ' E 'foo' is contained here: - E head head foo tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail + E head head foo tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail E ? +++ - + failure_demo.py:91: AssertionError ______ TestSpecialisedExplanations.test_not_in_text_single_long_term _______ - + self = - + def test_not_in_text_single_long_term(self): text = 'head ' * 50 + 'f'*70 + 'tail ' * 20 > assert 'f'*70 not in text E AssertionError: assert 'fffffffffff...ffffffffffff' not in 'head head he...l tail tail ' E 'ffffffffffffffffff...fffffffffffffffffff' is contained here: - E head head fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffftail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail + E head head fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffftail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail E ? ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - + failure_demo.py:95: AssertionError ______________________________ test_attribute ______________________________ - + def test_attribute(): class Foo(object): b = 1 @@ -296,10 +296,10 @@ get on the terminal - we are working on that):: > assert i.b == 2 E assert 1 == 2 E + where 1 = .Foo object at 0xdeadbeef>.b - + failure_demo.py:102: AssertionError _________________________ test_attribute_instance __________________________ - + def test_attribute_instance(): class Foo(object): b = 1 @@ -307,10 +307,10 @@ get on the terminal - we are working on that):: E AssertionError: assert 1 == 2 E + where 1 = .Foo object at 0xdeadbeef>.b E + where .Foo object at 0xdeadbeef> = .Foo'>() - + failure_demo.py:108: AssertionError __________________________ test_attribute_failure __________________________ - + def test_attribute_failure(): class Foo(object): def _get_b(self): @@ -318,19 +318,19 @@ get on the terminal - we are working on that):: b = property(_get_b) i = Foo() > assert i.b == 2 - - failure_demo.py:117: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - + + failure_demo.py:117: + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + self = .Foo object at 0xdeadbeef> - + def _get_b(self): > raise Exception('Failed to get attrib') E Exception: Failed to get attrib - + failure_demo.py:114: Exception _________________________ test_attribute_multiple __________________________ - + def test_attribute_multiple(): class Foo(object): b = 1 @@ -342,74 +342,74 @@ get on the terminal - we are working on that):: E + where .Foo object at 0xdeadbeef> = .Foo'>() E + and 2 = .Bar object at 0xdeadbeef>.b E + where .Bar object at 0xdeadbeef> = .Bar'>() - + failure_demo.py:125: AssertionError __________________________ TestRaises.test_raises __________________________ - + self = - + def test_raises(self): s = 'qwe' > raises(TypeError, "int(s)") - - failure_demo.py:134: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - + + failure_demo.py:134: + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
_ _ _ _ _ _ _ + > int(s) E ValueError: invalid literal for int() with base 10: 'qwe' - + <0-codegen $PYTHON_PREFIX/lib/python3.5/site-packages/_pytest/python_api.py:615>:1: ValueError ______________________ TestRaises.test_raises_doesnt _______________________ - + self = - + def test_raises_doesnt(self): > raises(IOError, "int('3')") E Failed: DID NOT RAISE - + failure_demo.py:137: Failed __________________________ TestRaises.test_raise ___________________________ - + self = - + def test_raise(self): > raise ValueError("demo error") E ValueError: demo error - + failure_demo.py:140: ValueError ________________________ TestRaises.test_tupleerror ________________________ - + self = - + def test_tupleerror(self): > a,b = [1] E ValueError: not enough values to unpack (expected 2, got 1) - + failure_demo.py:143: ValueError ______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ______ - + self = - + def test_reinterpret_fails_with_print_for_the_fun_of_it(self): l = [1,2,3] print ("l is %r" % l) > a,b = l.pop() E TypeError: 'int' object is not iterable - + failure_demo.py:148: TypeError --------------------------- Captured stdout call --------------------------- l is [1, 2, 3] ________________________ TestRaises.test_some_error ________________________ - + self = - + def test_some_error(self): > if namenotexi: E NameError: name 'namenotexi' is not defined - + failure_demo.py:151: NameError ____________________ test_dynamic_compile_shows_nicely _____________________ - + def test_dynamic_compile_shows_nicely(): import imp import sys @@ -420,63 +420,63 @@ get on the terminal - we are working on that):: py.builtin.exec_(code, module.__dict__) sys.modules[name] = module > module.foo() - - failure_demo.py:168: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - + + failure_demo.py:168: + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + def foo(): > assert 1 == 0 E AssertionError - + <2-codegen 'abc-123' $REGENDOC_TMPDIR/assertion/failure_demo.py:165>:2: AssertionError ____________________ TestMoreErrors.test_complex_error _____________________ - + self = - + def test_complex_error(self): def f(): return 44 def g(): return 43 > somefunc(f(), g()) - - failure_demo.py:178: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + + failure_demo.py:178: + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ failure_demo.py:9: in somefunc otherfunc(x,y) - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + a = 44, b = 43 - + def otherfunc(a,b): > assert a==b E assert 44 == 43 - + failure_demo.py:6: AssertionError ___________________ TestMoreErrors.test_z1_unpack_error ____________________ - + self = - + def test_z1_unpack_error(self): l = [] > a,b = l E ValueError: not enough values to unpack (expected 2, got 0) - + failure_demo.py:182: ValueError ____________________ TestMoreErrors.test_z2_type_error _____________________ - + self = - + def test_z2_type_error(self): l = 3 > a,b = l E TypeError: 'int' object is not iterable - + failure_demo.py:186: TypeError ______________________ TestMoreErrors.test_startswith ______________________ - + self = - + def test_startswith(self): s = "123" g = "456" @@ -484,12 +484,12 @@ get on the terminal - we are working on that):: E AssertionError: assert False E + where False = ('456') E + where = '123'.startswith - + failure_demo.py:191: AssertionError 
__________________ TestMoreErrors.test_startswith_nested ___________________ - + self = - + def test_startswith_nested(self): def f(): return "123" @@ -501,55 +501,55 @@ get on the terminal - we are working on that):: E + where = '123'.startswith E + where '123' = .f at 0xdeadbeef>() E + and '456' = .g at 0xdeadbeef>() - + failure_demo.py:198: AssertionError _____________________ TestMoreErrors.test_global_func ______________________ - + self = - + def test_global_func(self): > assert isinstance(globf(42), float) E assert False E + where False = isinstance(43, float) E + where 43 = globf(42) - + failure_demo.py:201: AssertionError _______________________ TestMoreErrors.test_instance _______________________ - + self = - + def test_instance(self): self.x = 6*7 > assert self.x != 42 E assert 42 != 42 E + where 42 = .x - + failure_demo.py:205: AssertionError _______________________ TestMoreErrors.test_compare ________________________ - + self = - + def test_compare(self): > assert globf(10) < 5 E assert 11 < 5 E + where 11 = globf(10) - + failure_demo.py:208: AssertionError _____________________ TestMoreErrors.test_try_finally ______________________ - + self = - + def test_try_finally(self): x = 1 try: > assert x == 0 E assert 1 == 0 - + failure_demo.py:213: AssertionError ___________________ TestCustomAssertMsg.test_single_line ___________________ - + self = - + def test_single_line(self): class A(object): a = 1 @@ -558,12 +558,12 @@ get on the terminal - we are working on that):: E AssertionError: A.a appears not to be b E assert 1 == 2 E + where 1 = .A'>.a - + failure_demo.py:224: AssertionError ____________________ TestCustomAssertMsg.test_multiline ____________________ - + self = - + def test_multiline(self): class A(object): a = 1 @@ -575,12 +575,12 @@ get on the terminal - we are working on that):: E one of those E assert 1 == 2 E + where 1 = .A'>.a - + failure_demo.py:230: AssertionError ___________________ TestCustomAssertMsg.test_custom_repr ___________________ - + self = - + def test_custom_repr(self): class JSON(object): a = 1 @@ -595,12 +595,12 @@ get on the terminal - we are working on that):: E } E assert 1 == 2 E + where 1 = This is JSON\n{\n 'foo': 'bar'\n}.a - + failure_demo.py:240: AssertionError ============================= warnings summary ============================= None Metafunc.addcall is deprecated and scheduled to be removed in pytest 4.0. Please use Metafunc.parametrize instead. 
- + -- Docs: http://doc.pytest.org/en/latest/warnings.html ================== 42 failed, 1 warnings in 0.12 seconds =================== diff --git a/doc/en/example/simple.rst b/doc/en/example/simple.rst index 90d1bd4ad..1c18a59fe 100644 --- a/doc/en/example/simple.rst +++ b/doc/en/example/simple.rst @@ -46,9 +46,9 @@ Let's run this without supplying our new option:: F [100%] ================================= FAILURES ================================= _______________________________ test_answer ________________________________ - + cmdopt = 'type1' - + def test_answer(cmdopt): if cmdopt == "type1": print ("first") @@ -56,7 +56,7 @@ Let's run this without supplying our new option:: print ("second") > assert 0 # to see what was printed E assert 0 - + test_sample.py:6: AssertionError --------------------------- Captured stdout call --------------------------- first @@ -68,9 +68,9 @@ And now with supplying a command line option:: F [100%] ================================= FAILURES ================================= _______________________________ test_answer ________________________________ - + cmdopt = 'type2' - + def test_answer(cmdopt): if cmdopt == "type1": print ("first") @@ -78,7 +78,7 @@ And now with supplying a command line option:: print ("second") > assert 0 # to see what was printed E assert 0 - + test_sample.py:6: AssertionError --------------------------- Captured stdout call --------------------------- second @@ -118,7 +118,7 @@ directory with the above conftest.py:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 0 items - + ======================= no tests ran in 0.12 seconds ======================= .. _`excontrolskip`: @@ -172,11 +172,11 @@ and when running it will see a skipped "slow" test:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - + test_module.py .s [100%] ========================= short test summary info ========================== SKIP [1] test_module.py:8: need --runslow option to run - + =================== 1 passed, 1 skipped in 0.12 seconds ==================== Or run it including the ``slow`` marked test:: @@ -186,9 +186,9 @@ Or run it including the ``slow`` marked test:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - + test_module.py .. [100%] - + ========================= 2 passed in 0.12 seconds ========================= Writing well integrated assertion helpers @@ -223,11 +223,11 @@ Let's run our little function:: F [100%] ================================= FAILURES ================================= ______________________________ test_something ______________________________ - + def test_something(): > checkconfig(42) E Failed: not configured: 42 - + test_checkconfig.py:8: Failed 1 failed in 0.12 seconds @@ -312,7 +312,7 @@ which will add the string to the test header accordingly:: project deps: mylib-1.1 rootdir: $REGENDOC_TMPDIR, inifile: collected 0 items - + ======================= no tests ran in 0.12 seconds ======================= .. regendoc:wipe @@ -339,7 +339,7 @@ which will add info only when run with "--v":: did you? rootdir: $REGENDOC_TMPDIR, inifile: collecting ... 
collected 0 items - + ======================= no tests ran in 0.12 seconds ======================= and nothing when run plainly:: @@ -349,7 +349,7 @@ and nothing when run plainly:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 0 items - + ======================= no tests ran in 0.12 seconds ======================= profiling test duration @@ -383,9 +383,9 @@ Now we can profile which test functions execute the slowest:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items - + test_some_are_slow.py ... [100%] - + ========================= slowest 3 test durations ========================= 0.30s call test_some_are_slow.py::test_funcslow2 0.20s call test_some_are_slow.py::test_funcslow1 @@ -449,18 +449,18 @@ If we run this:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items - + test_step.py .Fx. [100%] - + ================================= FAILURES ================================= ____________________ TestUserHandling.test_modification ____________________ - + self = - + def test_modification(self): > assert 0 E assert 0 - + test_step.py:9: AssertionError ========================= short test summary info ========================== XFAIL test_step.py::TestUserHandling::()::test_deletion @@ -528,12 +528,12 @@ We can run this:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 7 items - + test_step.py .Fx. [ 57%] a/test_db.py F [ 71%] a/test_db2.py F [ 85%] b/test_error.py E [100%] - + ================================== ERRORS ================================== _______________________ ERROR at setup of test_root ________________________ file $REGENDOC_TMPDIR/b/test_error.py, line 1 @@ -541,37 +541,37 @@ We can run this:: E fixture 'db' not found > available fixtures: cache, capfd, capfdbinary, caplog, capsys, capsysbinary, doctest_namespace, monkeypatch, pytestconfig, record_property, record_xml_attribute, record_xml_property, recwarn, tmpdir, tmpdir_factory > use 'pytest --fixtures [testpath]' for help on them. 
- + $REGENDOC_TMPDIR/b/test_error.py:1 ================================= FAILURES ================================= ____________________ TestUserHandling.test_modification ____________________ - + self = - + def test_modification(self): > assert 0 E assert 0 - + test_step.py:9: AssertionError _________________________________ test_a1 __________________________________ - + db = - + def test_a1(db): > assert 0, db # to show value E AssertionError: E assert 0 - + a/test_db.py:2: AssertionError _________________________________ test_a2 __________________________________ - + db = - + def test_a2(db): > assert 0, db # to show value E AssertionError: E assert 0 - + a/test_db2.py:2: AssertionError ========== 3 failed, 2 passed, 1 xfailed, 1 error in 0.12 seconds ========== @@ -636,25 +636,25 @@ and run them:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - + test_module.py FF [100%] - + ================================= FAILURES ================================= ________________________________ test_fail1 ________________________________ - + tmpdir = local('PYTEST_TMPDIR/test_fail10') - + def test_fail1(tmpdir): > assert 0 E assert 0 - + test_module.py:2: AssertionError ________________________________ test_fail2 ________________________________ - + def test_fail2(): > assert 0 E assert 0 - + test_module.py:4: AssertionError ========================= 2 failed in 0.12 seconds ========================= @@ -730,36 +730,36 @@ and run it:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items - + test_module.py Esetting up a test failed! test_module.py::test_setup_fails Fexecuting test failed test_module.py::test_call_fails F - + ================================== ERRORS ================================== ____________________ ERROR at setup of test_setup_fails ____________________ - + @pytest.fixture def other(): > assert 0 E assert 0 - + test_module.py:6: AssertionError ================================= FAILURES ================================= _____________________________ test_call_fails ______________________________ - + something = None - + def test_call_fails(something): > assert 0 E assert 0 - + test_module.py:12: AssertionError ________________________________ test_fail2 ________________________________ - + def test_fail2(): > assert 0 E assert 0 - + test_module.py:15: AssertionError ==================== 2 failed, 1 error in 0.12 seconds ===================== @@ -809,7 +809,7 @@ In that order. can be changed between releases (even bug fixes) so it shouldn't be relied on for scripting or automation. -Freezing pytest +Freezing pytest --------------- If you freeze your application using a tool like @@ -821,18 +821,18 @@ while also allowing you to send test files to users so they can run them in their machines, which can be useful to obtain more information about a hard-to-reproduce bug. Fortunately recent ``PyInstaller`` releases already have a custom hook -for pytest, but if you are using another tool to freeze executables +for pytest, but if you are using another tool to freeze executables such as ``cx_freeze`` or ``py2exe``, you can use ``pytest.freeze_includes()`` to obtain the full list of internal pytest modules. How to configure the tools to find the internal modules varies from tool to tool, however.
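For a freezing tool without a built-in pytest hook, a minimal ``cx_Freeze`` setup script might look like the sketch below; the application and file names are placeholders and not part of the patch above::

    # setup_freeze.py -- hypothetical example, not from the pytest docs
    from cx_Freeze import setup, Executable

    import pytest

    setup(
        name="app_main",
        executables=[Executable("app_main.py")],
        # bundle pytest's internal modules into the frozen executable
        options={"build_exe": {"includes": pytest.freeze_includes()}},
    )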
-Instead of freezing the pytest runner as a separate executable, you can make +Instead of freezing the pytest runner as a separate executable, you can make your frozen program work as the pytest runner by some clever -argument handling during program startup. This allows you to +argument handling during program startup. This allows you to have a single executable, which is usually more convenient. Please note that the mechanism for plugin discovery used by pytest (setuptools entry points) doesn't work with frozen executables so pytest -can't find any third party plugins automatically. To include third party plugins +can't find any third party plugins automatically. To include third party plugins like ``pytest-timeout`` they must be imported explicitly and passed on to pytest.main. .. code-block:: python @@ -854,4 +854,3 @@ This allows you to execute tests using the frozen application with standard ``pytest`` command-line options:: ./app_main --pytest --verbose --tb=long --junitxml=results.xml test-suite/ - diff --git a/doc/en/example/xfail_demo.py b/doc/en/example/xfail_demo.py index 5648575e8..01e6da1ad 100644 --- a/doc/en/example/xfail_demo.py +++ b/doc/en/example/xfail_demo.py @@ -1,29 +1,37 @@ import pytest + xfail = pytest.mark.xfail + @xfail def test_hello(): assert 0 + @xfail(run=False) def test_hello2(): assert 0 + @xfail("hasattr(os, 'sep')") def test_hello3(): assert 0 + @xfail(reason="bug 110") def test_hello4(): assert 0 + @xfail('pytest.__version__[0] != "17"') def test_hello5(): assert 0 + def test_hello6(): pytest.xfail("reason") + @xfail(raises=IndexError) def test_hello7(): x = [] diff --git a/doc/en/fixture.rst b/doc/en/fixture.rst index f1cdf622a..f2c5d2e96 100644 --- a/doc/en/fixture.rst +++ b/doc/en/fixture.rst @@ -73,20 +73,20 @@ marked ``smtp`` fixture function. Running the test looks like this:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - + test_smtpsimple.py F [100%] - + ================================= FAILURES ================================= ________________________________ test_ehlo _________________________________ - + smtp = - + def test_ehlo(smtp): response, msg = smtp.ehlo() assert response == 250 > assert 0 # for demo purposes E assert 0 - + test_smtpsimple.py:11: AssertionError ========================= 1 failed in 0.12 seconds ========================= @@ -152,9 +152,9 @@ to do this is by loading these data in a fixture for use by your tests. This makes use of the automatic caching mechanisms of pytest. Another good approach is by adding the data files in the ``tests`` folder. -There are also community plugins available to help managing this aspect of -testing, e.g. `pytest-datadir `__ -and `pytest-datafiles `__. +There are also community plugins available to help manage this aspect of +testing, e.g. `pytest-datadir `__ +and `pytest-datafiles `__. .. _smtpshared: @@ -172,7 +172,7 @@ per test *module* (the default is to invoke once per test *function*). Multiple test functions in a test module will thus each receive the same ``smtp`` fixture instance, thus saving time.
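The fixture under discussion is, roughly, the following sketch (reconstructed from the surrounding docs; the full example is elided from this hunk)::

    # content of conftest.py
    import smtplib

    import pytest


    @pytest.fixture(scope="module")
    def smtp():
        # created once per test module and shared by all tests in it
        return smtplib.SMTP("smtp.gmail.com", 587, timeout=5)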
-The next example puts the fixture function into a separate ``conftest.py`` file +The next example puts the fixture function into a separate ``conftest.py`` file so that tests from multiple test modules in the directory can access the fixture function:: @@ -209,32 +209,32 @@ inspect what is going on and can now run the tests:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - + test_module.py FF [100%] - + ================================= FAILURES ================================= ________________________________ test_ehlo _________________________________ - + smtp = - + def test_ehlo(smtp): response, msg = smtp.ehlo() assert response == 250 assert b"smtp.gmail.com" in msg > assert 0 # for demo purposes E assert 0 - + test_module.py:6: AssertionError ________________________________ test_noop _________________________________ - + smtp = - + def test_noop(smtp): response, msg = smtp.noop() assert response == 250 > assert 0 # for demo purposes E assert 0 - + test_module.py:11: AssertionError ========================= 2 failed in 0.12 seconds ========================= @@ -331,7 +331,7 @@ Let's execute it:: $ pytest -s -q --tb=no FFteardown smtp - + 2 failed in 0.12 seconds We see that the ``smtp`` instance is finalized after the two @@ -436,7 +436,7 @@ again, nothing much has changed:: $ pytest -s -q --tb=no FFfinalizing (smtp.gmail.com) - + 2 failed in 0.12 seconds Let's quickly create another test module that actually sets the @@ -504,51 +504,51 @@ So let's just do another run:: FFFF [100%] ================================= FAILURES ================================= ________________________ test_ehlo[smtp.gmail.com] _________________________ - + smtp = - + def test_ehlo(smtp): response, msg = smtp.ehlo() assert response == 250 assert b"smtp.gmail.com" in msg > assert 0 # for demo purposes E assert 0 - + test_module.py:6: AssertionError ________________________ test_noop[smtp.gmail.com] _________________________ - + smtp = - + def test_noop(smtp): response, msg = smtp.noop() assert response == 250 > assert 0 # for demo purposes E assert 0 - + test_module.py:11: AssertionError ________________________ test_ehlo[mail.python.org] ________________________ - + smtp = - + def test_ehlo(smtp): response, msg = smtp.ehlo() assert response == 250 > assert b"smtp.gmail.com" in msg E AssertionError: assert b'smtp.gmail.com' in b'mail.python.org\nPIPELINING\nSIZE 51200000\nETRN\nSTARTTLS\nAUTH DIGEST-MD5 NTLM CRAM-MD5\nENHANCEDSTATUSCODES\n8BITMIME\nDSN\nSMTPUTF8' - + test_module.py:5: AssertionError -------------------------- Captured stdout setup --------------------------- finalizing ________________________ test_noop[mail.python.org] ________________________ - + smtp = - + def test_noop(smtp): response, msg = smtp.noop() assert response == 250 > assert 0 # for demo purposes E assert 0 - + test_module.py:11: AssertionError ------------------------- Captured stdout teardown ------------------------- finalizing @@ -620,7 +620,7 @@ Running the above tests results in the following test IDs being used:: - + ======================= no tests ran in 0.12 seconds ======================= .. _`fixture-parametrize-marks`: @@ -650,11 +650,11 @@ Running this test will *skip* the invocation of ``data_set`` with value ``2``:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... 
collected 3 items - + test_fixture_marks.py::test_data[0] PASSED [ 33%] test_fixture_marks.py::test_data[1] PASSED [ 66%] test_fixture_marks.py::test_data[2] SKIPPED [100%] - + =================== 2 passed, 1 skipped in 0.12 seconds ==================== .. _`interdependent fixtures`: @@ -693,10 +693,10 @@ Here we declare an ``app`` fixture which receives the previously defined cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 2 items - + test_appsetup.py::test_smtp_exists[smtp.gmail.com] PASSED [ 50%] test_appsetup.py::test_smtp_exists[mail.python.org] PASSED [100%] - + ========================= 2 passed in 0.12 seconds ========================= Due to the parametrization of ``smtp`` the test will run twice with two @@ -762,26 +762,26 @@ Let's run the tests in verbose mode and with looking at the print-output:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 8 items - + test_module.py::test_0[1] SETUP otherarg 1 RUN test0 with otherarg 1 PASSED TEARDOWN otherarg 1 - + test_module.py::test_0[2] SETUP otherarg 2 RUN test0 with otherarg 2 PASSED TEARDOWN otherarg 2 - + test_module.py::test_1[mod1] SETUP modarg mod1 RUN test1 with modarg mod1 PASSED test_module.py::test_2[mod1-1] SETUP otherarg 1 RUN test2 with otherarg 1 and modarg mod1 PASSED TEARDOWN otherarg 1 - + test_module.py::test_2[mod1-2] SETUP otherarg 2 RUN test2 with otherarg 2 and modarg mod1 PASSED TEARDOWN otherarg 2 - + test_module.py::test_1[mod2] TEARDOWN modarg mod1 SETUP modarg mod2 RUN test1 with modarg mod2 @@ -789,13 +789,13 @@ Let's run the tests in verbose mode and with looking at the print-output:: test_module.py::test_2[mod2-1] SETUP otherarg 1 RUN test2 with otherarg 1 and modarg mod2 PASSED TEARDOWN otherarg 1 - + test_module.py::test_2[mod2-2] SETUP otherarg 2 RUN test2 with otherarg 2 and modarg mod2 PASSED TEARDOWN otherarg 2 TEARDOWN modarg mod2 - - + + ========================= 8 passed in 0.12 seconds ========================= You can see that the parametrized module-scoped ``modarg`` resource caused an diff --git a/doc/en/funcarg_compare.rst b/doc/en/funcarg_compare.rst index b857a014d..c29ba1f3c 100644 --- a/doc/en/funcarg_compare.rst +++ b/doc/en/funcarg_compare.rst @@ -5,9 +5,9 @@ pytest-2.3: reasoning for fixture/funcarg evolution ============================================================= -**Target audience**: Reading this document requires basic knowledge of -python testing, xUnit setup methods and the (previous) basic pytest -funcarg mechanism, see http://pytest.org/2.2.4/funcargs.html +**Target audience**: Reading this document requires basic knowledge of +python testing, xUnit setup methods and the (previous) basic pytest +funcarg mechanism, see http://pytest.org/2.2.4/funcargs.html If you are new to pytest, then you can simply ignore this section and read the other sections. @@ -18,12 +18,12 @@ Shortcomings of the previous ``pytest_funcarg__`` mechanism The pre pytest-2.3 funcarg mechanism calls a factory each time a funcarg for a test function is required. If a factory wants to -re-use a resource across different scopes, it often used -the ``request.cached_setup()`` helper to manage caching of -resources. Here is a basic example how we could implement +re-use a resource across different scopes, it often used +the ``request.cached_setup()`` helper to manage caching of +resources. 
Here is a basic example of how we could implement a per-session Database object:: - # content of conftest.py + # content of conftest.py class Database(object): def __init__(self): print ("database instance created") @@ -31,7 +31,7 @@ a per-session Database object:: print ("database instance destroyed") def pytest_funcarg__db(request): - return request.cached_setup(setup=DataBase, + return request.cached_setup(setup=Database, teardown=lambda db: db.destroy(), scope="session") @@ -40,13 +40,13 @@ There are several limitations and difficulties with this approach: 1. Scoping funcarg resource creation is not straightforward, instead one must understand the intricate cached_setup() method mechanics. -2. parametrizing the "db" resource is not straight forward: +2. parametrizing the "db" resource is not straightforward: you need to apply a "parametrize" decorator or implement a - :py:func:`~hookspec.pytest_generate_tests` hook + :py:func:`~hookspec.pytest_generate_tests` hook calling :py:func:`~python.Metafunc.parametrize` which - performs parametrization at the places where the resource - is used. Moreover, you need to modify the factory to use an - ``extrakey`` parameter containing ``request.param`` to the + performs parametrization at the places where the resource + is used. Moreover, you need to modify the factory to use an + ``extrakey`` parameter containing ``request.param`` to the :py:func:`~python.Request.cached_setup` call. 3. Multiple parametrized session-scoped resources will be active @@ -56,7 +56,7 @@ There are several limitations and difficulties with this approach: 4. there is no way to make use of funcarg factories in xUnit setup methods. -5. A non-parametrized fixture function cannot use a parametrized +5. A non-parametrized fixture function cannot use a parametrized funcarg resource if it isn't stated in the test function signature. All of these limitations are addressed with pytest-2.3 and its @@ -72,18 +72,18 @@ the scope:: @pytest.fixture(scope="session") def db(request): - # factory will only be invoked once per session - + # factory will only be invoked once per session - db = Database() request.addfinalizer(db.destroy) # destroy when session is finished return db This factory implementation does not need to call ``cached_setup()`` anymore -because it will only be invoked once per session. Moreover, the +because it will only be invoked once per session. Moreover, the ``request.addfinalizer()`` registers a finalizer according to the specified resource scope on which the factory function is operating. -Direct parametrization of funcarg resource factories +Direct parametrization of funcarg resource factories ---------------------------------------------------------- Previously, funcarg factories could not directly cause parametrization. @@ -96,9 +96,9 @@ sets. pytest-2.3 introduces a decorator for use on the factory itself:: def db(request): ... # use request.param -Here the factory will be invoked twice (with the respective "mysql" -and "pg" values set as ``request.param`` attributes) and all of -the tests requiring "db" will run twice as well. The "mysql" and +Here the factory will be invoked twice (with the respective "mysql" +and "pg" values set as ``request.param`` attributes) and all of +the tests requiring "db" will run twice as well. The "mysql" and "pg" values will also be used for reporting the test-invocation variants.
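Spelled out, the parametrized factory reads as follows; ``connect_to`` is a hypothetical helper standing in for whatever creates the backend-specific resource::

    @pytest.fixture(scope="session", params=["mysql", "pg"])
    def db(request):
        # the factory runs once per parameter; every test requiring
        # "db" therefore runs once against "mysql" and once against "pg"
        return connect_to(request.param)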
This new way of parametrizing funcarg factories should in many cases @@ -136,7 +136,7 @@ argument:: The name under which the funcarg resource can be requested is ``db``. -You can still use the "old" non-decorator way of specifying funcarg factories +You can still use the "old" non-decorator way of specifying funcarg factories aka:: def pytest_funcarg__db(request): @@ -156,10 +156,10 @@ several problems: 1. in distributed testing the master process would set up test resources that are never needed because it only co-ordinates the test run - activities of the slave processes. + activities of the slave processes. -2. if you only perform a collection (with "--collect-only") - resource-setup will still be executed. +2. if you only perform a collection (with "--collect-only") + resource-setup will still be executed. 3. If a pytest_sessionstart is contained in some subdirectory's conftest.py file, it will not be called. This stems from the @@ -194,17 +194,17 @@ overview of fixture management in your project. Conclusion and compatibility notes --------------------------------------------------------- -**funcargs** were originally introduced to pytest-2.0. In pytest-2.3 +**funcargs** were originally introduced in pytest-2.0. In pytest-2.3 the mechanism was extended and refined and is now described as fixtures: -* previously funcarg factories were specified with a special - ``pytest_funcarg__NAME`` prefix instead of using the +* previously funcarg factories were specified with a special + ``pytest_funcarg__NAME`` prefix instead of using the ``@pytest.fixture`` decorator. * Factories received a ``request`` object which managed caching through - ``request.cached_setup()`` calls and allowed using other funcargs via - ``request.getfuncargvalue()`` calls. These intricate APIs made it hard + ``request.cached_setup()`` calls and allowed using other funcargs via + ``request.getfuncargvalue()`` calls. These intricate APIs made it hard to do proper parametrization and implement resource caching. The new :py:func:`pytest.fixture` decorator allows you to declare the scope and lets pytest figure things out for you. @@ -212,7 +212,5 @@ fixtures: * if you used parametrization and funcarg factories which made use of ``request.cached_setup()`` it is recommended to invest a few minutes and simplify your fixture function code to use the :ref:`@pytest.fixture` - decorator instead. This will also allow to take advantage of + decorator instead. This will also allow you to take advantage of the automatic per-resource grouping of tests.
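As a before/after sketch of that migration, reusing the ``Database`` class from the ``conftest.py`` example earlier in this document::

    # before: pre-2.3 funcarg factory with explicit caching
    def pytest_funcarg__db(request):
        return request.cached_setup(
            setup=Database, teardown=lambda db: db.destroy(), scope="session"
        )

    # after: a fixture; pytest itself caches one instance per session
    import pytest


    @pytest.fixture(scope="session")
    def db(request):
        db = Database()
        request.addfinalizer(db.destroy)  # destroy when session is finished
        return db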
- - diff --git a/doc/en/funcargs.rst b/doc/en/funcargs.rst index bc2c04302..4173675cd 100644 --- a/doc/en/funcargs.rst +++ b/doc/en/funcargs.rst @@ -11,4 +11,3 @@ and you can read on here: - :ref:`fixtures` - :ref:`parametrize` - :ref:`funcargcompare` - diff --git a/doc/en/genapi.py b/doc/en/genapi.py index 0ede44fa2..edbf49f2c 100644 --- a/doc/en/genapi.py +++ b/doc/en/genapi.py @@ -1,7 +1,10 @@ +from __future__ import print_function import textwrap import inspect + class Writer(object): + def __init__(self, clsname): self.clsname = clsname @@ -11,25 +14,24 @@ class Writer(object): def __exit__(self, *args): self.file.close() - print "wrote", self.file.name + print("wrote", self.file.name) def line(self, line): - self.file.write(line+"\n") + self.file.write(line + "\n") def docmethod(self, method): doc = " ".join(method.__doc__.split()) indent = " " - w = textwrap.TextWrapper(initial_indent=indent, - subsequent_indent=indent) + w = textwrap.TextWrapper(initial_indent=indent, subsequent_indent=indent) spec = inspect.getargspec(method) del spec.args[0] - self.line(".. py:method:: " + method.__name__ + - inspect.formatargspec(*spec)) + self.line(".. py:method:: " + method.__name__ + inspect.formatargspec(*spec)) self.line("") self.line(w.fill(doc)) self.line("") + def pytest_funcarg__a(request): with Writer("request") as writer: writer.docmethod(request.getfixturevalue) @@ -37,5 +39,6 @@ def pytest_funcarg__a(request): writer.docmethod(request.addfinalizer) writer.docmethod(request.applymarker) + def test_hello(a): pass diff --git a/doc/en/getting-started.rst b/doc/en/getting-started.rst index aae0bf971..f2dbec5e9 100644 --- a/doc/en/getting-started.rst +++ b/doc/en/getting-started.rst @@ -50,17 +50,17 @@ That’s it. You can now execute the test function:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - + test_sample.py F [100%] - + ================================= FAILURES ================================= _______________________________ test_answer ________________________________ - + def test_answer(): > assert func(3) == 5 E assert 4 == 5 E + where 4 = func(3) - + test_sample.py:5: AssertionError ========================= 1 failed in 0.12 seconds ========================= @@ -117,15 +117,15 @@ Once you develop multiple tests, you may want to group them into a class. 
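The class being run here is the docs' ``test_class.py`` example, reconstructed from the ``.F`` outcome and the failure output that follows::

    # content of test_class.py
    class TestClass(object):
        def test_one(self):
            x = "this"
            assert "h" in x

        def test_two(self):
            x = "hello"
            assert hasattr(x, "check")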
pytest .F [100%] ================================= FAILURES ================================= ____________________________ TestClass.test_two ____________________________ - + self = - + def test_two(self): x = "hello" > assert hasattr(x, 'check') E AssertionError: assert False E + where False = hasattr('hello', 'check') - + test_class.py:8: AssertionError 1 failed, 1 passed in 0.12 seconds @@ -147,14 +147,14 @@ List the name ``tmpdir`` in the test function signature and ``pytest`` will look F [100%] ================================= FAILURES ================================= _____________________________ test_needsfiles ______________________________ - + tmpdir = local('PYTEST_TMPDIR/test_needsfiles0') - + def test_needsfiles(tmpdir): print (tmpdir) > assert 0 E assert 0 - + test_tmpdir.py:3: AssertionError --------------------------- Captured stdout call --------------------------- PYTEST_TMPDIR/test_needsfiles0 diff --git a/doc/en/index.rst b/doc/en/index.rst index 66c59f08d..dc06b7932 100644 --- a/doc/en/index.rst +++ b/doc/en/index.rst @@ -28,17 +28,17 @@ To execute it:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - + test_sample.py F [100%] - + ================================= FAILURES ================================= _______________________________ test_answer ________________________________ - + def test_answer(): > assert inc(3) == 5 E assert 4 == 5 E + where 4 = inc(3) - + test_sample.py:5: AssertionError ========================= 1 failed in 0.12 seconds ========================= diff --git a/doc/en/mark.rst b/doc/en/mark.rst index f07917653..7f39fd728 100644 --- a/doc/en/mark.rst +++ b/doc/en/mark.rst @@ -136,4 +136,3 @@ More details can be found in the `original PR `_ for details. - + .. currentmodule:: _pytest.monkeypatch diff --git a/doc/en/nose.rst b/doc/en/nose.rst index 31dffeb3f..1f7b7b638 100644 --- a/doc/en/nose.rst +++ b/doc/en/nose.rst @@ -70,6 +70,3 @@ Unsupported idioms / known issues There are no plans to fix this currently because ``yield``-tests are deprecated in pytest 3.0, with ``pytest.mark.parametrize`` being the recommended alternative. - - - diff --git a/doc/en/parametrize.rst b/doc/en/parametrize.rst index ba2cd3cce..693cf1913 100644 --- a/doc/en/parametrize.rst +++ b/doc/en/parametrize.rst @@ -11,13 +11,13 @@ Parametrizing fixtures and test functions pytest enables test parametrization at several levels: -- :py:func:`pytest.fixture` allows one to :ref:`parametrize fixture +- :py:func:`pytest.fixture` allows one to :ref:`parametrize fixture functions `. -* `@pytest.mark.parametrize`_ allows one to define multiple sets of +* `@pytest.mark.parametrize`_ allows one to define multiple sets of arguments and fixtures at the test function or class. -* `pytest_generate_tests`_ allows one to define custom parametrization +* `pytest_generate_tests`_ allows one to define custom parametrization schemes or extensions. .. 
_parametrizemark: @@ -57,14 +57,14 @@ them in turn:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items - + test_expectation.py ..F [100%] - + ================================= FAILURES ================================= ____________________________ test_eval[6*9-42] _____________________________ - + test_input = '6*9', expected = 42 - + @pytest.mark.parametrize("test_input,expected", [ ("3+5", 8), ("2+4", 6), @@ -74,7 +74,7 @@ them in turn:: > assert eval(test_input) == expected E AssertionError: assert 54 == 42 E + where 54 = eval('6*9') - + test_expectation.py:8: AssertionError ==================== 1 failed, 2 passed in 0.12 seconds ==================== @@ -106,9 +106,9 @@ Let's run this:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items - + test_expectation.py ..x [100%] - + =================== 2 passed, 1 xfailed in 0.12 seconds ==================== The one parameter set which caused a failure previously now @@ -123,7 +123,7 @@ To get all combinations of multiple parametrized arguments you can stack def test_foo(x, y): pass -This will run the test with the arguments set to ``x=0/y=2``, ``x=1/y=2``, +This will run the test with the arguments set to ``x=0/y=2``, ``x=1/y=2``, ``x=0/y=3``, and ``x=1/y=3`` exhausting parameters in the order of the decorators. .. _`pytest_generate_tests`: @@ -174,15 +174,15 @@ Let's also run with a stringinput that will lead to a failing test:: F [100%] ================================= FAILURES ================================= ___________________________ test_valid_string[!] ___________________________ - + stringinput = '!' - + def test_valid_string(stringinput): > assert stringinput.isalpha() E AssertionError: assert False E + where False = () E + where = '!'.isalpha - + test_strings.py:3: AssertionError 1 failed in 0.12 seconds @@ -198,7 +198,7 @@ list:: SKIP [1] test_strings.py: got empty parameter set ['stringinput'], function test_valid_string at $REGENDOC_TMPDIR/test_strings.py:1 1 skipped in 0.12 seconds -Note that when calling ``metafunc.parametrize`` multiple times with different parameter sets, all parameter names across +Note that when calling ``metafunc.parametrize`` multiple times with different parameter sets, all parameter names across those sets cannot be duplicated, otherwise an error will be raised. More examples diff --git a/doc/en/skipping.rst b/doc/en/skipping.rst index 9bac02c8c..4c052e346 100644 --- a/doc/en/skipping.rst +++ b/doc/en/skipping.rst @@ -334,12 +334,12 @@ Running it with the report-on-xfail option gives this output:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR/example, inifile: collected 7 items - + xfail_demo.py xxxxxxx [100%] ========================= short test summary info ========================== XFAIL xfail_demo.py::test_hello XFAIL xfail_demo.py::test_hello2 - reason: [NOTRUN] + reason: [NOTRUN] XFAIL xfail_demo.py::test_hello3 condition: hasattr(os, 'sep') XFAIL xfail_demo.py::test_hello4 @@ -349,7 +349,7 @@ Running it with the report-on-xfail option gives this output:: XFAIL xfail_demo.py::test_hello6 reason: reason XFAIL xfail_demo.py::test_hello7 - + ======================== 7 xfailed in 0.12 seconds ========================= .. 
_`skip/xfail with parametrize`: diff --git a/doc/en/talks.rst b/doc/en/talks.rst index bf593db4b..79534ea6e 100644 --- a/doc/en/talks.rst +++ b/doc/en/talks.rst @@ -98,8 +98,3 @@ Plugin specific examples: .. _`parametrizing tests, generalized`: http://tetamap.wordpress.com/2009/05/13/parametrizing-python-tests-generalized/ .. _`generating parametrized tests with fixtures`: parametrize.html#test-generators .. _`test generators and cached setup`: http://bruynooghe.blogspot.com/2010/06/pytest-test-generators-and-cached-setup.html - - - - - diff --git a/doc/en/test/config.html b/doc/en/test/config.html index cba5a46f9..d452e78b0 100644 --- a/doc/en/test/config.html +++ b/doc/en/test/config.html @@ -15,4 +15,3 @@ pageTracker._trackPageview(); } catch(err) {} - diff --git a/doc/en/test/dist.html b/doc/en/test/dist.html index e328550a8..7b4aee979 100644 --- a/doc/en/test/dist.html +++ b/doc/en/test/dist.html @@ -15,4 +15,3 @@ pageTracker._trackPageview(); } catch(err) {} - diff --git a/doc/en/test/extend.html b/doc/en/test/extend.html index cba5a46f9..d452e78b0 100644 --- a/doc/en/test/extend.html +++ b/doc/en/test/extend.html @@ -15,4 +15,3 @@ pageTracker._trackPageview(); } catch(err) {} - diff --git a/doc/en/test/index.rst b/doc/en/test/index.rst index 1a3b5a54d..b4c6d9ca7 100644 --- a/doc/en/test/index.rst +++ b/doc/en/test/index.rst @@ -31,5 +31,3 @@ changelog_: history of changes covering last releases .. _features: features.html .. _funcargs: funcargs.html .. _customize: customize.html - - diff --git a/doc/en/test/plugin/coverage.rst b/doc/en/test/plugin/coverage.rst index 71139d008..13a6be89b 100644 --- a/doc/en/test/plugin/coverage.rst +++ b/doc/en/test/plugin/coverage.rst @@ -6,22 +6,22 @@ Write and report coverage data with the 'coverage' package. .. contents:: :local: -Note: Original code by Ross Lawley. +Note: Original code by Ross Lawley. Install -------------- Use pip to (un)install:: - pip install pytest-coverage - pip uninstall pytest-coverage + pip install pytest-coverage + pip uninstall pytest-coverage or alternatively use easy_install to install:: - easy_install pytest-coverage + easy_install pytest-coverage -Usage +Usage ------------- To get full test coverage reports for a particular package type:: diff --git a/doc/en/test/plugin/django.rst b/doc/en/test/plugin/django.rst index 061497b38..59ef1e1cc 100644 --- a/doc/en/test/plugin/django.rst +++ b/doc/en/test/plugin/django.rst @@ -4,4 +4,3 @@ pytest_django plugin (EXTERNAL) pytest_django is a plugin for ``pytest`` that provides a set of useful tools for testing Django applications, checkout Ben Firshman's `pytest_django github page`_. .. 
_`pytest_django github page`: http://github.com/bfirsh/pytest_django/tree/master - diff --git a/doc/en/test/plugin/figleaf.rst b/doc/en/test/plugin/figleaf.rst index 0c1603ade..cff3d0484 100644 --- a/doc/en/test/plugin/figleaf.rst +++ b/doc/en/test/plugin/figleaf.rst @@ -12,7 +12,7 @@ Install To install the plugin issue:: easy_install pytest-figleaf # or - pip install pytest-figleaf + pip install pytest-figleaf and if you are using pip you can also uninstall:: diff --git a/doc/en/test/test.html b/doc/en/test/test.html index 7d00f718a..a1833bb9e 100644 --- a/doc/en/test/test.html +++ b/doc/en/test/test.html @@ -15,4 +15,3 @@ pageTracker._trackPageview(); } catch(err) {} - diff --git a/doc/en/tmpdir.rst b/doc/en/tmpdir.rst index 2a53adad9..a6aa1422f 100644 --- a/doc/en/tmpdir.rst +++ b/doc/en/tmpdir.rst @@ -32,14 +32,14 @@ Running this would result in a passed test except for the last platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - + test_tmpdir.py F [100%] - + ================================= FAILURES ================================= _____________________________ test_create_file _____________________________ - + tmpdir = local('PYTEST_TMPDIR/test_create_file0') - + def test_create_file(tmpdir): p = tmpdir.mkdir("sub").join("hello.txt") p.write("content") @@ -47,7 +47,7 @@ Running this would result in a passed test except for the last assert len(tmpdir.listdir()) == 1 > assert 0 E assert 0 - + test_tmpdir.py:7: AssertionError ========================= 1 failed in 0.12 seconds ========================= diff --git a/doc/en/unittest.rst b/doc/en/unittest.rst index 1bc33ab19..53192b346 100644 --- a/doc/en/unittest.rst +++ b/doc/en/unittest.rst @@ -92,18 +92,18 @@ it from a unittest-style test:: def db_class(request): class DummyDB(object): pass - # set a class attribute on the invoking test context + # set a class attribute on the invoking test context request.cls.db = DummyDB() -This defines a fixture function ``db_class`` which - if used - is -called once for each test class and which sets the class-level +This defines a fixture function ``db_class`` which - if used - is +called once for each test class and which sets the class-level ``db`` attribute to a ``DummyDB`` instance. The fixture function achieves this by receiving a special ``request`` object which gives access to :ref:`the requesting test context ` such -as the ``cls`` attribute, denoting the class from which the fixture +as the ``cls`` attribute, denoting the class from which the fixture is used. This architecture de-couples fixture writing from actual test code and allows re-use of the fixture by a minimal reference, the fixture -name. So let's write an actual ``unittest.TestCase`` class using our +name. So let's write an actual ``unittest.TestCase`` class using our fixture definition:: # content of test_unittest_db.py @@ -120,7 +120,7 @@ fixture definition:: def test_method2(self): assert 0, self.db # fail for demo purposes -The ``@pytest.mark.usefixtures("db_class")`` class-decorator makes sure that +The ``@pytest.mark.usefixtures("db_class")`` class-decorator makes sure that the pytest fixture function ``db_class`` is called once per class. 
Due to the deliberately failing assert statements, we can take a look at the ``self.db`` values in the traceback:: @@ -130,30 +130,30 @@ the ``self.db`` values in the traceback:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - + test_unittest_db.py FF [100%] - + ================================= FAILURES ================================= ___________________________ MyTest.test_method1 ____________________________ - + self = - + def test_method1(self): assert hasattr(self, "db") > assert 0, self.db # fail for demo purposes E AssertionError: .DummyDB object at 0xdeadbeef> E assert 0 - + test_unittest_db.py:9: AssertionError ___________________________ MyTest.test_method2 ____________________________ - + self = - + def test_method2(self): > assert 0, self.db # fail for demo purposes E AssertionError: .DummyDB object at 0xdeadbeef> E assert 0 - + test_unittest_db.py:12: AssertionError ========================= 2 failed in 0.12 seconds ========================= @@ -166,10 +166,10 @@ Using autouse fixtures and accessing other fixtures --------------------------------------------------- Although it's usually better to explicitly declare use of fixtures you need -for a given test, you may sometimes want to have fixtures that are -automatically used in a given context. After all, the traditional +for a given test, you may sometimes want to have fixtures that are +automatically used in a given context. After all, the traditional style of unittest-setup mandates the use of this implicit fixture writing -and chances are, you are used to it or like it. +and chances are, you are used to it or like it. You can flag fixture functions with ``@pytest.fixture(autouse=True)`` and define the fixture function in the context where you want it used. diff --git a/doc/en/usage.rst b/doc/en/usage.rst index 667eaf8ee..23747aff7 100644 --- a/doc/en/usage.rst +++ b/doc/en/usage.rst @@ -111,9 +111,9 @@ For more information see :ref:`marks `. :: pytest --pyargs pkg.testing - + This will import ``pkg.testing`` and use its filesystem location to find and run tests from. - + Modifying Python traceback printing ---------------------------------------------- @@ -195,7 +195,7 @@ in your code and pytest automatically disables its output capture for that test: Using the builtin breakpoint function ------------------------------------- -Python 3.7 introduces a builtin ``breakpoint()`` function. +Python 3.7 introduces a builtin ``breakpoint()`` function. Pytest supports the use of ``breakpoint()`` with the following behaviours: - When ``breakpoint()`` is called and ``PYTHONBREAKPOINT`` is set to the default value, pytest will use the custom internal PDB trace UI instead of the system default ``Pdb``. @@ -496,7 +496,7 @@ hook was invoked:: $ python myinvoke.py . [100%]*** test run reporting finishing - + .. note:: diff --git a/doc/en/warnings.rst b/doc/en/warnings.rst index f7b67f5f2..e78a6afc0 100644 --- a/doc/en/warnings.rst +++ b/doc/en/warnings.rst @@ -25,14 +25,14 @@ Running pytest now produces this output:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - + test_show_warnings.py . 
[100%] - + ============================= warnings summary ============================= test_show_warnings.py::test_one $REGENDOC_TMPDIR/test_show_warnings.py:4: UserWarning: api v1, should use functions from v2 warnings.warn(UserWarning("api v1, should use functions from v2")) - + -- Docs: http://doc.pytest.org/en/latest/warnings.html =================== 1 passed, 1 warnings in 0.12 seconds =================== @@ -45,17 +45,17 @@ them into errors:: F [100%] ================================= FAILURES ================================= _________________________________ test_one _________________________________ - + def test_one(): > assert api_v1() == 1 - - test_show_warnings.py:8: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - + + test_show_warnings.py:8: + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + def api_v1(): > warnings.warn(UserWarning("api v1, should use functions from v2")) E UserWarning: api v1, should use functions from v2 - + test_show_warnings.py:4: UserWarning 1 failed in 0.12 seconds diff --git a/doc/en/writing_plugins.rst b/doc/en/writing_plugins.rst index 67c885efb..2ef760118 100644 --- a/doc/en/writing_plugins.rst +++ b/doc/en/writing_plugins.rst @@ -592,8 +592,3 @@ declaring the hook functions directly in your plugin module, for example:: This has the added benefit of allowing you to conditionally install hooks depending on which plugins are installed. - - - - - diff --git a/doc/en/xunit_setup.rst b/doc/en/xunit_setup.rst index 148fb1209..7a6c099f5 100644 --- a/doc/en/xunit_setup.rst +++ b/doc/en/xunit_setup.rst @@ -6,7 +6,7 @@ classic xunit-style setup ======================================== This section describes a classic and popular way how you can implement -fixtures (setup and teardown test state) on a per-module/class/function basis. +fixtures (setup and teardown test state) on a per-module/class/function basis. .. 
note:: diff --git a/extra/get_issues.py b/extra/get_issues.py index 99378b2f5..c026972b1 100644 --- a/extra/get_issues.py +++ b/extra/get_issues.py @@ -1,10 +1,9 @@ import json import py +import requests issues_url = "https://api.github.com/repos/pytest-dev/pytest/issues" -import requests - def get_issues(): issues = [] @@ -15,16 +14,16 @@ def get_issues(): data = r.json() if r.status_code == 403: # API request limit exceeded - print(data['message']) + print(data["message"]) exit(1) issues.extend(data) # Look for next page - links = requests.utils.parse_header_links(r.headers['Link']) + links = requests.utils.parse_header_links(r.headers["Link"]) another_page = False for link in links: - if link['rel'] == 'next': - url = link['url'] + if link["rel"] == "next": + url = link["url"] another_page = True if not another_page: return issues @@ -45,17 +44,17 @@ def main(args): def _get_kind(issue): - labels = [l['name'] for l in issue['labels']] - for key in ('bug', 'enhancement', 'proposal'): + labels = [l["name"] for l in issue["labels"]] + for key in ("bug", "enhancement", "proposal"): if key in labels: return key - return 'issue' + return "issue" def report(issues): for issue in issues: title = issue["title"] - body = issue["body"] + # body = issue["body"] kind = _get_kind(issue) status = issue["state"] number = issue["number"] @@ -63,21 +62,23 @@ def report(issues): print("----") print(status, kind, link) print(title) - #print() - #lines = body.split("\n") - #print ("\n".join(lines[:3])) - #if len(lines) > 3 or len(body) > 240: + # print() + # lines = body.split("\n") + # print ("\n".join(lines[:3])) + # if len(lines) > 3 or len(body) > 240: # print ("...") print("\n\nFound %s open issues" % len(issues)) if __name__ == "__main__": import argparse + parser = argparse.ArgumentParser("process bitbucket issues") - parser.add_argument("--refresh", action="store_true", - help="invalidate cache, refresh issues") - parser.add_argument("--cache", action="store", default="issues.json", - help="cache file") + parser.add_argument( + "--refresh", action="store_true", help="invalidate cache, refresh issues" + ) + parser.add_argument( + "--cache", action="store", default="issues.json", help="cache file" + ) args = parser.parse_args() main(args) - diff --git a/pytest.py b/pytest.py index d3aebbff9..f27f5a195 100644 --- a/pytest.py +++ b/pytest.py @@ -6,10 +6,7 @@ pytest: unit and functional testing with Python. 
# else we are imported -from _pytest.config import ( - main, UsageError, cmdline, - hookspec, hookimpl -) +from _pytest.config import main, UsageError, cmdline, hookspec, hookimpl from _pytest.fixtures import fixture, yield_fixture from _pytest.assertion import register_assert_rewrite from _pytest.freeze_support import freeze_includes @@ -21,58 +18,55 @@ from _pytest.mark import MARK_GEN as mark, param from _pytest.main import Session from _pytest.nodes import Item, Collector, File from _pytest.fixtures import fillfixtures as _fillfuncargs -from _pytest.python import ( - Module, Class, Instance, Function, Generator, -) +from _pytest.python import Module, Class, Instance, Function, Generator from _pytest.python_api import approx, raises set_trace = __pytestPDB.set_trace __all__ = [ - 'main', - 'UsageError', - 'cmdline', - 'hookspec', - 'hookimpl', - '__version__', - 'register_assert_rewrite', - 'freeze_includes', - 'set_trace', - 'warns', - 'deprecated_call', - 'fixture', - 'yield_fixture', - 'fail', - 'skip', - 'xfail', - 'importorskip', - 'exit', - 'mark', - 'param', - 'approx', - '_fillfuncargs', - - 'Item', - 'File', - 'Collector', - 'Session', - 'Module', - 'Class', - 'Instance', - 'Function', - 'Generator', - 'raises', - - + "main", + "UsageError", + "cmdline", + "hookspec", + "hookimpl", + "__version__", + "register_assert_rewrite", + "freeze_includes", + "set_trace", + "warns", + "deprecated_call", + "fixture", + "yield_fixture", + "fail", + "skip", + "xfail", + "importorskip", + "exit", + "mark", + "param", + "approx", + "_fillfuncargs", + "Item", + "File", + "Collector", + "Session", + "Module", + "Class", + "Instance", + "Function", + "Generator", + "raises", ] -if __name__ == '__main__': +if __name__ == "__main__": # if run as a script or by 'python -m pytest' # we trigger the below "else" condition by the following import import pytest + raise SystemExit(pytest.main()) else: from _pytest.compat import _setup_collect_fakemodule + _setup_collect_fakemodule() diff --git a/scripts/check-rst.py b/scripts/check-rst.py index 57f717501..b7aeccc07 100644 --- a/scripts/check-rst.py +++ b/scripts/check-rst.py @@ -5,7 +5,16 @@ import subprocess import glob import sys -sys.exit(subprocess.call([ - 'rst-lint', '--encoding', 'utf-8', - 'CHANGELOG.rst', 'HOWTORELEASE.rst', 'README.rst', -] + glob.glob('changelog/[0-9]*.*'))) +sys.exit( + subprocess.call( + [ + "rst-lint", + "--encoding", + "utf-8", + "CHANGELOG.rst", + "HOWTORELEASE.rst", + "README.rst", + ] + + glob.glob("changelog/[0-9]*.*") + ) +) diff --git a/setup.py b/setup.py index ffccbaa87..1e67f27f0 100644 --- a/setup.py +++ b/setup.py @@ -5,21 +5,21 @@ import pkg_resources from setuptools import setup, Command classifiers = [ - 'Development Status :: 6 - Mature', - 'Intended Audience :: Developers', - 'License :: OSI Approved :: MIT License', - 'Operating System :: POSIX', - 'Operating System :: Microsoft :: Windows', - 'Operating System :: MacOS :: MacOS X', - 'Topic :: Software Development :: Testing', - 'Topic :: Software Development :: Libraries', - 'Topic :: Utilities', + "Development Status :: 6 - Mature", + "Intended Audience :: Developers", + "License :: OSI Approved :: MIT License", + "Operating System :: POSIX", + "Operating System :: Microsoft :: Windows", + "Operating System :: MacOS :: MacOS X", + "Topic :: Software Development :: Testing", + "Topic :: Software Development :: Libraries", + "Topic :: Utilities", ] + [ - ('Programming Language :: Python :: %s' % x) - for x in '2 2.7 3 3.4 3.5 3.6 3.7'.split() + 
("Programming Language :: Python :: %s" % x) + for x in "2 2.7 3 3.4 3.5 3.6 3.7".split() ] -with open('README.rst') as fd: +with open("README.rst") as fd: long_description = fd.read() @@ -44,9 +44,9 @@ def get_environment_marker_support_level(): """ try: version = pkg_resources.parse_version(setuptools.__version__) - if version >= pkg_resources.parse_version('36.2.2'): + if version >= pkg_resources.parse_version("36.2.2"): return 2 - if version >= pkg_resources.parse_version('0.7.2'): + if version >= pkg_resources.parse_version("0.7.2"): return 1 except Exception as exc: sys.stderr.write("Could not test setuptool's version: %s\n" % exc) @@ -56,59 +56,57 @@ def get_environment_marker_support_level(): def main(): extras_require = {} install_requires = [ - 'py>=1.5.0', - 'six>=1.10.0', - 'setuptools', - 'attrs>=17.4.0', - 'more-itertools>=4.0.0', - 'atomicwrites>=1.0', + "py>=1.5.0", + "six>=1.10.0", + "setuptools", + "attrs>=17.4.0", + "more-itertools>=4.0.0", + "atomicwrites>=1.0", ] # if _PYTEST_SETUP_SKIP_PLUGGY_DEP is set, skip installing pluggy; # used by tox.ini to test with pluggy master - if '_PYTEST_SETUP_SKIP_PLUGGY_DEP' not in os.environ: - install_requires.append('pluggy>=0.5,<0.7') + if "_PYTEST_SETUP_SKIP_PLUGGY_DEP" not in os.environ: + install_requires.append("pluggy>=0.5,<0.7") environment_marker_support_level = get_environment_marker_support_level() if environment_marker_support_level >= 2: install_requires.append('funcsigs;python_version<"3.0"') install_requires.append('colorama;sys_platform=="win32"') elif environment_marker_support_level == 1: - extras_require[':python_version<"3.0"'] = ['funcsigs'] - extras_require[':sys_platform=="win32"'] = ['colorama'] + extras_require[':python_version<"3.0"'] = ["funcsigs"] + extras_require[':sys_platform=="win32"'] = ["colorama"] else: - if sys.platform == 'win32': - install_requires.append('colorama') + if sys.platform == "win32": + install_requires.append("colorama") if sys.version_info < (3, 0): - install_requires.append('funcsigs') + install_requires.append("funcsigs") setup( - name='pytest', - description='pytest: simple powerful testing with Python', + name="pytest", + description="pytest: simple powerful testing with Python", long_description=long_description, - use_scm_version={ - 'write_to': '_pytest/_version.py', - }, - url='http://pytest.org', + use_scm_version={"write_to": "_pytest/_version.py"}, + url="http://pytest.org", project_urls={ - 'Source': 'https://github.com/pytest-dev/pytest', - 'Tracker': 'https://github.com/pytest-dev/pytest/issues', + "Source": "https://github.com/pytest-dev/pytest", + "Tracker": "https://github.com/pytest-dev/pytest/issues", }, - license='MIT license', - platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'], + license="MIT license", + platforms=["unix", "linux", "osx", "cygwin", "win32"], author=( - 'Holger Krekel, Bruno Oliveira, Ronny Pfannschmidt, ' - 'Floris Bruynooghe, Brianna Laugher, Florian Bruhin and others'), - entry_points={'console_scripts': [ - 'pytest=pytest:main', 'py.test=pytest:main']}, + "Holger Krekel, Bruno Oliveira, Ronny Pfannschmidt, " + "Floris Bruynooghe, Brianna Laugher, Florian Bruhin and others" + ), + entry_points={"console_scripts": ["pytest=pytest:main", "py.test=pytest:main"]}, classifiers=classifiers, keywords="test unittest", - cmdclass={'test': PyTest}, + cmdclass={"test": PyTest}, # the following should be enabled for release - setup_requires=['setuptools-scm'], - python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*', + 
+        setup_requires=["setuptools-scm"],
+        python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
         install_requires=install_requires,
         extras_require=extras_require,
-        packages=['_pytest', '_pytest.assertion', '_pytest._code', '_pytest.mark'],
-        py_modules=['pytest'],
+        packages=["_pytest", "_pytest.assertion", "_pytest._code", "_pytest.mark"],
+        py_modules=["pytest"],
         zip_safe=False,
     )

@@ -124,12 +122,13 @@ class PyTest(Command):

     def run(self):
         import subprocess
-        PPATH = [x for x in os.environ.get('PYTHONPATH', '').split(':') if x]
+
+        PPATH = [x for x in os.environ.get("PYTHONPATH", "").split(":") if x]
         PPATH.insert(0, os.getcwd())
-        os.environ['PYTHONPATH'] = ':'.join(PPATH)
-        errno = subprocess.call([sys.executable, 'pytest.py', '--ignore=doc'])
+        os.environ["PYTHONPATH"] = ":".join(PPATH)
+        errno = subprocess.call([sys.executable, "pytest.py", "--ignore=doc"])
         raise SystemExit(errno)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/tasks/__init__.py b/tasks/__init__.py
index 8ea038f0a..ea5b1293e 100644
--- a/tasks/__init__.py
+++ b/tasks/__init__.py
@@ -7,6 +7,4 @@ import invoke

 from . import generate

-ns = invoke.Collection(
-    generate,
-)
+ns = invoke.Collection(generate)
diff --git a/tasks/generate.py b/tasks/generate.py
index 268b36fd6..398af70c9 100644
--- a/tasks/generate.py
+++ b/tasks/generate.py
@@ -7,54 +7,66 @@ from subprocess import check_output, check_call
 import invoke


-@invoke.task(help={
-    'version': 'version being released',
-})
+@invoke.task(help={"version": "version being released"})
 def announce(ctx, version):
     """Generates a new release announcement entry in the docs."""
     # Get our list of authors
-    stdout = check_output(["git", "describe", "--abbrev=0", '--tags'])
-    stdout = stdout.decode('utf-8')
+    stdout = check_output(["git", "describe", "--abbrev=0", "--tags"])
+    stdout = stdout.decode("utf-8")
     last_version = stdout.strip()

-    stdout = check_output(["git", "log", "{}..HEAD".format(last_version), "--format=%aN"])
-    stdout = stdout.decode('utf-8')
+    stdout = check_output(
+        ["git", "log", "{}..HEAD".format(last_version), "--format=%aN"]
+    )
+    stdout = stdout.decode("utf-8")

     contributors = set(stdout.splitlines())

-    template_name = 'release.minor.rst' if version.endswith('.0') else 'release.patch.rst'
-    template_text = Path(__file__).parent.joinpath(template_name).read_text(encoding='UTF-8')
+    template_name = "release.minor.rst" if version.endswith(
+        ".0"
+    ) else "release.patch.rst"
+    template_text = Path(__file__).parent.joinpath(template_name).read_text(
+        encoding="UTF-8"
+    )

-    contributors_text = '\n'.join('* {}'.format(name) for name in sorted(contributors)) + '\n'
+    contributors_text = "\n".join(
+        "* {}".format(name) for name in sorted(contributors)
+    ) + "\n"
     text = template_text.format(version=version, contributors=contributors_text)

-    target = Path(__file__).parent.joinpath('../doc/en/announce/release-{}.rst'.format(version))
-    target.write_text(text, encoding='UTF-8')
+    target = Path(__file__).parent.joinpath(
+        "../doc/en/announce/release-{}.rst".format(version)
+    )
+    target.write_text(text, encoding="UTF-8")

     print("[generate.announce] Generated {}".format(target.name))

     # Update index with the new release entry
-    index_path = Path(__file__).parent.joinpath('../doc/en/announce/index.rst')
-    lines = index_path.read_text(encoding='UTF-8').splitlines()
-    indent = '   '
+    index_path = Path(__file__).parent.joinpath("../doc/en/announce/index.rst")
+    lines = index_path.read_text(encoding="UTF-8").splitlines()
+    indent = "   "
     for index, line in enumerate(lines):
-        if line.startswith('{}release-'.format(indent)):
+        if line.startswith("{}release-".format(indent)):
             new_line = indent + target.stem
             if line != new_line:
                 lines.insert(index, new_line)
-                index_path.write_text('\n'.join(lines) + '\n', encoding='UTF-8')
+                index_path.write_text("\n".join(lines) + "\n", encoding="UTF-8")
                 print("[generate.announce] Updated {}".format(index_path.name))
             else:
-                print("[generate.announce] Skip {} (already contains release)".format(index_path.name))
+                print(
+                    "[generate.announce] Skip {} (already contains release)".format(
+                        index_path.name
+                    )
+                )
             break

-    check_call(['git', 'add', str(target)])
+    check_call(["git", "add", str(target)])


 @invoke.task()
 def regen(ctx):
     """Call regendoc tool to update examples and pytest output in the docs."""
     print("[generate.regen] Updating docs")
-    check_call(['tox', '-e', 'regen'])
+    check_call(["tox", "-e", "regen"])


 @invoke.task()
@@ -62,9 +74,9 @@ def make_tag(ctx, version):
     """Create a new, local tag for the release, only if the repository is clean."""
     from git import Repo

-    repo = Repo('.')
+    repo = Repo(".")
     if repo.is_dirty():
-        print('Current repository is dirty. Please commit any changes and try again.')
+        print("Current repository is dirty. Please commit any changes and try again.")
         raise invoke.Exit(code=2)

     tag_names = [x.name for x in repo.tags]
@@ -76,31 +88,31 @@ def make_tag(ctx, version):
     repo.create_tag(version)


-@invoke.task(help={
-    'version': 'version being released',
-})
+@invoke.task(help={"version": "version being released"})
 def pre_release(ctx, version):
     """Generates new docs, release announcements and creates a local tag."""
     announce(ctx, version)
     regen(ctx)
     changelog(ctx, version, write_out=True)

-    msg = 'Preparing release version {}'.format(version)
-    check_call(['git', 'commit', '-a', '-m', msg])
+    msg = "Preparing release version {}".format(version)
+    check_call(["git", "commit", "-a", "-m", msg])

     make_tag(ctx, version)

     print()
-    print('[generate.pre_release] Please push your branch and open a PR.')
+    print("[generate.pre_release] Please push your branch and open a PR.")


-@invoke.task(help={
-    'version': 'version being released',
-    'write_out': 'write changes to the actual changelog'
-})
+@invoke.task(
+    help={
+        "version": "version being released",
+        "write_out": "write changes to the actual changelog",
+    }
+)
 def changelog(ctx, version, write_out=False):
     if write_out:
         addopts = []
     else:
-        addopts = ['--draft']
-    check_call(['towncrier', '--yes', '--version', version] + addopts)
+        addopts = ["--draft"]
+    check_call(["towncrier", "--yes", "--version", version] + addopts)
diff --git a/tasks/release.patch.rst b/tasks/release.patch.rst
index 56764b913..1982dc353 100644
--- a/tasks/release.patch.rst
+++ b/tasks/release.patch.rst
@@ -6,7 +6,7 @@ pytest {version} has just been released to PyPI.
 This is a bug-fix release, being a drop-in replacement. To upgrade::

   pip install --upgrade pytest
-
+
 The full changelog is available at http://doc.pytest.org/en/latest/changelog.html.

 Thanks to all who contributed to this release, among them:
diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py
index 8ceb3bae1..c2eed419c 100644
--- a/testing/acceptance_test.py
+++ b/testing/acceptance_test.py
@@ -13,17 +13,18 @@ from _pytest.main import EXIT_NOTESTSCOLLECTED, EXIT_USAGEERROR


 class TestGeneralUsage(object):
+
     def test_config_error(self, testdir):
-        testdir.makeconftest("""
+        testdir.makeconftest(
+            """
             def pytest_configure(config):
                 import pytest
                 raise pytest.UsageError("hello")
-        """)
+        """
+        )
         result = testdir.runpytest(testdir.tmpdir)
         assert result.ret != 0
-        result.stderr.fnmatch_lines([
-            '*ERROR: hello'
-        ])
+        result.stderr.fnmatch_lines(["*ERROR: hello"])

     def test_root_conftest_syntax_error(self, testdir):
         testdir.makepyfile(conftest="raise SyntaxError\n")
@@ -32,37 +33,38 @@ class TestGeneralUsage(object):
         assert result.ret != 0

     def test_early_hook_error_issue38_1(self, testdir):
-        testdir.makeconftest("""
+        testdir.makeconftest(
+            """
             def pytest_sessionstart():
                 0 / 0
-        """)
+        """
+        )
         result = testdir.runpytest(testdir.tmpdir)
         assert result.ret != 0
         # tracestyle is native by default for hook failures
-        result.stdout.fnmatch_lines([
-            '*INTERNALERROR*File*conftest.py*line 2*',
-            '*0 / 0*',
-        ])
+        result.stdout.fnmatch_lines(
+            ["*INTERNALERROR*File*conftest.py*line 2*", "*0 / 0*"]
+        )
         result = testdir.runpytest(testdir.tmpdir, "--fulltrace")
         assert result.ret != 0
         # tracestyle is native by default for hook failures
-        result.stdout.fnmatch_lines([
-            '*INTERNALERROR*def pytest_sessionstart():*',
-            '*INTERNALERROR*0 / 0*',
-        ])
+        result.stdout.fnmatch_lines(
+            ["*INTERNALERROR*def pytest_sessionstart():*", "*INTERNALERROR*0 / 0*"]
+        )

     def test_early_hook_configure_error_issue38(self, testdir):
-        testdir.makeconftest("""
+        testdir.makeconftest(
+            """
             def pytest_configure():
                 0 / 0
-        """)
+        """
+        )
         result = testdir.runpytest(testdir.tmpdir)
         assert result.ret != 0
         # here we get it on stderr
-        result.stderr.fnmatch_lines([
-            '*INTERNALERROR*File*conftest.py*line 2*',
-            '*0 / 0*',
-        ])
+        result.stderr.fnmatch_lines(
+            ["*INTERNALERROR*File*conftest.py*line 2*", "*0 / 0*"]
+        )

     def test_file_not_found(self, testdir):
         result = testdir.runpytest("asd")
@@ -70,61 +72,65 @@ class TestGeneralUsage(object):
         result.stderr.fnmatch_lines(["ERROR: file not found*asd"])

     def test_file_not_found_unconfigure_issue143(self, testdir):
-        testdir.makeconftest("""
+        testdir.makeconftest(
+            """
             def pytest_configure():
                 print("---configure")
             def pytest_unconfigure():
                 print("---unconfigure")
-        """)
+        """
+        )
         result = testdir.runpytest("-s", "asd")
         assert result.ret == 4  # EXIT_USAGEERROR
         result.stderr.fnmatch_lines(["ERROR: file not found*asd"])
-        result.stdout.fnmatch_lines([
-            "*---configure",
-            "*---unconfigure",
-        ])
+        result.stdout.fnmatch_lines(["*---configure", "*---unconfigure"])

     def test_config_preparse_plugin_option(self, testdir):
-        testdir.makepyfile(pytest_xyz="""
+        testdir.makepyfile(
+            pytest_xyz="""
             def pytest_addoption(parser):
                 parser.addoption("--xyz", dest="xyz", action="store")
-        """)
-        testdir.makepyfile(test_one="""
+        """
+        )
+        testdir.makepyfile(
+            test_one="""
             def test_option(pytestconfig):
                 assert pytestconfig.option.xyz == "123"
-        """)
+        """
+        )
         result = testdir.runpytest("-p", "pytest_xyz", "--xyz=123", syspathinsert=True)
         assert result.ret == 0
-        result.stdout.fnmatch_lines([
-            '*1 passed*',
-        ])
+        result.stdout.fnmatch_lines(["*1 passed*"])

     def test_assertion_magic(self, testdir):
-        p = testdir.makepyfile("""
+        p = testdir.makepyfile(
+            """
             def test_this():
                 x = 0
                 assert x
-        """)
+        """
+        )
         result = testdir.runpytest(p)
-        result.stdout.fnmatch_lines([
-            "> assert x",
-            "E assert 0",
-        ])
+        result.stdout.fnmatch_lines(["> assert x", "E assert 0"])
         assert result.ret == 1

     def test_nested_import_error(self, testdir):
-        p = testdir.makepyfile("""
+        p = testdir.makepyfile(
+            """
                 import import_fails
                 def test_this():
                     assert import_fails.a == 1
-        """)
+        """
+        )
         testdir.makepyfile(import_fails="import does_not_work")
         result = testdir.runpytest(p)
-        result.stdout.fnmatch_lines([
-            # XXX on jython this fails:  "> import import_fails",
-            "ImportError while importing test module*",
-            "*No module named *does_not_work*",
-        ])
+        result.stdout.fnmatch_lines(
+            [
+                # XXX on jython this fails:  "> import import_fails",
+                "ImportError while importing test module*",
+                "*No module named *does_not_work*",
+            ]
+        )
         assert result.ret == 2

     def test_not_collectable_arguments(self, testdir):
@@ -132,38 +138,41 @@ class TestGeneralUsage(object):
         p2 = testdir.makefile(".pyc", "123")
         result = testdir.runpytest(p1, p2)
         assert result.ret
-        result.stderr.fnmatch_lines([
-            "*ERROR: not found:*%s" % (p2.basename,)
-        ])
+        result.stderr.fnmatch_lines(["*ERROR: not found:*%s" % (p2.basename,)])

     def test_issue486_better_reporting_on_conftest_load_failure(self, testdir):
         testdir.makepyfile("")
         testdir.makeconftest("import qwerty")
         result = testdir.runpytest("--help")
-        result.stdout.fnmatch_lines("""
+        result.stdout.fnmatch_lines(
+            """
             *--version*
             *warning*conftest.py*
-        """)
+        """
+        )
         result = testdir.runpytest()
-        result.stderr.fnmatch_lines("""
+        result.stderr.fnmatch_lines(
+            """
             *ERROR*could not load*conftest.py*
-        """)
+        """
+        )

     def test_early_skip(self, testdir):
         testdir.mkdir("xyz")
-        testdir.makeconftest("""
+        testdir.makeconftest(
+            """
             import pytest
             def pytest_collect_directory():
                 pytest.skip("early")
-        """)
+        """
+        )
         result = testdir.runpytest()
         assert result.ret == EXIT_NOTESTSCOLLECTED
-        result.stdout.fnmatch_lines([
-            "*1 skip*"
-        ])
+        result.stdout.fnmatch_lines(["*1 skip*"])

     def test_issue88_initial_file_multinodes(self, testdir):
-        testdir.makeconftest("""
+        testdir.makeconftest(
+            """
             import pytest
             class MyFile(pytest.File):
                 def collect(self):
@@ -172,40 +181,46 @@ class TestGeneralUsage(object):
                     return MyFile(path, parent)
             class MyItem(pytest.Item):
                 pass
-        """)
+        """
+        )
         p = testdir.makepyfile("def test_hello(): pass")
         result = testdir.runpytest(p, "--collect-only")
-        result.stdout.fnmatch_lines([
-            "*MyFile*test_issue88*",
-            "*Module*test_issue88*",
-        ])
+        result.stdout.fnmatch_lines(["*MyFile*test_issue88*", "*Module*test_issue88*"])

     def test_issue93_initialnode_importing_capturing(self, testdir):
-        testdir.makeconftest("""
+        testdir.makeconftest(
+            """
             import sys
             print ("should not be seen")
             sys.stderr.write("stder42\\n")
-        """)
+        """
+        )
         result = testdir.runpytest()
         assert result.ret == EXIT_NOTESTSCOLLECTED
         assert "should not be seen" not in result.stdout.str()
         assert "stderr42" not in result.stderr.str()

     def test_conftest_printing_shows_if_error(self, testdir):
-        testdir.makeconftest("""
+        testdir.makeconftest(
+            """
             print ("should be seen")
             assert 0
-        """)
+        """
+        )
         result = testdir.runpytest()
         assert result.ret != 0
         assert "should be seen" in result.stdout.str()

-    @pytest.mark.skipif(not hasattr(py.path.local, 'mksymlinkto'),
-                        reason="symlink not available on this platform")
+    @pytest.mark.skipif(
+        not hasattr(py.path.local, "mksymlinkto"),
+        reason="symlink not available on this platform",
+    )
     def test_chdir(self, testdir):
         testdir.tmpdir.join("py").mksymlinkto(py._pydir)
         p = testdir.tmpdir.join("main.py")
-        p.write(_pytest._code.Source("""
+        p.write(
+            _pytest._code.Source(
+                """
             import sys, os
             sys.path.insert(0, '')
             import py
@@ -213,7 +228,9 @@ class TestGeneralUsage(object):
             print (py.__path__)
             os.chdir(os.path.dirname(os.getcwd()))
             print (py.log)
-        """))
+        """
+            )
+        )
         result = testdir.runpython(p)
         assert not result.ret

@@ -231,20 +248,21 @@ class TestGeneralUsage(object):
         assert result.ret == EXIT_USAGEERROR

     def test_directory_skipped(self, testdir):
-        testdir.makeconftest("""
+        testdir.makeconftest(
+            """
             import pytest
             def pytest_ignore_collect():
                 pytest.skip("intentional")
-        """)
+        """
+        )
         testdir.makepyfile("def test_hello(): pass")
         result = testdir.runpytest()
         assert result.ret == EXIT_NOTESTSCOLLECTED
-        result.stdout.fnmatch_lines([
-            "*1 skipped*"
-        ])
+        result.stdout.fnmatch_lines(["*1 skipped*"])

     def test_multiple_items_per_collector_byid(self, testdir):
-        c = testdir.makeconftest("""
+        c = testdir.makeconftest(
+            """
             import pytest
             class MyItem(pytest.Item):
                 def runtest(self):
@@ -255,15 +273,15 @@ class TestGeneralUsage(object):
             def pytest_collect_file(path, parent):
                 if path.basename.startswith("conftest"):
                     return MyCollector(path, parent)
-        """)
+        """
+        )
         result = testdir.runpytest(c.basename + "::" + "xyz")
         assert result.ret == 0
-        result.stdout.fnmatch_lines([
-            "*1 pass*",
-        ])
+        result.stdout.fnmatch_lines(["*1 pass*"])

     def test_skip_on_generated_funcarg_id(self, testdir):
-        testdir.makeconftest("""
+        testdir.makeconftest(
+            """
             import pytest
             def pytest_generate_tests(metafunc):
                 metafunc.addcall({'x': 3}, id='hello-123')
@@ -272,75 +290,77 @@ class TestGeneralUsage(object):
                 if 'hello-123' in item.keywords:
                     pytest.skip("hello")
                 assert 0
-        """)
+        """
+        )
         p = testdir.makepyfile("""def test_func(x): pass""")
         res = testdir.runpytest(p)
         assert res.ret == 0
         res.stdout.fnmatch_lines(["*1 skipped*"])

     def test_direct_addressing_selects(self, testdir):
-        p = testdir.makepyfile("""
+        p = testdir.makepyfile(
+            """
             def pytest_generate_tests(metafunc):
                 metafunc.addcall({'i': 1}, id="1")
                 metafunc.addcall({'i': 2}, id="2")
             def test_func(i):
                 pass
-        """)
+        """
+        )
         res = testdir.runpytest(p.basename + "::" + "test_func[1]")
         assert res.ret == 0
         res.stdout.fnmatch_lines(["*1 passed*"])

     def test_direct_addressing_notfound(self, testdir):
-        p = testdir.makepyfile("""
+        p = testdir.makepyfile(
+            """
             def test_func():
                 pass
-        """)
+        """
+        )
         res = testdir.runpytest(p.basename + "::" + "test_notfound")
         assert res.ret
         res.stderr.fnmatch_lines(["*ERROR*not found*"])

     def test_docstring_on_hookspec(self):
         from _pytest import hookspec
+
         for name, value in vars(hookspec).items():
             if name.startswith("pytest_"):
                 assert value.__doc__, "no docstring for %s" % name

     def test_initialization_error_issue49(self, testdir):
-        testdir.makeconftest("""
+        testdir.makeconftest(
+            """
             def pytest_configure():
                 x
-        """)
+        """
+        )
         result = testdir.runpytest()
         assert result.ret == 3  # internal error
-        result.stderr.fnmatch_lines([
-            "INTERNAL*pytest_configure*",
-            "INTERNAL*x*",
-        ])
-        assert 'sessionstarttime' not in result.stderr.str()
+        result.stderr.fnmatch_lines(["INTERNAL*pytest_configure*", "INTERNAL*x*"])
+        assert "sessionstarttime" not in result.stderr.str()

-    @pytest.mark.parametrize('lookfor', ['test_fun.py::test_a'])
+    @pytest.mark.parametrize("lookfor", ["test_fun.py::test_a"])
     def test_issue134_report_error_when_collecting_member(self, testdir, lookfor):
-        testdir.makepyfile(test_fun="""
+        testdir.makepyfile(
+            test_fun="""
             def test_a():
                 pass
-            def""")
+            def"""
+        )
         result = testdir.runpytest(lookfor)
-        result.stdout.fnmatch_lines(['*SyntaxError*'])
-        if '::' in lookfor:
-            result.stderr.fnmatch_lines([
-                '*ERROR*',
-            ])
+        result.stdout.fnmatch_lines(["*SyntaxError*"])
+        if "::" in lookfor:
+            result.stderr.fnmatch_lines(["*ERROR*"])
             assert result.ret == 4  # usage error only if item not found

     def test_report_all_failed_collections_initargs(self, testdir):
         testdir.makepyfile(test_a="def", test_b="def")
         result = testdir.runpytest("test_a.py::a", "test_b.py::b")
-        result.stderr.fnmatch_lines([
-            "*ERROR*test_a.py::a*",
-            "*ERROR*test_b.py::b*",
-        ])
+        result.stderr.fnmatch_lines(["*ERROR*test_a.py::a*", "*ERROR*test_b.py::b*"])

-    @pytest.mark.usefixtures('recwarn')
+    @pytest.mark.usefixtures("recwarn")
     def test_namespace_import_doesnt_confuse_import_hook(self, testdir):
         """
         Ref #383. Python 3.3's namespace package messed with our import hooks
@@ -350,8 +370,9 @@ class TestGeneralUsage(object):
         Use recwarn here to silence this warning in Python 2.7:
         ImportWarning: Not importing directory '...\not_a_package': missing __init__.py
         """
-        testdir.mkdir('not_a_package')
-        p = testdir.makepyfile("""
+        testdir.mkdir("not_a_package")
+        p = testdir.makepyfile(
+            """
             try:
                 from not_a_package import doesnt_exist
             except ImportError:
@@ -360,19 +381,23 @@ class TestGeneralUsage(object):

             def test_whatever():
                 pass
-        """)
+        """
+        )
         res = testdir.runpytest(p.basename)
         assert res.ret == 0

     def test_unknown_option(self, testdir):
         result = testdir.runpytest("--qwlkej")
-        result.stderr.fnmatch_lines("""
+        result.stderr.fnmatch_lines(
+            """
             *unrecognized*
-        """)
+        """
+        )

     def test_getsourcelines_error_issue553(self, testdir, monkeypatch):
         monkeypatch.setattr("inspect.getsourcelines", None)
-        p = testdir.makepyfile("""
+        p = testdir.makepyfile(
+            """
             def raise_error(obj):
                 raise IOError('source code not available')

@@ -381,12 +406,12 @@ class TestGeneralUsage(object):

             def test_foo(invalid_fixture):
                 pass
-        """)
+        """
+        )
         res = testdir.runpytest(p)
-        res.stdout.fnmatch_lines([
-            "*source code not available*",
-            "E*fixture 'invalid_fixture' not found",
-        ])
+        res.stdout.fnmatch_lines(
+            ["*source code not available*", "E*fixture 'invalid_fixture' not found"]
+        )

     def test_plugins_given_as_strings(self, tmpdir, monkeypatch):
         """test that str values passed to main() as `plugins` arg
         are interpreted as module names to be imported and registered.
@@ -394,62 +419,67 @@ class TestGeneralUsage(object):
         #855.
         """
         with pytest.raises(ImportError) as excinfo:
-            pytest.main([str(tmpdir)], plugins=['invalid.module'])
-        assert 'invalid' in str(excinfo.value)
+            pytest.main([str(tmpdir)], plugins=["invalid.module"])
+        assert "invalid" in str(excinfo.value)

-        p = tmpdir.join('test_test_plugins_given_as_strings.py')
-        p.write('def test_foo(): pass')
+        p = tmpdir.join("test_test_plugins_given_as_strings.py")
+        p.write("def test_foo(): pass")
         mod = types.ModuleType("myplugin")
-        monkeypatch.setitem(sys.modules, 'myplugin', mod)
-        assert pytest.main(args=[str(tmpdir)], plugins=['myplugin']) == 0
+        monkeypatch.setitem(sys.modules, "myplugin", mod)
+        assert pytest.main(args=[str(tmpdir)], plugins=["myplugin"]) == 0

     def test_parametrized_with_bytes_regex(self, testdir):
-        p = testdir.makepyfile("""
+        p = testdir.makepyfile(
+            """
             import re
             import pytest
             @pytest.mark.parametrize('r', [re.compile(b'foo')])
             def test_stuff(r):
                 pass
             """
-            )
+        )
         res = testdir.runpytest(p)
-        res.stdout.fnmatch_lines([
-            '*1 passed*'
-        ])
+        res.stdout.fnmatch_lines(["*1 passed*"])

     def test_parametrized_with_null_bytes(self, testdir):
         """Test parametrization with values that contain null bytes and unicode characters (#2644, #2957)"""
-        p = testdir.makepyfile(u"""
+        p = testdir.makepyfile(
+            u"""
             # encoding: UTF-8
             import pytest

            @pytest.mark.parametrize("data", [b"\\x00", "\\x00", u'ação'])
             def test_foo(data):
                 assert data
-        """)
+        """
+        )
         res = testdir.runpytest(p)
         res.assert_outcomes(passed=3)


 class TestInvocationVariants(object):
+
     def test_earlyinit(self, testdir):
-        p = testdir.makepyfile("""
+        p = testdir.makepyfile(
+            """
             import pytest
             assert hasattr(pytest, 'mark')
-        """)
+        """
+        )
         result = testdir.runpython(p)
         assert result.ret == 0

     @pytest.mark.xfail("sys.platform.startswith('java')")
     def test_pydoc(self, testdir):
-        for name in ('py.test', 'pytest'):
+        for name in ("py.test", "pytest"):
             result = testdir.runpython_c("import %s;help(%s)" % (name, name))
             assert result.ret == 0
             s = result.stdout.str()
-            assert 'MarkGenerator' in s
+            assert "MarkGenerator" in s

     def test_import_star_py_dot_test(self, testdir):
-        p = testdir.makepyfile("""
+        p = testdir.makepyfile(
+            """
             from py.test import *
             #collect
             #cmdline
@@ -459,37 +489,41 @@ class TestInvocationVariants(object):
             main
             skip
             xfail
-        """)
+        """
+        )
         result = testdir.runpython(p)
         assert result.ret == 0

     def test_import_star_pytest(self, testdir):
-        p = testdir.makepyfile("""
+        p = testdir.makepyfile(
+            """
             from pytest import *
             #Item
             #File
             main
             skip
             xfail
-        """)
+        """
+        )
         result = testdir.runpython(p)
         assert result.ret == 0

     def test_double_pytestcmdline(self, testdir):
-        p = testdir.makepyfile(run="""
+        p = testdir.makepyfile(
+            run="""
             import pytest
             pytest.main()
             pytest.main()
-        """)
-        testdir.makepyfile("""
+        """
+        )
+        testdir.makepyfile(
+            """
             def test_hello():
                 pass
-        """)
+        """
+        )
         result = testdir.runpython(p)
-        result.stdout.fnmatch_lines([
-            "*1 passed*",
-            "*1 passed*",
-        ])
+        result.stdout.fnmatch_lines(["*1 passed*", "*1 passed*"])

     def test_python_minus_m_invocation_ok(self, testdir):
         p1 = testdir.makepyfile("def test_hello(): pass")
@@ -523,7 +557,9 @@ class TestInvocationVariants(object):
         out, err = capsys.readouterr()

     def test_invoke_plugin_api(self, testdir, capsys):
+
         class MyPlugin(object):
+
             def pytest_addoption(self, parser):
                 parser.addoption("--myopt")

@@ -532,63 +568,55 @@ class TestInvocationVariants(object):
         assert "--myopt" in out

     def test_pyargs_importerror(self, testdir, monkeypatch):
-        monkeypatch.delenv('PYTHONDONTWRITEBYTECODE', False)
+        monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", False)
         path = testdir.mkpydir("tpkg")
-        path.join("test_hello.py").write('raise ImportError')
+        path.join("test_hello.py").write("raise ImportError")

         result = testdir.runpytest("--pyargs", "tpkg.test_hello", syspathinsert=True)
         assert result.ret != 0
-        result.stdout.fnmatch_lines([
-            "collected*0*items*/*1*errors"
-        ])
+        result.stdout.fnmatch_lines(["collected*0*items*/*1*errors"])

     def test_cmdline_python_package(self, testdir, monkeypatch):
         import warnings
-        monkeypatch.delenv('PYTHONDONTWRITEBYTECODE', False)
+
+        monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", False)
         path = testdir.mkpydir("tpkg")
         path.join("test_hello.py").write("def test_hello(): pass")
         path.join("test_world.py").write("def test_world(): pass")
         result = testdir.runpytest("--pyargs", "tpkg")
         assert result.ret == 0
-        result.stdout.fnmatch_lines([
-            "*2 passed*"
-        ])
+        result.stdout.fnmatch_lines(["*2 passed*"])
         result = testdir.runpytest("--pyargs", "tpkg.test_hello", syspathinsert=True)
         assert result.ret == 0
-        result.stdout.fnmatch_lines([
-            "*1 passed*"
-        ])
+        result.stdout.fnmatch_lines(["*1 passed*"])

         def join_pythonpath(what):
-            cur = os.environ.get('PYTHONPATH')
+            cur = os.environ.get("PYTHONPATH")
             if cur:
                 return str(what) + os.pathsep + cur
             return what
+
         empty_package = testdir.mkpydir("empty_package")
-        monkeypatch.setenv('PYTHONPATH', join_pythonpath(empty_package))
+        monkeypatch.setenv("PYTHONPATH", join_pythonpath(empty_package))
         # the path which is not a package raises a warning on pypy;
         # no idea why only pypy and not normal python warn about it here
         with warnings.catch_warnings():
-            warnings.simplefilter('ignore', ImportWarning)
+            warnings.simplefilter("ignore", ImportWarning)
             result = testdir.runpytest("--pyargs", ".")
         assert result.ret == 0
-        result.stdout.fnmatch_lines([
-            "*2 passed*"
-        ])
+        result.stdout.fnmatch_lines(["*2 passed*"])

-        monkeypatch.setenv('PYTHONPATH', join_pythonpath(testdir))
+        monkeypatch.setenv("PYTHONPATH", join_pythonpath(testdir))
         result = testdir.runpytest("--pyargs", "tpkg.test_missing", syspathinsert=True)
         assert result.ret != 0
-        result.stderr.fnmatch_lines([
-            "*not*found*test_missing*",
-        ])
+        result.stderr.fnmatch_lines(["*not*found*test_missing*"])

     def test_cmdline_python_namespace_package(self, testdir, monkeypatch):
         """
         test --pyargs option with namespace packages (#1567)
         """
-        monkeypatch.delenv('PYTHONDONTWRITEBYTECODE', raising=False)
+        monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False)

         search_path = []
         for dirname in "hello", "world":
@@ -596,12 +624,13 @@ class TestInvocationVariants(object):
             search_path.append(d)
             ns = d.mkdir("ns_pkg")
             ns.join("__init__.py").write(
-                "__import__('pkg_resources').declare_namespace(__name__)")
+                "__import__('pkg_resources').declare_namespace(__name__)"
+            )
             lib = ns.mkdir(dirname)
             lib.ensure("__init__.py")
-            lib.join("test_{0}.py".format(dirname)). \
-                write("def test_{0}(): pass\n"
-                      "def test_other():pass".format(dirname))
+            lib.join("test_{}.py".format(dirname)).write(
+                "def test_{}(): pass\n" "def test_other():pass".format(dirname)
+            )

         # The structure of the test directory is now:
         # .
@@ -619,34 +648,38 @@ class TestInvocationVariants(object):
         #           └── test_world.py

         def join_pythonpath(*dirs):
-            cur = os.environ.get('PYTHONPATH')
+            cur = os.environ.get("PYTHONPATH")
             if cur:
                 dirs += (cur,)
             return os.pathsep.join(str(p) for p in dirs)
-        monkeypatch.setenv('PYTHONPATH', join_pythonpath(*search_path))
+
+        monkeypatch.setenv("PYTHONPATH", join_pythonpath(*search_path))
         for p in search_path:
             monkeypatch.syspath_prepend(p)

         # mixed module and filenames:
-        os.chdir('world')
+        os.chdir("world")
         result = testdir.runpytest("--pyargs", "-v", "ns_pkg.hello", "ns_pkg/world")
         assert result.ret == 0
-        result.stdout.fnmatch_lines([
-            "*test_hello.py::test_hello*PASSED*",
-            "*test_hello.py::test_other*PASSED*",
-            "*test_world.py::test_world*PASSED*",
-            "*test_world.py::test_other*PASSED*",
-            "*4 passed*"
-        ])
+        result.stdout.fnmatch_lines(
+            [
+                "*test_hello.py::test_hello*PASSED*",
+                "*test_hello.py::test_other*PASSED*",
+                "*test_world.py::test_world*PASSED*",
+                "*test_world.py::test_other*PASSED*",
+                "*4 passed*",
+            ]
+        )

         # specify tests within a module
         testdir.chdir()
-        result = testdir.runpytest("--pyargs", "-v", "ns_pkg.world.test_world::test_other")
+        result = testdir.runpytest(
+            "--pyargs", "-v", "ns_pkg.world.test_world::test_other"
+        )
         assert result.ret == 0
-        result.stdout.fnmatch_lines([
-            "*test_world.py::test_other*PASSED*",
-            "*1 passed*"
-        ])
+        result.stdout.fnmatch_lines(
+            ["*test_world.py::test_other*PASSED*", "*1 passed*"]
+        )

     @pytest.mark.skipif(not hasattr(os, "symlink"), reason="requires symlinks")
     def test_cmdline_python_package_symlink(self, testdir, monkeypatch):
@@ -656,12 +689,15 @@ class TestInvocationVariants(object):
         """
         # dummy check that we can actually create symlinks: on Windows `os.symlink` is available,
         # but normal users require special admin privileges to create symlinks.
-        if sys.platform == 'win32':
+        if sys.platform == "win32":
             try:
-                os.symlink(str(testdir.tmpdir.ensure('tmpfile')), str(testdir.tmpdir.join('tmpfile2')))
+                os.symlink(
+                    str(testdir.tmpdir.ensure("tmpfile")),
+                    str(testdir.tmpdir.join("tmpfile2")),
+                )
             except OSError as e:
                 pytest.skip(six.text_type(e.args[0]))
-        monkeypatch.delenv('PYTHONDONTWRITEBYTECODE', raising=False)
+        monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False)

         search_path = ["lib", os.path.join("local", "lib")]

@@ -669,15 +705,14 @@ class TestInvocationVariants(object):
             d = testdir.mkdir(dirname)
             foo = d.mkdir("foo")
             foo.ensure("__init__.py")
-            lib = foo.mkdir('bar')
+            lib = foo.mkdir("bar")
             lib.ensure("__init__.py")
-            lib.join("test_bar.py"). \
-                write("def test_bar(): pass\n"
-                      "def test_other(a_fixture):pass")
-            lib.join("conftest.py"). \
-                write("import pytest\n"
-                      "@pytest.fixture\n"
-                      "def a_fixture():pass")
+            lib.join("test_bar.py").write(
+                "def test_bar(): pass\n" "def test_other(a_fixture):pass"
+            )
+            lib.join("conftest.py").write(
+                "import pytest\n" "@pytest.fixture\n" "def a_fixture():pass"
+            )

         d_local = testdir.mkdir("local")
         symlink_location = os.path.join(str(d_local), "lib")
@@ -699,12 +734,12 @@ class TestInvocationVariants(object):
         #           └── test_bar.py

         def join_pythonpath(*dirs):
-            cur = os.getenv('PYTHONPATH')
+            cur = os.getenv("PYTHONPATH")
             if cur:
                 dirs += (cur,)
             return os.pathsep.join(str(p) for p in dirs)

-        monkeypatch.setenv('PYTHONPATH', join_pythonpath(*search_path))
+        monkeypatch.setenv("PYTHONPATH", join_pythonpath(*search_path))
         for p in search_path:
             monkeypatch.syspath_prepend(p)

@@ -712,22 +747,23 @@ class TestInvocationVariants(object):
         result = testdir.runpytest("--pyargs", "-v", "foo.bar")
         testdir.chdir()
         assert result.ret == 0
-        result.stdout.fnmatch_lines([
-            "*lib/foo/bar/test_bar.py::test_bar*PASSED*",
-            "*lib/foo/bar/test_bar.py::test_other*PASSED*",
-            "*2 passed*"
-        ])
+        result.stdout.fnmatch_lines(
+            [
+                "*lib/foo/bar/test_bar.py::test_bar*PASSED*",
+                "*lib/foo/bar/test_bar.py::test_other*PASSED*",
+                "*2 passed*",
+            ]
+        )

     def test_cmdline_python_package_not_exists(self, testdir):
         result = testdir.runpytest("--pyargs", "tpkgwhatv")
         assert result.ret
-        result.stderr.fnmatch_lines([
-            "ERROR*file*or*package*not*found*",
-        ])
+        result.stderr.fnmatch_lines(["ERROR*file*or*package*not*found*"])

     @pytest.mark.xfail(reason="decide: feature or bug")
     def test_noclass_discovery_if_not_testcase(self, testdir):
-        testpath = testdir.makepyfile("""
+        testpath = testdir.makepyfile(
+            """
             import unittest
             class TestHello(object):
                 def test_hello(self):
@@ -735,36 +771,40 @@ class TestInvocationVariants(object):
             class RealTest(unittest.TestCase, TestHello):
                 attr = 42
-        """)
+        """
+        )
         reprec = testdir.inline_run(testpath)
         reprec.assertoutcome(passed=1)

     def test_doctest_id(self, testdir):
-        testdir.makefile('.txt', """
+        testdir.makefile(
+            ".txt",
+            """
             >>> x=3
             >>> x
             4
-        """)
+        """,
+        )
         result = testdir.runpytest("-rf")
         lines = result.stdout.str().splitlines()
         for line in lines:
             if line.startswith("FAIL "):
                 testid = line[5:].strip()
                 break
-        result = testdir.runpytest(testid, '-rf')
-        result.stdout.fnmatch_lines([
-            line,
-            "*1 failed*",
-        ])
+        result = testdir.runpytest(testid, "-rf")
+        result.stdout.fnmatch_lines([line, "*1 failed*"])

     def test_core_backward_compatibility(self):
         """Test backward compatibility for get_plugin_manager function.
         See #787."""
         import _pytest.config
-        assert type(_pytest.config.get_plugin_manager()) is _pytest.config.PytestPluginManager
+
+        assert type(
+            _pytest.config.get_plugin_manager()
+        ) is _pytest.config.PytestPluginManager

     def test_has_plugin(self, request):
         """Test hasplugin function of the plugin manager (#932)."""
-        assert request.config.pluginmanager.hasplugin('python')
+        assert request.config.pluginmanager.hasplugin("python")


 class TestDurations(object):
@@ -785,12 +825,9 @@ class TestDurations(object):
         testdir.makepyfile(self.source)
         result = testdir.runpytest("--durations=10")
         assert result.ret == 0
-        result.stdout.fnmatch_lines_random([
-            "*durations*",
-            "*call*test_3*",
-            "*call*test_2*",
-            "*call*test_1*",
-        ])
+        result.stdout.fnmatch_lines_random(
+            ["*durations*", "*call*test_3*", "*call*test_2*", "*call*test_1*"]
+        )

     def test_calls_show_2(self, testdir):
         testdir.makepyfile(self.source)
@@ -804,7 +841,7 @@ class TestDurations(object):
         result = testdir.runpytest("--durations=0")
         assert result.ret == 0
         for x in "123":
-            for y in 'call', :  # 'setup', 'call', 'teardown':
+            for y in ("call",):  # 'setup', 'call', 'teardown':
                 for line in result.stdout.lines:
                     if ("test_%s" % x) in line and y in line:
                         break
@@ -815,19 +852,14 @@ class TestDurations(object):
         testdir.makepyfile(self.source)
         result = testdir.runpytest("--durations=2", "-k test_1")
         assert result.ret == 0
-        result.stdout.fnmatch_lines([
-            "*durations*",
-            "*call*test_1*",
-        ])
+        result.stdout.fnmatch_lines(["*durations*", "*call*test_1*"])

     def test_with_failing_collection(self, testdir):
         testdir.makepyfile(self.source)
         testdir.makepyfile(test_collecterror="""xyz""")
         result = testdir.runpytest("--durations=2", "-k test_1")
         assert result.ret == 2
-        result.stdout.fnmatch_lines([
-            "*Interrupted: 1 errors during collection*",
-        ])
+        result.stdout.fnmatch_lines(["*Interrupted: 1 errors during collection*"])
         # Collection errors abort test execution, therefore no duration is
         # output
         assert "duration" not in result.stdout.str()
@@ -855,42 +887,48 @@ class TestDurationWithFixture(object):
         result = testdir.runpytest("--durations=10")
         assert result.ret == 0

-        result.stdout.fnmatch_lines_random("""
+        result.stdout.fnmatch_lines_random(
+            """
             *durations*
             * setup *test_1*
             * call *test_1*
-        """)
+        """
+        )


 def test_zipimport_hook(testdir, tmpdir):
     """Test package loader is being used correctly (see #1837)."""
-    zipapp = pytest.importorskip('zipapp')
-    testdir.tmpdir.join('app').ensure(dir=1)
-    testdir.makepyfile(**{
-        'app/foo.py': """
+    zipapp = pytest.importorskip("zipapp")
+    testdir.tmpdir.join("app").ensure(dir=1)
+    testdir.makepyfile(
+        **{
+            "app/foo.py": """
             import pytest
             def main():
                 pytest.main(['--pyarg', 'foo'])
-        """,
-    })
-    target = tmpdir.join('foo.zip')
-    zipapp.create_archive(str(testdir.tmpdir.join('app')), str(target), main='foo:main')
+        """
+        }
+    )
+    target = tmpdir.join("foo.zip")
+    zipapp.create_archive(str(testdir.tmpdir.join("app")), str(target), main="foo:main")
     result = testdir.runpython(target)
     assert result.ret == 0
-    result.stderr.fnmatch_lines(['*not found*foo*'])
-    assert 'INTERNALERROR>' not in result.stdout.str()
+    result.stderr.fnmatch_lines(["*not found*foo*"])
+    assert "INTERNALERROR>" not in result.stdout.str()


 def test_import_plugin_unicode_name(testdir):
+    testdir.makepyfile(myplugin="")
     testdir.makepyfile(
-        myplugin='',
-    )
-    testdir.makepyfile("""
+        """
         def test(): pass
-    """)
+    """
+    )
-    testdir.makeconftest("""
+    testdir.makeconftest(
+        """
         pytest_plugins = [u'myplugin']
-    """)
+    """
+    )
     r = testdir.runpytest()
     assert r.ret == 0

@@ -900,8 +938,9 @@ def test_deferred_hook_checking(testdir):
     Check hooks as late as possible (#1821).
     """
     testdir.syspathinsert()
-    testdir.makepyfile(**{
-        'plugin.py': """
+    testdir.makepyfile(
+        **{
+            "plugin.py": """
             class Hooks(object):
                 def pytest_my_hook(self, config):
                     pass
@@ -909,25 +948,27 @@ def test_deferred_hook_checking(testdir):
             def pytest_configure(config):
                 config.pluginmanager.add_hookspecs(Hooks)
         """,
-        'conftest.py': """
+            "conftest.py": """
             pytest_plugins = ['plugin']
             def pytest_my_hook(config):
                 return 40
         """,
-        'test_foo.py': """
+            "test_foo.py": """
             def test(request):
                 assert request.config.hook.pytest_my_hook(config=request.config) == [40]
-        """
-    })
+        """,
+        }
+    )
     result = testdir.runpytest()
-    result.stdout.fnmatch_lines(['* 1 passed *'])
+    result.stdout.fnmatch_lines(["* 1 passed *"])


 def test_fixture_values_leak(testdir):
     """Ensure that fixture objects are properly destroyed by the garbage
     collector at the end of their expected life-times (#2981).
     """
-    testdir.makepyfile("""
+    testdir.makepyfile(
+        """
         import attr
         import gc
         import pytest
@@ -961,15 +1002,17 @@ def test_fixture_values_leak(testdir):
            gc.collect()
            # fixture "fix" created during test1 must have been destroyed by now
            assert fix_of_test1_ref() is None
-    """)
+    """
+    )
     result = testdir.runpytest()
-    result.stdout.fnmatch_lines(['* 2 passed *'])
+    result.stdout.fnmatch_lines(["* 2 passed *"])


 def test_fixture_order_respects_scope(testdir):
     """Ensure that fixtures are created according to scope order, regression
     test for #2405
     """
-    testdir.makepyfile('''
+    testdir.makepyfile(
+        """
         import pytest

         data = {}
@@ -985,7 +1028,8 @@ def test_fixture_order_respects_scope(testdir):
         @pytest.mark.usefixtures('clean_data')
         def test_value():
             assert data.get('value')
-    ''')
+    """
+    )
     result = testdir.runpytest()
     assert result.ret == 0

@@ -997,7 +1041,8 @@ def test_frame_leak_on_failing_test(testdir):
     are made of traceback objects which cannot be weakly referenced. Those
     objects at least can be eventually claimed by the garbage collector.
     """
-    testdir.makepyfile('''
+    testdir.makepyfile(
+        """
         import gc
         import weakref
@@ -1015,6 +1060,7 @@ def test_frame_leak_on_failing_test(testdir):
         def test2():
             gc.collect()
             assert ref() is None
-    ''')
+    """
+    )
     result = testdir.runpytest_subprocess()
-    result.stdout.fnmatch_lines(['*1 failed, 1 passed in*'])
+    result.stdout.fnmatch_lines(["*1 failed, 1 passed in*"])
diff --git a/testing/code/test_code.py b/testing/code/test_code.py
index 209a8ef19..bfae36918 100644
--- a/testing/code/test_code.py
+++ b/testing/code/test_code.py
@@ -6,18 +6,19 @@ import _pytest._code
 import py
 import pytest
 from test_excinfo import TWMock
+from six import text_type


 def test_ne():
-    code1 = _pytest._code.Code(compile('foo = "bar"', '', 'exec'))
+    code1 = _pytest._code.Code(compile('foo = "bar"', "", "exec"))
     assert code1 == code1
-    code2 = _pytest._code.Code(compile('foo = "baz"', '', 'exec'))
+    code2 = _pytest._code.Code(compile('foo = "baz"', "", "exec"))
     assert code2 != code1


 def test_code_gives_back_name_for_not_existing_file():
-    name = 'abc-123'
-    co_code = compile("pass\n", name, 'exec')
+    name = "abc-123"
+    co_code = compile("pass\n", name, "exec")
     assert co_code.co_filename == name
     code = _pytest._code.Code(co_code)
     assert str(code.path) == name
@@ -25,12 +26,15 @@ def test_code_gives_back_name_for_not_existing_file():


 def test_code_with_class():
+
     class A(object):
         pass
+
     pytest.raises(TypeError, "_pytest._code.Code(A)")


 if True:
+
     def x():
         pass

@@ -38,7 +42,7 @@ if True:
 def test_code_fullsource():
     code = _pytest._code.Code(x)
     full = code.fullsource
-    assert 'test_code_fullsource()' in str(full)
+    assert "test_code_fullsource()" in str(full)


 def test_code_source():
@@ -50,8 +54,10 @@ def test_code_source():


 def test_frame_getsourcelineno_myself():
+
     def func():
         return sys._getframe(0)
+
     f = func()
     f = _pytest._code.Frame(f)
     source, lineno = f.code.fullsource, f.lineno
@@ -59,8 +65,10 @@ def test_frame_getsourcelineno_myself():


 def test_getstatement_empty_fullsource():
+
     def func():
         return sys._getframe(0)
+
     f = func()
     f = _pytest._code.Frame(f)
     prop = f.code.__class__.fullsource
@@ -78,7 +86,7 @@ def test_code_from_func():


 def test_unicode_handling():
-    value = py.builtin._totext('\xc4\x85\xc4\x87\n', 'utf-8').encode('utf8')
+    value = py.builtin._totext("\xc4\x85\xc4\x87\n", "utf-8").encode("utf8")

     def f():
         raise Exception(value)
@@ -86,65 +94,74 @@ def test_unicode_handling():
     excinfo = pytest.raises(Exception, f)
     str(excinfo)
     if sys.version_info[0] < 3:
-        unicode(excinfo)
+        text_type(excinfo)


-@pytest.mark.skipif(sys.version_info[0] >= 3, reason='python 2 only issue')
+@pytest.mark.skipif(sys.version_info[0] >= 3, reason="python 2 only issue")
 def test_unicode_handling_syntax_error():
-    value = py.builtin._totext('\xc4\x85\xc4\x87\n', 'utf-8').encode('utf8')
+    value = py.builtin._totext("\xc4\x85\xc4\x87\n", "utf-8").encode("utf8")

     def f():
-        raise SyntaxError('invalid syntax', (None, 1, 3, value))
+        raise SyntaxError("invalid syntax", (None, 1, 3, value))

     excinfo = pytest.raises(Exception, f)
     str(excinfo)
     if sys.version_info[0] < 3:
-        unicode(excinfo)
+        text_type(excinfo)


 def test_code_getargs():
+
     def f1(x):
         pass
+
     c1 = _pytest._code.Code(f1)
-    assert c1.getargs(var=True) == ('x',)
+    assert c1.getargs(var=True) == ("x",)

     def f2(x, *y):
         pass
+
     c2 = _pytest._code.Code(f2)
-    assert c2.getargs(var=True) == ('x', 'y')
+    assert c2.getargs(var=True) == ("x", "y")

     def f3(x, **z):
         pass
+
     c3 = _pytest._code.Code(f3)
-    assert c3.getargs(var=True) == ('x', 'z')
"z") def f4(x, *y, **z): pass + c4 = _pytest._code.Code(f4) - assert c4.getargs(var=True) == ('x', 'y', 'z') + assert c4.getargs(var=True) == ("x", "y", "z") def test_frame_getargs(): + def f1(x): return sys._getframe(0) - fr1 = _pytest._code.Frame(f1('a')) - assert fr1.getargs(var=True) == [('x', 'a')] + + fr1 = _pytest._code.Frame(f1("a")) + assert fr1.getargs(var=True) == [("x", "a")] def f2(x, *y): return sys._getframe(0) - fr2 = _pytest._code.Frame(f2('a', 'b', 'c')) - assert fr2.getargs(var=True) == [('x', 'a'), ('y', ('b', 'c'))] + + fr2 = _pytest._code.Frame(f2("a", "b", "c")) + assert fr2.getargs(var=True) == [("x", "a"), ("y", ("b", "c"))] def f3(x, **z): return sys._getframe(0) - fr3 = _pytest._code.Frame(f3('a', b='c')) - assert fr3.getargs(var=True) == [('x', 'a'), ('z', {'b': 'c'})] + + fr3 = _pytest._code.Frame(f3("a", b="c")) + assert fr3.getargs(var=True) == [("x", "a"), ("z", {"b": "c"})] def f4(x, *y, **z): return sys._getframe(0) - fr4 = _pytest._code.Frame(f4('a', 'b', c='d')) - assert fr4.getargs(var=True) == [('x', 'a'), ('y', ('b',)), - ('z', {'c': 'd'})] + + fr4 = _pytest._code.Frame(f4("a", "b", c="d")) + assert fr4.getargs(var=True) == [("x", "a"), ("y", ("b",)), ("z", {"c": "d"})] class TestExceptionInfo(object): @@ -173,7 +190,7 @@ class TestTracebackEntry(object): entry = exci.traceback[0] source = entry.getsource() assert len(source) == 6 - assert 'assert False' in source[5] + assert "assert False" in source[5] class TestReprFuncArgs(object): @@ -183,14 +200,11 @@ class TestReprFuncArgs(object): tw = TWMock() - args = [ - ('unicode_string', u"São Paulo"), - ('utf8_string', 'S\xc3\xa3o Paulo'), - ] + args = [("unicode_string", u"São Paulo"), ("utf8_string", "S\xc3\xa3o Paulo")] r = ReprFuncArgs(args) r.toterminal(tw) if sys.version_info[0] >= 3: - assert tw.lines[0] == 'unicode_string = São Paulo, utf8_string = São Paulo' + assert tw.lines[0] == "unicode_string = São Paulo, utf8_string = São Paulo" else: - assert tw.lines[0] == 'unicode_string = São Paulo, utf8_string = São Paulo' + assert tw.lines[0] == "unicode_string = São Paulo, utf8_string = São Paulo" diff --git a/testing/code/test_excinfo.py b/testing/code/test_excinfo.py index 8fd59423f..463b2e38e 100644 --- a/testing/code/test_excinfo.py +++ b/testing/code/test_excinfo.py @@ -11,7 +11,8 @@ from _pytest._code.code import ( ExceptionInfo, FormattedExcinfo, ReprExceptionInfo, - ExceptionChainRepr) + ExceptionChainRepr, +) from test_source import astonly @@ -22,7 +23,7 @@ except ImportError: else: invalidate_import_caches = getattr(importlib, "invalidate_caches", None) -queue = py.builtin._tryimport('queue', 'Queue') +queue = py.builtin._tryimport("queue", "Queue") failsonjython = pytest.mark.xfail("sys.platform.startswith('java')") @@ -65,6 +66,7 @@ def test_excinfo_simple(): def test_excinfo_getstatement(): + def g(): raise ValueError @@ -75,9 +77,11 @@ def test_excinfo_getstatement(): f() except ValueError: excinfo = _pytest._code.ExceptionInfo() - linenumbers = [_pytest._code.getrawcode(f).co_firstlineno - 1 + 4, - _pytest._code.getrawcode(f).co_firstlineno - 1 + 1, - _pytest._code.getrawcode(g).co_firstlineno - 1 + 1, ] + linenumbers = [ + _pytest._code.getrawcode(f).co_firstlineno - 1 + 4, + _pytest._code.getrawcode(f).co_firstlineno - 1 + 1, + _pytest._code.getrawcode(g).co_firstlineno - 1 + 1, + ] values = list(excinfo.traceback) foundlinenumbers = [x.lineno for x in values] assert foundlinenumbers == linenumbers @@ -85,6 +89,7 @@ def test_excinfo_getstatement(): # print "%s:%d %s" 
%(x.path.relto(root), x.lineno, x.statement) # xxx + # testchain for getentries test below @@ -108,6 +113,7 @@ def h(): class TestTraceback_f_g_h(object): + def setup_method(self, method): try: h() @@ -119,7 +125,7 @@ class TestTraceback_f_g_h(object): entries = list(tb) assert len(tb) == 4 # maybe fragile test assert len(entries) == 4 # maybe fragile test - names = ['f', 'g', 'h'] + names = ["f", "g", "h"] for entry in entries: try: names.remove(entry.frame.code.name) @@ -136,14 +142,16 @@ class TestTraceback_f_g_h(object): @astonly @failsonjython def test_traceback_entry_getsource_in_construct(self): - source = _pytest._code.Source("""\ + source = _pytest._code.Source( + """\ def xyz(): try: raise ValueError except somenoname: pass xyz() - """) + """ + ) try: exec(source.compile()) except NameError: @@ -168,7 +176,7 @@ class TestTraceback_f_g_h(object): basedir = py.path.local(pytest.__file__).dirpath() newtraceback = excinfo.traceback.cut(excludepath=basedir) for x in newtraceback: - if hasattr(x, 'path'): + if hasattr(x, "path"): assert not py.path.local(x.path).relto(basedir) assert newtraceback[-1].frame.code.path == p @@ -177,13 +185,17 @@ class TestTraceback_f_g_h(object): ntraceback = traceback.filter() assert len(ntraceback) == len(traceback) - 1 - @pytest.mark.parametrize('tracebackhide, matching', [ - (lambda info: True, True), - (lambda info: False, False), - (operator.methodcaller('errisinstance', ValueError), True), - (operator.methodcaller('errisinstance', IndexError), False), - ]) + @pytest.mark.parametrize( + "tracebackhide, matching", + [ + (lambda info: True, True), + (lambda info: False, False), + (operator.methodcaller("errisinstance", ValueError), True), + (operator.methodcaller("errisinstance", IndexError), False), + ], + ) def test_traceback_filter_selective(self, tracebackhide, matching): + def f(): # raise ValueError @@ -203,8 +215,8 @@ class TestTraceback_f_g_h(object): excinfo = pytest.raises(ValueError, h) traceback = excinfo.traceback ntraceback = traceback.filter() - print('old: {0!r}'.format(traceback)) - print('new: {0!r}'.format(ntraceback)) + print("old: {!r}".format(traceback)) + print("new: {!r}".format(ntraceback)) if matching: assert len(ntraceback) == len(traceback) - 2 @@ -213,16 +225,19 @@ class TestTraceback_f_g_h(object): assert len(ntraceback) == len(traceback) - 1 def test_traceback_recursion_index(self): + def f(n): if n < 10: n += 1 f(n) + excinfo = pytest.raises(RuntimeError, f, 8) traceback = excinfo.traceback recindex = traceback.recursionindex() assert recindex == 3 def test_traceback_only_specific_recursion_errors(self, monkeypatch): + def f(n): if n == 0: raise RuntimeError("hello") @@ -234,11 +249,13 @@ class TestTraceback_f_g_h(object): assert "RuntimeError: hello" in str(repr.reprcrash) def test_traceback_no_recursion_index(self): + def do_stuff(): raise RuntimeError def reraise_me(): import sys + exc, val, tb = sys.exc_info() py.builtin._reraise(exc, val, tb) @@ -255,15 +272,16 @@ class TestTraceback_f_g_h(object): def test_traceback_messy_recursion(self): # XXX: simplified locally testable version - decorator = pytest.importorskip('decorator').decorator + decorator = pytest.importorskip("decorator").decorator def log(f, *k, **kw): - print('%s %s' % (k, kw)) + print("%s %s" % (k, kw)) f(*k, **kw) + log = decorator(log) def fail(): - raise ValueError('') + raise ValueError("") fail = log(log(fail)) @@ -271,6 +289,7 @@ class TestTraceback_f_g_h(object): assert excinfo.traceback.recursionindex() is None def 
test_traceback_getcrashentry(self): + def i(): __tracebackhide__ = True raise ValueError @@ -291,9 +310,10 @@ class TestTraceback_f_g_h(object): co = _pytest._code.Code(h) assert entry.frame.code.path == co.path assert entry.lineno == co.firstlineno + 1 - assert entry.frame.code.name == 'h' + assert entry.frame.code.name == "h" def test_traceback_getcrashentry_empty(self): + def g(): __tracebackhide__ = True raise ValueError @@ -308,16 +328,15 @@ class TestTraceback_f_g_h(object): co = _pytest._code.Code(g) assert entry.frame.code.path == co.path assert entry.lineno == co.firstlineno + 2 - assert entry.frame.code.name == 'g' + assert entry.frame.code.name == "g" def test_excinfo_exconly(): excinfo = pytest.raises(ValueError, h) - assert excinfo.exconly().startswith('ValueError') - excinfo = pytest.raises(ValueError, - "raise ValueError('hello\\nworld')") + assert excinfo.exconly().startswith("ValueError") + excinfo = pytest.raises(ValueError, "raise ValueError('hello\\nworld')") msg = excinfo.exconly(tryshort=True) - assert msg.startswith('ValueError') + assert msg.startswith("ValueError") assert msg.endswith("world") @@ -351,19 +370,18 @@ def test_excinfo_no_sourcecode(): def test_excinfo_no_python_sourcecode(tmpdir): # XXX: simplified locally testable version - tmpdir.join('test.txt').write("{{ h()}}:") + tmpdir.join("test.txt").write("{{ h()}}:") - jinja2 = pytest.importorskip('jinja2') + jinja2 = pytest.importorskip("jinja2") loader = jinja2.FileSystemLoader(str(tmpdir)) env = jinja2.Environment(loader=loader) - template = env.get_template('test.txt') - excinfo = pytest.raises(ValueError, - template.render, h=h) + template = env.get_template("test.txt") + excinfo = pytest.raises(ValueError, template.render, h=h) for item in excinfo.traceback: print(item) # XXX: for some reason jinja.Template.render is printed in full item.source # shouldnt fail - if item.path.basename == 'test.txt': - assert str(item.source) == '{{ h()}}:' + if item.path.basename == "test.txt": + assert str(item.source) == "{{ h()}}:" def test_entrysource_Queue_example(): @@ -393,28 +411,29 @@ def test_codepath_Queue_example(): def test_match_succeeds(): with pytest.raises(ZeroDivisionError) as excinfo: 0 // 0 - excinfo.match(r'.*zero.*') + excinfo.match(r".*zero.*") def test_match_raises_error(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest def test_division_zero(): with pytest.raises(ZeroDivisionError) as excinfo: 0 / 0 excinfo.match(r'[123]+') - """) + """ + ) result = testdir.runpytest() assert result.ret != 0 - result.stdout.fnmatch_lines([ - "*AssertionError*Pattern*[123]*not found*", - ]) + result.stdout.fnmatch_lines(["*AssertionError*Pattern*[123]*not found*"]) class TestFormattedExcinfo(object): @pytest.fixture def importasmod(self, request): + def importasmod(source): source = _pytest._code.Source(source) tmpdir = request.getfixturevalue("tmpdir") @@ -424,6 +443,7 @@ class TestFormattedExcinfo(object): if invalidate_import_caches is not None: invalidate_import_caches() return modpath.pyimport() + return importasmod def excinfo_from_exec(self, source): @@ -438,10 +458,12 @@ class TestFormattedExcinfo(object): def test_repr_source(self): pr = FormattedExcinfo() - source = _pytest._code.Source(""" + source = _pytest._code.Source( + """ def f(x): pass - """).strip() + """ + ).strip() pr.flow_marker = "|" lines = pr.get_source(source, 0) assert len(lines) == 2 @@ -451,19 +473,17 @@ class TestFormattedExcinfo(object): def test_repr_source_excinfo(self): """ check if indentation is 
right """ pr = FormattedExcinfo() - excinfo = self.excinfo_from_exec(""" + excinfo = self.excinfo_from_exec( + """ def f(): assert 0 f() - """) + """ + ) pr = FormattedExcinfo() source = pr._getentrysource(excinfo.traceback[-1]) lines = pr.get_source(source, 1, excinfo) - assert lines == [ - ' def f():', - '> assert 0', - 'E AssertionError' - ] + assert lines == [" def f():", "> assert 0", "E AssertionError"] def test_repr_source_not_existing(self): pr = FormattedExcinfo() @@ -479,10 +499,14 @@ class TestFormattedExcinfo(object): def test_repr_many_line_source_not_existing(self): pr = FormattedExcinfo() - co = compile(""" + co = compile( + """ a = 1 raise ValueError() -""", "", "exec") +""", + "", + "exec", + ) try: exec(co) except ValueError: @@ -496,10 +520,11 @@ raise ValueError() pr = FormattedExcinfo() class FakeCode(object): - class raw(object): - co_filename = '?' - path = '?' + class raw(object): + co_filename = "?" + + path = "?" firstlineno = 5 def fullsource(self): @@ -513,6 +538,7 @@ raise ValueError() f_globals = {} class FakeTracebackEntry(_pytest._code.Traceback.Entry): + def __init__(self, tb, excinfo=None): self.lineno = 5 + 3 @@ -558,19 +584,21 @@ raise ValueError() def test_repr_local(self): p = FormattedExcinfo(showlocals=True) - loc = {'y': 5, 'z': 7, 'x': 3, '@x': 2, '__builtins__': {}} + loc = {"y": 5, "z": 7, "x": 3, "@x": 2, "__builtins__": {}} reprlocals = p.repr_locals(loc) assert reprlocals.lines - assert reprlocals.lines[0] == '__builtins__ = ' - assert reprlocals.lines[1] == 'x = 3' - assert reprlocals.lines[2] == 'y = 5' - assert reprlocals.lines[3] == 'z = 7' + assert reprlocals.lines[0] == "__builtins__ = " + assert reprlocals.lines[1] == "x = 3" + assert reprlocals.lines[2] == "y = 5" + assert reprlocals.lines[3] == "z = 7" def test_repr_tracebackentry_lines(self, importasmod): - mod = importasmod(""" + mod = importasmod( + """ def func1(): raise ValueError("hello\\nworld") - """) + """ + ) excinfo = pytest.raises(ValueError, mod.func1) excinfo.traceback = excinfo.traceback.filter() p = FormattedExcinfo() @@ -578,17 +606,17 @@ raise ValueError() # test as intermittent entry lines = reprtb.lines - assert lines[0] == ' def func1():' + assert lines[0] == " def func1():" assert lines[1] == '> raise ValueError("hello\\nworld")' # test as last entry p = FormattedExcinfo(showlocals=True) repr_entry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo) lines = repr_entry.lines - assert lines[0] == ' def func1():' + assert lines[0] == " def func1():" assert lines[1] == '> raise ValueError("hello\\nworld")' - assert lines[2] == 'E ValueError: hello' - assert lines[3] == 'E world' + assert lines[2] == "E ValueError: hello" + assert lines[3] == "E world" assert not lines[4:] loc = repr_entry.reprlocals is not None @@ -598,42 +626,46 @@ raise ValueError() # assert loc.message == "ValueError: hello" def test_repr_tracebackentry_lines2(self, importasmod): - mod = importasmod(""" + mod = importasmod( + """ def func1(m, x, y, z): raise ValueError("hello\\nworld") - """) + """ + ) excinfo = pytest.raises(ValueError, mod.func1, "m" * 90, 5, 13, "z" * 120) excinfo.traceback = excinfo.traceback.filter() entry = excinfo.traceback[-1] p = FormattedExcinfo(funcargs=True) reprfuncargs = p.repr_args(entry) - assert reprfuncargs.args[0] == ('m', repr("m" * 90)) - assert reprfuncargs.args[1] == ('x', '5') - assert reprfuncargs.args[2] == ('y', '13') - assert reprfuncargs.args[3] == ('z', repr("z" * 120)) + assert reprfuncargs.args[0] == ("m", repr("m" * 90)) + assert 
+        assert reprfuncargs.args[1] == ("x", "5")
+        assert reprfuncargs.args[2] == ("y", "13")
+        assert reprfuncargs.args[3] == ("z", repr("z" * 120))

         p = FormattedExcinfo(funcargs=True)
         repr_entry = p.repr_traceback_entry(entry)
         assert repr_entry.reprfuncargs.args == reprfuncargs.args
         tw = TWMock()
         repr_entry.toterminal(tw)
-        assert tw.lines[0] == "m = " + repr('m' * 90)
+        assert tw.lines[0] == "m = " + repr("m" * 90)
         assert tw.lines[1] == "x = 5, y = 13"
-        assert tw.lines[2] == "z = " + repr('z' * 120)
+        assert tw.lines[2] == "z = " + repr("z" * 120)

     def test_repr_tracebackentry_lines_var_kw_args(self, importasmod):
-        mod = importasmod("""
+        mod = importasmod(
+            """
             def func1(x, *y, **z):
                 raise ValueError("hello\\nworld")
-        """)
-        excinfo = pytest.raises(ValueError, mod.func1, 'a', 'b', c='d')
+        """
+        )
+        excinfo = pytest.raises(ValueError, mod.func1, "a", "b", c="d")
         excinfo.traceback = excinfo.traceback.filter()
         entry = excinfo.traceback[-1]
         p = FormattedExcinfo(funcargs=True)
         reprfuncargs = p.repr_args(entry)
-        assert reprfuncargs.args[0] == ('x', repr('a'))
-        assert reprfuncargs.args[1] == ('y', repr(('b',)))
-        assert reprfuncargs.args[2] == ('z', repr({'c': 'd'}))
+        assert reprfuncargs.args[0] == ("x", repr("a"))
+        assert reprfuncargs.args[1] == ("y", repr(("b",)))
+        assert reprfuncargs.args[2] == ("z", repr({"c": "d"}))

         p = FormattedExcinfo(funcargs=True)
         repr_entry = p.repr_traceback_entry(entry)
@@ -643,18 +675,20 @@ raise ValueError()
         assert tw.lines[0] == "x = 'a', y = ('b',), z = {'c': 'd'}"

     def test_repr_tracebackentry_short(self, importasmod):
-        mod = importasmod("""
+        mod = importasmod(
+            """
             def func1():
                 raise ValueError("hello")
             def entry():
                 func1()
-        """)
+        """
+        )
         excinfo = pytest.raises(ValueError, mod.entry)
         p = FormattedExcinfo(style="short")
         reprtb = p.repr_traceback_entry(excinfo.traceback[-2])
         lines = reprtb.lines
         basename = py.path.local(mod.__file__).basename
-        assert lines[0] == '    func1()'
+        assert lines[0] == "    func1()"
         assert basename in str(reprtb.reprfileloc.path)
         assert reprtb.reprfileloc.lineno == 5

@@ -663,17 +697,19 @@ raise ValueError()
         reprtb = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
         lines = reprtb.lines
         assert lines[0] == '    raise ValueError("hello")'
-        assert lines[1] == 'E       ValueError: hello'
+        assert lines[1] == "E       ValueError: hello"
         assert basename in str(reprtb.reprfileloc.path)
         assert reprtb.reprfileloc.lineno == 3

     def test_repr_tracebackentry_no(self, importasmod):
-        mod = importasmod("""
+        mod = importasmod(
+            """
             def func1():
                 raise ValueError("hello")
             def entry():
                 func1()
-        """)
+        """
+        )
         excinfo = pytest.raises(ValueError, mod.entry)
         p = FormattedExcinfo(style="no")
         p.repr_traceback_entry(excinfo.traceback[-2])
@@ -681,16 +717,18 @@ raise ValueError()
         p = FormattedExcinfo(style="no")
         reprentry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
         lines = reprentry.lines
-        assert lines[0] == 'E       ValueError: hello'
+        assert lines[0] == "E       ValueError: hello"
         assert not lines[1:]

     def test_repr_traceback_tbfilter(self, importasmod):
-        mod = importasmod("""
+        mod = importasmod(
+            """
             def f(x):
                 raise ValueError(x)
             def entry():
                 f(0)
-        """)
+        """
+        )
         excinfo = pytest.raises(ValueError, mod.entry)
         p = FormattedExcinfo(tbfilter=True)
         reprtb = p.repr_traceback(excinfo)
@@ -700,15 +738,18 @@ raise ValueError()
         assert len(reprtb.reprentries) == 3

     def test_traceback_short_no_source(self, importasmod, monkeypatch):
-        mod = importasmod("""
+        mod = importasmod(
+            """
             def func1():
                 raise ValueError("hello")
             def entry():
                 func1()
-        """)
+        """
+        )
         excinfo = pytest.raises(ValueError, mod.entry)
         from _pytest._code.code import Code
-        monkeypatch.setattr(Code, 'path', 'bogus')
+
+        monkeypatch.setattr(Code, "path", "bogus")
         excinfo.traceback[0].frame.code.path = "bogus"
         p = FormattedExcinfo(style="short")
         reprtb = p.repr_traceback_entry(excinfo.traceback[-2])
@@ -717,18 +758,20 @@ raise ValueError()
         last_reprtb = last_p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
         last_lines = last_reprtb.lines
         monkeypatch.undo()
-        assert lines[0] == '    func1()'
+        assert lines[0] == "    func1()"
         assert last_lines[0] == '    raise ValueError("hello")'
-        assert last_lines[1] == 'E       ValueError: hello'
+        assert last_lines[1] == "E       ValueError: hello"

     def test_repr_traceback_and_excinfo(self, importasmod):
-        mod = importasmod("""
+        mod = importasmod(
+            """
             def f(x):
                 raise ValueError(x)
             def entry():
                 f(0)
-        """)
+        """
+        )
         excinfo = pytest.raises(ValueError, mod.entry)

         for style in ("long", "short"):
@@ -747,12 +790,14 @@ raise ValueError()
         assert repr.reprcrash.message == "ValueError: 0"

     def test_repr_traceback_with_invalid_cwd(self, importasmod, monkeypatch):
-        mod = importasmod("""
+        mod = importasmod(
+            """
             def f(x):
                 raise ValueError(x)
             def entry():
                 f(0)
-        """)
+        """
+        )
         excinfo = pytest.raises(ValueError, mod.entry)

         p = FormattedExcinfo()
@@ -760,15 +805,17 @@ raise ValueError()
         def raiseos():
             raise OSError(2)

-        monkeypatch.setattr(os, 'getcwd', raiseos)
+        monkeypatch.setattr(os, "getcwd", raiseos)
         assert p._makepath(__file__) == __file__
         p.repr_traceback(excinfo)

     def test_repr_excinfo_addouterr(self, importasmod):
-        mod = importasmod("""
+        mod = importasmod(
+            """
             def entry():
                 raise ValueError()
-        """)
+        """
+        )
         excinfo = pytest.raises(ValueError, mod.entry)
         repr = excinfo.getrepr()
         repr.addsection("title", "content")
@@ -778,10 +825,12 @@ raise ValueError()
         assert twmock.lines[-2] == ("-", "title")

     def test_repr_excinfo_reprcrash(self, importasmod):
-        mod = importasmod("""
+        mod = importasmod(
+            """
             def entry():
                 raise ValueError()
-        """)
+        """
+        )
         excinfo = pytest.raises(ValueError, mod.entry)
         repr = excinfo.getrepr()
         assert repr.reprcrash.path.endswith("mod.py")
@@ -790,14 +839,16 @@ raise ValueError()
         assert str(repr.reprcrash).endswith("mod.py:3: ValueError")

     def test_repr_traceback_recursion(self, importasmod):
-        mod = importasmod("""
+        mod = importasmod(
+            """
             def rec2(x):
                 return rec1(x+1)
             def rec1(x):
                 return rec2(x-1)
             def entry():
                 rec1(42)
-        """)
+        """
+        )
         excinfo = pytest.raises(RuntimeError, mod.entry)

         for style in ("short", "long", "no"):
@@ -807,12 +858,14 @@ raise ValueError()
             assert str(reprtb)

     def test_reprexcinfo_getrepr(self, importasmod):
-        mod = importasmod("""
+        mod = importasmod(
+            """
             def f(x):
                 raise ValueError(x)
             def entry():
                 f(0)
-        """)
+        """
+        )
         excinfo = pytest.raises(ValueError, mod.entry)

         for style in ("short", "long", "no"):
@@ -830,6 +883,7 @@ raise ValueError()
         from _pytest._code.code import TerminalRepr

         class MyRepr(TerminalRepr):
+
             def toterminal(self, tw):
                 tw.line(py.builtin._totext("я", "utf-8"))

@@ -837,12 +891,14 @@ raise ValueError()
         assert x == py.builtin._totext("я", "utf-8")

     def test_toterminal_long(self, importasmod):
-        mod = importasmod("""
+        mod = importasmod(
+            """
             def g(x):
                 raise ValueError(x)
             def f():
                 g(3)
-        """)
+        """
+        )
         excinfo = pytest.raises(ValueError, mod.f)
         excinfo.traceback = excinfo.traceback.filter()
         repr = excinfo.getrepr()
@@ -867,14 +923,16 @@ raise ValueError()
         assert tw.lines[12] == ":3: ValueError"

     def test_toterminal_long_missing_source(self, importasmod, tmpdir):
-        mod = importasmod("""
+        mod = importasmod(
+            """
             def g(x):
                 raise ValueError(x)
             def f():
                 g(3)
-        """)
+        """
+        )
         excinfo = pytest.raises(ValueError, mod.f)
-        tmpdir.join('mod.py').remove()
+        tmpdir.join("mod.py").remove()
         excinfo.traceback = excinfo.traceback.filter()
         repr = excinfo.getrepr()
         tw = TWMock()
@@ -896,14 +954,16 @@ raise ValueError()
         assert tw.lines[10] == ":3: ValueError"

     def test_toterminal_long_incomplete_source(self, importasmod, tmpdir):
-        mod = importasmod("""
+        mod = importasmod(
+            """
             def g(x):
                 raise ValueError(x)
             def f():
                 g(3)
-        """)
+        """
+        )
         excinfo = pytest.raises(ValueError, mod.f)
-        tmpdir.join('mod.py').write('asdf')
+        tmpdir.join("mod.py").write("asdf")
         excinfo.traceback = excinfo.traceback.filter()
         repr = excinfo.getrepr()
         tw = TWMock()
@@ -925,10 +985,12 @@ raise ValueError()
         assert tw.lines[10] == ":3: ValueError"

     def test_toterminal_long_filenames(self, importasmod):
-        mod = importasmod("""
+        mod = importasmod(
+            """
             def f():
                 raise ValueError()
-        """)
+        """
+        )
         excinfo = pytest.raises(ValueError, mod.f)
         tw = TWMock()
         path = py.path.local(mod.__file__)
@@ -951,20 +1013,30 @@ raise ValueError()
         finally:
             old.chdir()

-    @pytest.mark.parametrize('reproptions', [
-        {'style': style, 'showlocals': showlocals,
-         'funcargs': funcargs, 'tbfilter': tbfilter
-         } for style in ("long", "short", "no")
-        for showlocals in (True, False)
-        for tbfilter in (True, False)
-        for funcargs in (True, False)])
+    @pytest.mark.parametrize(
+        "reproptions",
+        [
+            {
+                "style": style,
+                "showlocals": showlocals,
+                "funcargs": funcargs,
+                "tbfilter": tbfilter,
+            }
+            for style in ("long", "short", "no")
+            for showlocals in (True, False)
+            for tbfilter in (True, False)
+            for funcargs in (True, False)
+        ],
+    )
     def test_format_excinfo(self, importasmod, reproptions):
-        mod = importasmod("""
+        mod = importasmod(
+            """
             def g(x):
                 raise ValueError(x)
             def f():
                 g(3)
-        """)
+        """
+        )
         excinfo = pytest.raises(ValueError, mod.f)
         tw = py.io.TerminalWriter(stringio=True)
         repr = excinfo.getrepr(**reproptions)
@@ -972,7 +1044,8 @@ raise ValueError()
         assert tw.stringio.getvalue()

     def test_traceback_repr_style(self, importasmod):
-        mod = importasmod("""
+        mod = importasmod(
+            """
             def f():
                 g()
             def g():
@@ -981,7 +1054,8 @@ raise ValueError()
                 i()
             def i():
                 raise ValueError()
-        """)
+        """
+        )
         excinfo = pytest.raises(ValueError, mod.f)
         excinfo.traceback = excinfo.traceback.filter()
         excinfo.traceback[1].set_repr_style("short")
@@ -1017,7 +1091,8 @@ raise ValueError()

     @pytest.mark.skipif("sys.version_info[0] < 3")
     def test_exc_chain_repr(self, importasmod):
-        mod = importasmod("""
+        mod = importasmod(
+            """
             class Err(Exception):
                 pass
             def f():
@@ -1032,7 +1107,8 @@ raise ValueError()

             def h():
                 raise AttributeError()
-        """)
+        """
+        )
         excinfo = pytest.raises(AttributeError, mod.f)
         r = excinfo.getrepr(style="long")
         tw = TWMock()
@@ -1045,8 +1121,8 @@ raise ValueError()
         assert tw.lines[3] == ">       g()"
         assert tw.lines[4] == ""
         line = tw.get_write_msg(5)
-        assert line.endswith('mod.py')
-        assert tw.lines[6] == ':6: '
+        assert line.endswith("mod.py")
+        assert tw.lines[6] == ":6: "
         assert tw.lines[7] == ("_ ", None)
         assert tw.lines[8] == ""
         assert tw.lines[9] == "    def g():"
@@ -1054,10 +1130,13 @@ raise ValueError()
         assert tw.lines[11] == "E       ValueError"
         assert tw.lines[12] == ""
         line = tw.get_write_msg(13)
-        assert line.endswith('mod.py')
-        assert tw.lines[14] == ':12: ValueError'
+        assert line.endswith("mod.py")
+        assert tw.lines[14] == ":12: ValueError"
         assert tw.lines[15] == ""
-        assert tw.lines[16] == "The above exception was the direct cause of the
following exception:" + assert ( + tw.lines[16] + == "The above exception was the direct cause of the following exception:" + ) assert tw.lines[17] == "" assert tw.lines[18] == " def f():" assert tw.lines[19] == " try:" @@ -1067,10 +1146,13 @@ raise ValueError() assert tw.lines[23] == "E test_exc_chain_repr0.mod.Err" assert tw.lines[24] == "" line = tw.get_write_msg(25) - assert line.endswith('mod.py') + assert line.endswith("mod.py") assert tw.lines[26] == ":8: Err" assert tw.lines[27] == "" - assert tw.lines[28] == "During handling of the above exception, another exception occurred:" + assert ( + tw.lines[28] + == "During handling of the above exception, another exception occurred:" + ) assert tw.lines[29] == "" assert tw.lines[30] == " def f():" assert tw.lines[31] == " try:" @@ -1081,21 +1163,22 @@ raise ValueError() assert tw.lines[36] == "> h()" assert tw.lines[37] == "" line = tw.get_write_msg(38) - assert line.endswith('mod.py') + assert line.endswith("mod.py") assert tw.lines[39] == ":10: " - assert tw.lines[40] == ('_ ', None) + assert tw.lines[40] == ("_ ", None) assert tw.lines[41] == "" assert tw.lines[42] == " def h():" assert tw.lines[43] == "> raise AttributeError()" assert tw.lines[44] == "E AttributeError" assert tw.lines[45] == "" line = tw.get_write_msg(46) - assert line.endswith('mod.py') + assert line.endswith("mod.py") assert tw.lines[47] == ":15: AttributeError" @pytest.mark.skipif("sys.version_info[0] < 3") def test_exc_repr_with_raise_from_none_chain_suppression(self, importasmod): - mod = importasmod(""" + mod = importasmod( + """ def f(): try: g() @@ -1103,7 +1186,8 @@ raise ValueError() raise AttributeError() from None def g(): raise ValueError() - """) + """ + ) excinfo = pytest.raises(AttributeError, mod.f) r = excinfo.getrepr(style="long") tw = TWMock() @@ -1119,15 +1203,24 @@ raise ValueError() assert tw.lines[6] == "E AttributeError" assert tw.lines[7] == "" line = tw.get_write_msg(8) - assert line.endswith('mod.py') + assert line.endswith("mod.py") assert tw.lines[9] == ":6: AttributeError" assert len(tw.lines) == 10 @pytest.mark.skipif("sys.version_info[0] < 3") - @pytest.mark.parametrize('reason, description', [ - ('cause', 'The above exception was the direct cause of the following exception:'), - ('context', 'During handling of the above exception, another exception occurred:'), - ]) + @pytest.mark.parametrize( + "reason, description", + [ + ( + "cause", + "The above exception was the direct cause of the following exception:", + ), + ( + "context", + "During handling of the above exception, another exception occurred:", + ), + ], + ) def test_exc_chain_repr_without_traceback(self, importasmod, reason, description): """ Handle representation of exception chains where one of the exceptions doesn't have a @@ -1135,8 +1228,10 @@ raise ValueError() module (#1984). 
""" from _pytest.pytester import LineMatcher - exc_handling_code = ' from e' if reason == 'cause' else '' - mod = importasmod(""" + + exc_handling_code = " from e" if reason == "cause" else "" + mod = importasmod( + """ def f(): try: g() @@ -1144,13 +1239,16 @@ raise ValueError() raise RuntimeError('runtime problem'){exc_handling_code} def g(): raise ValueError('invalid value') - """.format(exc_handling_code=exc_handling_code)) + """.format( + exc_handling_code=exc_handling_code + ) + ) with pytest.raises(RuntimeError) as excinfo: mod.f() # emulate the issue described in #1984 - attr = '__%s__' % reason + attr = "__%s__" % reason getattr(excinfo.value, attr).__traceback__ = None r = excinfo.getrepr() @@ -1159,19 +1257,21 @@ raise ValueError() r.toterminal(tw) matcher = LineMatcher(tw.stringio.getvalue().splitlines()) - matcher.fnmatch_lines([ - "ValueError: invalid value", - description, - "* except Exception as e:", - "> * raise RuntimeError('runtime problem')" + exc_handling_code, - "E *RuntimeError: runtime problem", - ]) + matcher.fnmatch_lines( + [ + "ValueError: invalid value", + description, + "* except Exception as e:", + "> * raise RuntimeError('runtime problem')" + exc_handling_code, + "E *RuntimeError: runtime problem", + ] + ) @pytest.mark.parametrize("style", ["short", "long"]) @pytest.mark.parametrize("encoding", [None, "utf8", "utf16"]) def test_repr_traceback_with_unicode(style, encoding): - msg = u'☹' + msg = u"☹" if encoding is not None: msg = msg.encode(encoding) try: @@ -1184,15 +1284,17 @@ def test_repr_traceback_with_unicode(style, encoding): def test_cwd_deleted(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test(tmpdir): tmpdir.chdir() tmpdir.remove() assert False - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines(['* 1 failed in *']) - assert 'INTERNALERROR' not in result.stdout.str() + result.stderr.str() + result.stdout.fnmatch_lines(["* 1 failed in *"]) + assert "INTERNALERROR" not in result.stdout.str() + result.stderr.str() def test_exception_repr_extraction_error_on_recursion(): @@ -1200,12 +1302,15 @@ def test_exception_repr_extraction_error_on_recursion(): Ensure we can properly detect a recursion error even if some locals raise error on comparison (#2459). """ + class numpy_like(object): def __eq__(self, other): if type(other) is numpy_like: - raise ValueError('The truth value of an array ' - 'with more than one element is ambiguous.') + raise ValueError( + "The truth value of an array " + "with more than one element is ambiguous." + ) def a(x): return b(numpy_like()) @@ -1218,14 +1323,17 @@ def test_exception_repr_extraction_error_on_recursion(): except: # noqa from _pytest._code.code import ExceptionInfo from _pytest.pytester import LineMatcher + exc_info = ExceptionInfo() matcher = LineMatcher(str(exc_info.getrepr()).splitlines()) - matcher.fnmatch_lines([ - '!!! Recursion error detected, but an error occurred locating the origin of recursion.', - '*The following exception happened*', - '*ValueError: The truth value of an array*', - ]) + matcher.fnmatch_lines( + [ + "!!! Recursion error detected, but an error occurred locating the origin of recursion.", + "*The following exception happened*", + "*ValueError: The truth value of an array*", + ] + ) def test_no_recursion_index_on_recursion_error(): @@ -1234,14 +1342,17 @@ def test_no_recursion_index_on_recursion_error(): during a recursion error (#2486). 
""" try: + class RecursionDepthError(object): + def __getattr__(self, attr): - return getattr(self, '_' + attr) + return getattr(self, "_" + attr) RecursionDepthError().trigger except: # noqa from _pytest._code.code import ExceptionInfo + exc_info = ExceptionInfo() - assert 'maximum recursion' in str(exc_info.getrepr()) + assert "maximum recursion" in str(exc_info.getrepr()) else: assert 0 diff --git a/testing/code/test_source.py b/testing/code/test_source.py index ee731ed4f..56dad7567 100644 --- a/testing/code/test_source.py +++ b/testing/code/test_source.py @@ -23,14 +23,20 @@ def test_source_str_function(): x = Source(" 3") assert str(x) == "3" - x = Source(""" + x = Source( + """ 3 - """, rstrip=False) + """, + rstrip=False, + ) assert str(x) == "\n3\n " - x = Source(""" + x = Source( + """ 3 - """, rstrip=True) + """, + rstrip=True, + ) assert str(x) == "\n3" @@ -41,70 +47,81 @@ def test_unicode(): return x = Source(unicode("4")) assert str(x) == "4" - co = _pytest._code.compile(unicode('u"\xc3\xa5"', 'utf8'), mode='eval') + co = _pytest._code.compile(unicode('u"\xc3\xa5"', "utf8"), mode="eval") val = eval(co) assert isinstance(val, unicode) def test_source_from_function(): source = _pytest._code.Source(test_source_str_function) - assert str(source).startswith('def test_source_str_function():') + assert str(source).startswith("def test_source_str_function():") def test_source_from_method(): + class TestClass(object): + def test_method(self): pass + source = _pytest._code.Source(TestClass().test_method) - assert source.lines == ["def test_method(self):", - " pass"] + assert source.lines == ["def test_method(self):", " pass"] def test_source_from_lines(): lines = ["a \n", "b\n", "c"] source = _pytest._code.Source(lines) - assert source.lines == ['a ', 'b', 'c'] + assert source.lines == ["a ", "b", "c"] def test_source_from_inner_function(): + def f(): pass + source = _pytest._code.Source(f, deindent=False) - assert str(source).startswith(' def f():') + assert str(source).startswith(" def f():") source = _pytest._code.Source(f) - assert str(source).startswith('def f():') + assert str(source).startswith("def f():") def test_source_putaround_simple(): source = Source("raise ValueError") source = source.putaround( - "try:", """\ + "try:", + """\ except ValueError: x = 42 else: - x = 23""") - assert str(source) == """\ + x = 23""", + ) + assert ( + str(source) + == """\ try: raise ValueError except ValueError: x = 42 else: x = 23""" + ) def test_source_putaround(): source = Source() - source = source.putaround(""" + source = source.putaround( + """ if 1: x=1 - """) + """ + ) assert str(source).strip() == "if 1:\n x=1" def test_source_strips(): source = Source("") assert source == Source() - assert str(source) == '' + assert str(source) == "" assert source.strip() == source @@ -116,10 +133,10 @@ def test_source_strip_multiline(): def test_syntaxerror_rerepresentation(): - ex = pytest.raises(SyntaxError, _pytest._code.compile, 'xyz xyz') + ex = pytest.raises(SyntaxError, _pytest._code.compile, "xyz xyz") assert ex.value.lineno == 1 assert ex.value.offset in (4, 7) # XXX pypy/jython versus cpython? 
- assert ex.value.text.strip(), 'x x' + assert ex.value.text.strip(), "x x" def test_isparseable(): @@ -132,12 +149,14 @@ def test_isparseable(): class TestAccesses(object): - source = Source("""\ + source = Source( + """\ def f(x): pass def g(x): pass - """) + """ + ) def test_getrange(self): x = self.source[0:2] @@ -158,18 +177,20 @@ class TestAccesses(object): class TestSourceParsingAndCompiling(object): - source = Source("""\ + source = Source( + """\ def f(x): assert (x == 3 + 4) - """).strip() + """ + ).strip() def test_compile(self): co = _pytest._code.compile("x=3") d = {} exec(co, d) - assert d['x'] == 3 + assert d["x"] == 3 def test_compile_and_getsource_simple(self): co = _pytest._code.compile("x=3") @@ -178,20 +199,26 @@ class TestSourceParsingAndCompiling(object): assert str(source) == "x=3" def test_compile_and_getsource_through_same_function(self): + def gensource(source): return _pytest._code.compile(source) - co1 = gensource(""" + + co1 = gensource( + """ def f(): raise KeyError() - """) - co2 = gensource(""" + """ + ) + co2 = gensource( + """ def f(): raise ValueError() - """) + """ + ) source1 = inspect.getsource(co1) - assert 'KeyError' in source1 + assert "KeyError" in source1 source2 = inspect.getsource(co2) - assert 'ValueError' in source2 + assert "ValueError" in source2 def test_getstatement(self): # print str(self.source) @@ -199,13 +226,15 @@ class TestSourceParsingAndCompiling(object): for i in range(1, 4): # print "trying start in line %r" % self.source[i] s = self.source.getstatement(i) - #x = s.deindent() + # x = s.deindent() assert str(s) == ass def test_getstatementrange_triple_quoted(self): # print str(self.source) - source = Source("""hello(''' - ''')""") + source = Source( + """hello(''' + ''')""" + ) s = source.getstatement(0) assert s == str(source) s = source.getstatement(1) @@ -213,7 +242,8 @@ class TestSourceParsingAndCompiling(object): @astonly def test_getstatementrange_within_constructs(self): - source = Source("""\ + source = Source( + """\ try: try: raise ValueError @@ -221,7 +251,8 @@ class TestSourceParsingAndCompiling(object): pass finally: 42 - """) + """ + ) assert len(source) == 7 # check all lineno's that could occur in a traceback # assert source.getstatementrange(0) == (0, 7) @@ -233,19 +264,22 @@ class TestSourceParsingAndCompiling(object): assert source.getstatementrange(6) == (6, 7) def test_getstatementrange_bug(self): - source = Source("""\ + source = Source( + """\ try: x = ( y + z) except: pass - """) + """ + ) assert len(source) == 6 assert source.getstatementrange(2) == (1, 4) def test_getstatementrange_bug2(self): - source = Source("""\ + source = Source( + """\ assert ( 33 == @@ -255,19 +289,22 @@ class TestSourceParsingAndCompiling(object): ), ] ) - """) + """ + ) assert len(source) == 9 assert source.getstatementrange(5) == (0, 9) def test_getstatementrange_ast_issue58(self): - source = Source("""\ + source = Source( + """\ def test_some(): for a in [a for a in CAUSE_ERROR]: pass x = 3 - """) + """ + ) assert getstatement(2, source).lines == source.lines[2:3] assert getstatement(3, source).lines == source.lines[3:4] @@ -282,6 +319,7 @@ class TestSourceParsingAndCompiling(object): def test_compile_to_ast(self): import ast + source = Source("x = 4") mod = source.compile(flag=ast.PyCF_ONLY_AST) assert isinstance(mod, ast.Module) @@ -295,16 +333,17 @@ class TestSourceParsingAndCompiling(object): frame = excinfo.traceback[-1].frame stmt = frame.code.fullsource.getstatement(frame.lineno) # print "block", str(block) - assert 
str(stmt).strip().startswith('assert') + assert str(stmt).strip().startswith("assert") - @pytest.mark.parametrize('name', ['', None, 'my']) + @pytest.mark.parametrize("name", ["", None, "my"]) def test_compilefuncs_and_path_sanity(self, name): + def check(comp, name): co = comp(self.source, name) if not name: - expected = "codegen %s:%d>" % (mypath, mylineno + 2 + 2) + expected = "codegen %s:%d>" % (mypath, mylineno + 2 + 3) else: - expected = "codegen %r %s:%d>" % (name, mypath, mylineno + 2 + 2) + expected = "codegen %r %s:%d>" % (name, mypath, mylineno + 2 + 3) fn = co.co_filename assert fn.endswith(expected) @@ -316,33 +355,41 @@ class TestSourceParsingAndCompiling(object): check(comp, name) def test_offsetless_synerr(self): - pytest.raises(SyntaxError, _pytest._code.compile, "lambda a,a: 0", mode='eval') + pytest.raises(SyntaxError, _pytest._code.compile, "lambda a,a: 0", mode="eval") def test_getstartingblock_singleline(): + class A(object): + def __init__(self, *args): frame = sys._getframe(1) self.source = _pytest._code.Frame(frame).statement - x = A('x', 'y') + x = A("x", "y") values = [i for i in x.source.lines if i.strip()] assert len(values) == 1 def test_getline_finally(): - def c(): pass - excinfo = pytest.raises(TypeError, """ + + def c(): + pass + + excinfo = pytest.raises( + TypeError, + """ teardown = None try: c(1) finally: if teardown: teardown() - """) + """, + ) source = excinfo.traceback[-1].statement - assert str(source).strip() == 'c(1)' + assert str(source).strip() == "c(1)" def test_getfuncsource_dynamic(): @@ -354,28 +401,35 @@ def test_getfuncsource_dynamic(): """ co = _pytest._code.compile(source) py.builtin.exec_(co, globals()) - assert str(_pytest._code.Source(f)).strip() == 'def f():\n raise ValueError' - assert str(_pytest._code.Source(g)).strip() == 'def g(): pass' + assert str(_pytest._code.Source(f)).strip() == "def f():\n raise ValueError" + assert str(_pytest._code.Source(g)).strip() == "def g(): pass" def test_getfuncsource_with_multine_string(): + def f(): - c = '''while True: + c = """while True: pass -''' - assert str(_pytest._code.Source(f)).strip() == "def f():\n c = '''while True:\n pass\n'''" +""" + + assert ( + str(_pytest._code.Source(f)).strip() + == 'def f():\n c = """while True:\n pass\n"""' + ) def test_deindent(): from _pytest._code.source import deindent as deindent - assert deindent(['\tfoo', '\tbar', ]) == ['foo', 'bar'] + + assert deindent(["\tfoo", "\tbar"]) == ["foo", "bar"] def f(): - c = '''while True: + c = """while True: pass -''' +""" + lines = deindent(inspect.getsource(f).splitlines()) - assert lines == ["def f():", " c = '''while True:", " pass", "'''"] + assert lines == ["def f():", ' c = """while True:', " pass", '"""'] source = """ def f(): @@ -383,17 +437,19 @@ def test_deindent(): pass """ lines = deindent(source.splitlines()) - assert lines == ['', 'def f():', ' def g():', ' pass', ' '] + assert lines == ["", "def f():", " def g():", " pass", " "] def test_source_of_class_at_eof_without_newline(tmpdir): # this test fails because the implicit inspect.getsource(A) below # does not return the "x = 1" last line. 
- source = _pytest._code.Source(''' + source = _pytest._code.Source( + """ class A(object): def method(self): x = 1 - ''') + """ + ) path = tmpdir.join("a.py") path.write(source) s2 = _pytest._code.Source(tmpdir.join("a.py").pyimport().A) @@ -401,12 +457,14 @@ def test_source_of_class_at_eof_without_newline(tmpdir): if True: + def x(): pass def test_getsource_fallback(): from _pytest._code.source import getsource + expected = """def x(): pass""" src = getsource(x) @@ -415,6 +473,7 @@ def test_getsource_fallback(): def test_idem_compile_and_getsource(): from _pytest._code.source import getsource + expected = "def x(): pass" co = _pytest._code.compile(expected) src = getsource(co) @@ -423,25 +482,29 @@ def test_idem_compile_and_getsource(): def test_findsource_fallback(): from _pytest._code.source import findsource + src, lineno = findsource(x) - assert 'test_findsource_simple' in str(src) - assert src[lineno] == ' def x():' + assert "test_findsource_simple" in str(src) + assert src[lineno] == " def x():" def test_findsource(): from _pytest._code.source import findsource - co = _pytest._code.compile("""if 1: + + co = _pytest._code.compile( + """if 1: def x(): pass -""") +""" + ) src, lineno = findsource(co) - assert 'if 1:' in str(src) + assert "if 1:" in str(src) d = {} eval(co, d) - src, lineno = findsource(d['x']) - assert 'if 1:' in str(src) + src, lineno = findsource(d["x"]) + assert "if 1:" in str(src) assert src[lineno] == " def x():" @@ -469,30 +532,37 @@ def test_getfslineno(): class B(object): pass + B.__name__ = "B2" assert getfslineno(B)[1] == -1 def test_code_of_object_instance_with_call(): + class A(object): pass + pytest.raises(TypeError, lambda: _pytest._code.Source(A())) class WithCall(object): + def __call__(self): pass code = _pytest._code.Code(WithCall()) - assert 'pass' in str(code.source()) + assert "pass" in str(code.source()) class Hello(object): + def __call__(self): pass + pytest.raises(TypeError, lambda: _pytest._code.Code(Hello)) def getstatement(lineno, source): from _pytest._code.source import getstatementrange_ast + source = _pytest._code.Source(source, deindent=False) ast, start, end = getstatementrange_ast(lineno, source) return source[start:end] @@ -505,9 +575,14 @@ def test_oneline(): def test_comment_and_no_newline_at_end(): from _pytest._code.source import getstatementrange_ast - source = Source(['def test_basic_complex():', - ' assert 1 == 2', - '# vim: filetype=pyopencl:fdm=marker']) + + source = Source( + [ + "def test_basic_complex():", + " assert 1 == 2", + "# vim: filetype=pyopencl:fdm=marker", + ] + ) ast, start, end = getstatementrange_ast(1, source) assert end == 2 @@ -517,8 +592,7 @@ def test_oneline_and_comment(): assert str(source) == "raise ValueError" -@pytest.mark.xfail(hasattr(sys, "pypy_version_info"), - reason='does not work on pypy') +@pytest.mark.xfail(hasattr(sys, "pypy_version_info"), reason="does not work on pypy") def test_comments(): source = '''def test(): "comment 1" @@ -533,20 +607,22 @@ comment 4 """ ''' for line in range(2, 6): - assert str(getstatement(line, source)) == ' x = 1' + assert str(getstatement(line, source)) == " x = 1" for line in range(6, 10): - assert str(getstatement(line, source)) == ' assert False' + assert str(getstatement(line, source)) == " assert False" assert str(getstatement(10, source)) == '"""' def test_comment_in_statement(): - source = '''test(foo=1, + source = """test(foo=1, # comment 1 bar=2) -''' +""" for line in range(1, 3): - assert str(getstatement(line, source)) == \ - 'test(foo=1,\n # 
comment 1\n bar=2)' + assert ( + str(getstatement(line, source)) + == "test(foo=1,\n # comment 1\n bar=2)" + ) def test_single_line_else(): @@ -560,19 +636,24 @@ def test_single_line_finally(): def test_issue55(): - source = ('def round_trip(dinp):\n assert 1 == dinp\n' - 'def test_rt():\n round_trip("""\n""")\n') + source = ( + "def round_trip(dinp):\n assert 1 == dinp\n" + 'def test_rt():\n round_trip("""\n""")\n' + ) s = getstatement(3, source) assert str(s) == ' round_trip("""\n""")' def XXXtest_multiline(): - source = getstatement(0, """\ + source = getstatement( + 0, + """\ raise ValueError( 23 ) x = 3 -""") +""", + ) assert str(source) == "raise ValueError(\n 23\n)" diff --git a/testing/code/test_source_multiline_block.py b/testing/code/test_source_multiline_block.py index b356d191f..92f7412eb 100644 --- a/testing/code/test_source_multiline_block.py +++ b/testing/code/test_source_multiline_block.py @@ -12,15 +12,18 @@ def test_getstartingblock_multiline(): see hhatto/autopep8#307). It was considered better to just move this single test to its own file and exclude it from autopep8 than try to complicate things. """ + class A(object): + def __init__(self, *args): frame = sys._getframe(1) self.source = _pytest._code.Frame(frame).statement + # fmt: off x = A('x', 'y' , 'z') - + # fmt: on values = [i for i in x.source.lines if i.strip()] assert len(values) == 4 diff --git a/testing/deprecated_test.py b/testing/deprecated_test.py index cb66472c9..39ff1f1fc 100644 --- a/testing/deprecated_test.py +++ b/testing/deprecated_test.py @@ -3,7 +3,8 @@ import pytest def test_yield_tests_deprecation(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def func1(arg, arg2): assert arg == arg2 def test_gen(): @@ -12,101 +13,129 @@ def test_yield_tests_deprecation(testdir): def test_gen2(): for k in range(10): yield func1, 1, 1 - """) - result = testdir.runpytest('-ra') - result.stdout.fnmatch_lines([ - '*yield tests are deprecated, and scheduled to be removed in pytest 4.0*', - '*2 passed*', - ]) - assert result.stdout.str().count('yield tests are deprecated') == 2 + """ + ) + result = testdir.runpytest("-ra") + result.stdout.fnmatch_lines( + [ + "*yield tests are deprecated, and scheduled to be removed in pytest 4.0*", + "*2 passed*", + ] + ) + assert result.stdout.str().count("yield tests are deprecated") == 2 def test_funcarg_prefix_deprecation(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def pytest_funcarg__value(): return 10 def test_funcarg_prefix(value): assert value == 10 - """) - result = testdir.runpytest('-ra') - result.stdout.fnmatch_lines([ - ('*pytest_funcarg__value: ' - 'declaring fixtures using "pytest_funcarg__" prefix is deprecated ' - 'and scheduled to be removed in pytest 4.0. ' - 'Please remove the prefix and use the @pytest.fixture decorator instead.'), - '*1 passed*', - ]) + """ + ) + result = testdir.runpytest("-ra") + result.stdout.fnmatch_lines( + [ + ( + "*pytest_funcarg__value: " + 'declaring fixtures using "pytest_funcarg__" prefix is deprecated ' + "and scheduled to be removed in pytest 4.0. " + "Please remove the prefix and use the @pytest.fixture decorator instead." 
+ ), + "*1 passed*", + ] + ) def test_pytest_setup_cfg_deprecated(testdir): - testdir.makefile('.cfg', setup=''' + testdir.makefile( + ".cfg", + setup=""" [pytest] addopts = --verbose - ''') + """, + ) result = testdir.runpytest() - result.stdout.fnmatch_lines(['*pytest*section in setup.cfg files is deprecated*use*tool:pytest*instead*']) + result.stdout.fnmatch_lines( + ["*pytest*section in setup.cfg files is deprecated*use*tool:pytest*instead*"] + ) def test_pytest_custom_cfg_deprecated(testdir): - testdir.makefile('.cfg', custom=''' + testdir.makefile( + ".cfg", + custom=""" [pytest] addopts = --verbose - ''') + """, + ) result = testdir.runpytest("-c", "custom.cfg") - result.stdout.fnmatch_lines(['*pytest*section in custom.cfg files is deprecated*use*tool:pytest*instead*']) + result.stdout.fnmatch_lines( + ["*pytest*section in custom.cfg files is deprecated*use*tool:pytest*instead*"] + ) def test_str_args_deprecated(tmpdir, testdir): """Deprecate passing strings to pytest.main(). Scheduled for removal in pytest-4.0.""" from _pytest.main import EXIT_NOTESTSCOLLECTED + warnings = [] class Collect(object): + def pytest_logwarning(self, message): warnings.append(message) ret = pytest.main("%s -x" % tmpdir, plugins=[Collect()]) - msg = ('passing a string to pytest.main() is deprecated, ' - 'pass a list of arguments instead.') + msg = ( + "passing a string to pytest.main() is deprecated, " + "pass a list of arguments instead." + ) assert msg in warnings assert ret == EXIT_NOTESTSCOLLECTED def test_getfuncargvalue_is_deprecated(request): - pytest.deprecated_call(request.getfuncargvalue, 'tmpdir') + pytest.deprecated_call(request.getfuncargvalue, "tmpdir") def test_resultlog_is_deprecated(testdir): - result = testdir.runpytest('--help') - result.stdout.fnmatch_lines(['*DEPRECATED path for machine-readable result log*']) + result = testdir.runpytest("--help") + result.stdout.fnmatch_lines(["*DEPRECATED path for machine-readable result log*"]) - testdir.makepyfile(''' + testdir.makepyfile( + """ def test(): pass - ''') - result = testdir.runpytest('--result-log=%s' % testdir.tmpdir.join('result.log')) - result.stdout.fnmatch_lines([ - '*--result-log is deprecated and scheduled for removal in pytest 4.0*', - '*See https://docs.pytest.org/*/usage.html#creating-resultlog-format-files for more information*', - ]) + """ + ) + result = testdir.runpytest("--result-log=%s" % testdir.tmpdir.join("result.log")) + result.stdout.fnmatch_lines( + [ + "*--result-log is deprecated and scheduled for removal in pytest 4.0*", + "*See https://docs.pytest.org/*/usage.html#creating-resultlog-format-files for more information*", + ] + ) -@pytest.mark.filterwarnings('always:Metafunc.addcall is deprecated') +@pytest.mark.filterwarnings("always:Metafunc.addcall is deprecated") def test_metafunc_addcall_deprecated(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def pytest_generate_tests(metafunc): metafunc.addcall({'i': 1}) metafunc.addcall({'i': 2}) def test_func(i): pass - """) - res = testdir.runpytest('-s') + """ + ) + res = testdir.runpytest("-s") assert res.ret == 0 - res.stdout.fnmatch_lines([ - "*Metafunc.addcall is deprecated*", - "*2 passed, 2 warnings*", - ]) + res.stdout.fnmatch_lines( + ["*Metafunc.addcall is deprecated*", "*2 passed, 2 warnings*"] + ) def test_terminal_reporter_writer_attr(pytestconfig): @@ -115,89 +144,122 @@ def test_terminal_reporter_writer_attr(pytestconfig): """ try: import xdist # noqa - pytest.skip('xdist workers disable the terminal reporter plugin') + + 
pytest.skip("xdist workers disable the terminal reporter plugin") except ImportError: pass - terminal_reporter = pytestconfig.pluginmanager.get_plugin('terminalreporter') + terminal_reporter = pytestconfig.pluginmanager.get_plugin("terminalreporter") assert terminal_reporter.writer is terminal_reporter._tw -@pytest.mark.parametrize('plugin', ['catchlog', 'capturelog']) +@pytest.mark.parametrize("plugin", ["catchlog", "capturelog"]) def test_pytest_catchlog_deprecated(testdir, plugin): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_func(pytestconfig): - pytestconfig.pluginmanager.register(None, 'pytest_{0}') - """.format(plugin)) + pytestconfig.pluginmanager.register(None, 'pytest_{}') + """.format( + plugin + ) + ) res = testdir.runpytest() assert res.ret == 0 - res.stdout.fnmatch_lines([ - "*pytest-*log plugin has been merged into the core*", - "*1 passed, 1 warnings*", - ]) + res.stdout.fnmatch_lines( + ["*pytest-*log plugin has been merged into the core*", "*1 passed, 1 warnings*"] + ) def test_pytest_plugins_in_non_top_level_conftest_deprecated(testdir): from _pytest.deprecated import PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST + subdirectory = testdir.tmpdir.join("subdirectory") subdirectory.mkdir() # create the inner conftest with makeconftest and then move it to the subdirectory - testdir.makeconftest(""" + testdir.makeconftest( + """ pytest_plugins=['capture'] - """) + """ + ) testdir.tmpdir.join("conftest.py").move(subdirectory.join("conftest.py")) # make the top level conftest - testdir.makeconftest(""" + testdir.makeconftest( + """ import warnings warnings.filterwarnings('always', category=DeprecationWarning) - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ def test_func(): pass - """) + """ + ) res = testdir.runpytest_subprocess() assert res.ret == 0 - res.stderr.fnmatch_lines('*' + str(PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST).splitlines()[0]) + res.stderr.fnmatch_lines( + "*" + str(PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST).splitlines()[0] + ) -def test_pytest_plugins_in_non_top_level_conftest_deprecated_no_top_level_conftest(testdir): +def test_pytest_plugins_in_non_top_level_conftest_deprecated_no_top_level_conftest( + testdir +): from _pytest.deprecated import PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST - subdirectory = testdir.tmpdir.join('subdirectory') + + subdirectory = testdir.tmpdir.join("subdirectory") subdirectory.mkdir() - testdir.makeconftest(""" + testdir.makeconftest( + """ import warnings warnings.filterwarnings('always', category=DeprecationWarning) pytest_plugins=['capture'] - """) + """ + ) testdir.tmpdir.join("conftest.py").move(subdirectory.join("conftest.py")) - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_func(): pass - """) + """ + ) res = testdir.runpytest_subprocess() assert res.ret == 0 - res.stderr.fnmatch_lines('*' + str(PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST).splitlines()[0]) + res.stderr.fnmatch_lines( + "*" + str(PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST).splitlines()[0] + ) -def test_pytest_plugins_in_non_top_level_conftest_deprecated_no_false_positives(testdir): +def test_pytest_plugins_in_non_top_level_conftest_deprecated_no_false_positives( + testdir +): from _pytest.deprecated import PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST - subdirectory = testdir.tmpdir.join('subdirectory') + + subdirectory = testdir.tmpdir.join("subdirectory") subdirectory.mkdir() - testdir.makeconftest(""" + testdir.makeconftest( + """ pass - """) + """ + ) 
testdir.tmpdir.join("conftest.py").move(subdirectory.join("conftest.py")) - testdir.makeconftest(""" + testdir.makeconftest( + """ import warnings warnings.filterwarnings('always', category=DeprecationWarning) pytest_plugins=['capture'] - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ def test_func(): pass - """) + """ + ) res = testdir.runpytest_subprocess() assert res.ret == 0 - assert str(PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST).splitlines()[0] not in res.stderr.str() + assert str(PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST).splitlines()[ + 0 + ] not in res.stderr.str() diff --git a/testing/freeze/.gitignore b/testing/freeze/.gitignore index 490310b6c..b53319087 100644 --- a/testing/freeze/.gitignore +++ b/testing/freeze/.gitignore @@ -1,3 +1,3 @@ build/ dist/ -*.spec \ No newline at end of file +*.spec diff --git a/testing/freeze/create_executable.py b/testing/freeze/create_executable.py index f4f6088ef..98aa2034c 100644 --- a/testing/freeze/create_executable.py +++ b/testing/freeze/create_executable.py @@ -1,12 +1,12 @@ """ Generates an executable with pytest runner embedded using PyInstaller. """ -if __name__ == '__main__': +if __name__ == "__main__": import pytest import subprocess hidden = [] for x in pytest.freeze_includes(): - hidden.extend(['--hidden-import', x]) - args = ['pyinstaller', '--noconfirm'] + hidden + ['runtests_script.py'] - subprocess.check_call(' '.join(args), shell=True) + hidden.extend(["--hidden-import", x]) + args = ["pyinstaller", "--noconfirm"] + hidden + ["runtests_script.py"] + subprocess.check_call(" ".join(args), shell=True) diff --git a/testing/freeze/runtests_script.py b/testing/freeze/runtests_script.py index d281601c0..d03bca840 100644 --- a/testing/freeze/runtests_script.py +++ b/testing/freeze/runtests_script.py @@ -3,7 +3,8 @@ This is the script that is actually frozen into an executable: simply executes py.test main(). """ -if __name__ == '__main__': +if __name__ == "__main__": import sys import pytest + sys.exit(pytest.main()) diff --git a/testing/freeze/tests/test_trivial.py b/testing/freeze/tests/test_trivial.py index 45622b850..08a55552a 100644 --- a/testing/freeze/tests/test_trivial.py +++ b/testing/freeze/tests/test_trivial.py @@ -1,7 +1,6 @@ - def test_upper(): - assert 'foo'.upper() == 'FOO' + assert "foo".upper() == "FOO" def test_lower(): - assert 'FOO'.lower() == 'foo' + assert "FOO".lower() == "foo" diff --git a/testing/freeze/tox_run.py b/testing/freeze/tox_run.py index 3fc388040..678a69c85 100644 --- a/testing/freeze/tox_run.py +++ b/testing/freeze/tox_run.py @@ -2,11 +2,11 @@ Called by tox.ini: uses the generated executable to run the tests in ./tests/ directory. 
""" -if __name__ == '__main__': +if __name__ == "__main__": import os import sys - executable = os.path.join(os.getcwd(), 'dist', 'runtests_script', 'runtests_script') - if sys.platform.startswith('win'): - executable += '.exe' - sys.exit(os.system('%s tests' % executable)) + executable = os.path.join(os.getcwd(), "dist", "runtests_script", "runtests_script") + if sys.platform.startswith("win"): + executable += ".exe" + sys.exit(os.system("%s tests" % executable)) diff --git a/testing/logging/test_fixture.py b/testing/logging/test_fixture.py index 24576719d..8d9ae6b51 100644 --- a/testing/logging/test_fixture.py +++ b/testing/logging/test_fixture.py @@ -4,32 +4,33 @@ import logging import pytest logger = logging.getLogger(__name__) -sublogger = logging.getLogger(__name__ + '.baz') +sublogger = logging.getLogger(__name__ + ".baz") def test_fixture_help(testdir): - result = testdir.runpytest('--fixtures') - result.stdout.fnmatch_lines(['*caplog*']) + result = testdir.runpytest("--fixtures") + result.stdout.fnmatch_lines(["*caplog*"]) def test_change_level(caplog): caplog.set_level(logging.INFO) - logger.debug('handler DEBUG level') - logger.info('handler INFO level') + logger.debug("handler DEBUG level") + logger.info("handler INFO level") caplog.set_level(logging.CRITICAL, logger=sublogger.name) - sublogger.warning('logger WARNING level') - sublogger.critical('logger CRITICAL level') + sublogger.warning("logger WARNING level") + sublogger.critical("logger CRITICAL level") - assert 'DEBUG' not in caplog.text - assert 'INFO' in caplog.text - assert 'WARNING' not in caplog.text - assert 'CRITICAL' in caplog.text + assert "DEBUG" not in caplog.text + assert "INFO" in caplog.text + assert "WARNING" not in caplog.text + assert "CRITICAL" in caplog.text def test_change_level_undo(testdir): """Ensure that 'set_level' is undone after the end of the test""" - testdir.makepyfile(''' + testdir.makepyfile( + """ import logging def test1(caplog): @@ -42,58 +43,54 @@ def test_change_level_undo(testdir): # using + operator here so fnmatch_lines doesn't match the code in the traceback logging.info('log from ' + 'test2') assert 0 - ''') + """ + ) result = testdir.runpytest_subprocess() - result.stdout.fnmatch_lines([ - '*log from test1*', - '*2 failed in *', - ]) - assert 'log from test2' not in result.stdout.str() + result.stdout.fnmatch_lines(["*log from test1*", "*2 failed in *"]) + assert "log from test2" not in result.stdout.str() def test_with_statement(caplog): with caplog.at_level(logging.INFO): - logger.debug('handler DEBUG level') - logger.info('handler INFO level') + logger.debug("handler DEBUG level") + logger.info("handler INFO level") with caplog.at_level(logging.CRITICAL, logger=sublogger.name): - sublogger.warning('logger WARNING level') - sublogger.critical('logger CRITICAL level') + sublogger.warning("logger WARNING level") + sublogger.critical("logger CRITICAL level") - assert 'DEBUG' not in caplog.text - assert 'INFO' in caplog.text - assert 'WARNING' not in caplog.text - assert 'CRITICAL' in caplog.text + assert "DEBUG" not in caplog.text + assert "INFO" in caplog.text + assert "WARNING" not in caplog.text + assert "CRITICAL" in caplog.text def test_log_access(caplog): caplog.set_level(logging.INFO) - logger.info('boo %s', 'arg') - assert caplog.records[0].levelname == 'INFO' - assert caplog.records[0].msg == 'boo %s' - assert 'boo arg' in caplog.text + logger.info("boo %s", "arg") + assert caplog.records[0].levelname == "INFO" + assert caplog.records[0].msg == "boo %s" + assert "boo 
arg" in caplog.text def test_record_tuples(caplog): caplog.set_level(logging.INFO) - logger.info('boo %s', 'arg') + logger.info("boo %s", "arg") - assert caplog.record_tuples == [ - (__name__, logging.INFO, 'boo arg'), - ] + assert caplog.record_tuples == [(__name__, logging.INFO, "boo arg")] def test_unicode(caplog): caplog.set_level(logging.INFO) - logger.info(u'bū') - assert caplog.records[0].levelname == 'INFO' - assert caplog.records[0].msg == u'bū' - assert u'bū' in caplog.text + logger.info(u"bū") + assert caplog.records[0].levelname == "INFO" + assert caplog.records[0].msg == u"bū" + assert u"bū" in caplog.text def test_clear(caplog): caplog.set_level(logging.INFO) - logger.info(u'bū') + logger.info(u"bū") assert len(caplog.records) assert caplog.text caplog.clear() @@ -103,20 +100,20 @@ def test_clear(caplog): @pytest.fixture def logging_during_setup_and_teardown(caplog): - caplog.set_level('INFO') - logger.info('a_setup_log') + caplog.set_level("INFO") + logger.info("a_setup_log") yield - logger.info('a_teardown_log') - assert [x.message for x in caplog.get_records('teardown')] == ['a_teardown_log'] + logger.info("a_teardown_log") + assert [x.message for x in caplog.get_records("teardown")] == ["a_teardown_log"] def test_caplog_captures_for_all_stages(caplog, logging_during_setup_and_teardown): assert not caplog.records - assert not caplog.get_records('call') - logger.info('a_call_log') - assert [x.message for x in caplog.get_records('call')] == ['a_call_log'] + assert not caplog.get_records("call") + logger.info("a_call_log") + assert [x.message for x in caplog.get_records("call")] == ["a_call_log"] - assert [x.message for x in caplog.get_records('setup')] == ['a_setup_log'] + assert [x.message for x in caplog.get_records("setup")] == ["a_setup_log"] # This reachers into private API, don't use this type of thing in real tests! 
- assert set(caplog._item.catch_log_handlers.keys()) == {'setup', 'call'} + assert set(caplog._item.catch_log_handlers.keys()) == {"setup", "call"} diff --git a/testing/logging/test_formatter.py b/testing/logging/test_formatter.py index 10a921470..ca2a41065 100644 --- a/testing/logging/test_formatter.py +++ b/testing/logging/test_formatter.py @@ -5,13 +5,20 @@ from _pytest.logging import ColoredLevelFormatter def test_coloredlogformatter(): - logfmt = '%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s' + logfmt = "%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s" record = logging.LogRecord( - name='dummy', level=logging.INFO, pathname='dummypath', lineno=10, - msg='Test Message', args=(), exc_info=False) + name="dummy", + level=logging.INFO, + pathname="dummypath", + lineno=10, + msg="Test Message", + args=(), + exc_info=False, + ) class ColorConfig(object): + class option(object): pass @@ -19,11 +26,12 @@ def test_coloredlogformatter(): tw.hasmarkup = True formatter = ColoredLevelFormatter(tw, logfmt) output = formatter.format(record) - assert output == ('dummypath 10 ' - '\x1b[32mINFO \x1b[0m Test Message') + assert ( + output + == ("dummypath 10 " "\x1b[32mINFO \x1b[0m Test Message") + ) tw.hasmarkup = False formatter = ColoredLevelFormatter(tw, logfmt) output = formatter.format(record) - assert output == ('dummypath 10 ' - 'INFO Test Message') + assert output == ("dummypath 10 " "INFO Test Message") diff --git a/testing/logging/test_reporting.py b/testing/logging/test_reporting.py index 699df0e60..91ed2e475 100644 --- a/testing/logging/test_reporting.py +++ b/testing/logging/test_reporting.py @@ -8,26 +8,27 @@ import pytest def test_nothing_logged(testdir): - testdir.makepyfile(''' + testdir.makepyfile( + """ import sys def test_foo(): sys.stdout.write('text going to stdout') sys.stderr.write('text going to stderr') assert False - ''') + """ + ) result = testdir.runpytest() assert result.ret == 1 - result.stdout.fnmatch_lines(['*- Captured stdout call -*', - 'text going to stdout']) - result.stdout.fnmatch_lines(['*- Captured stderr call -*', - 'text going to stderr']) + result.stdout.fnmatch_lines(["*- Captured stdout call -*", "text going to stdout"]) + result.stdout.fnmatch_lines(["*- Captured stderr call -*", "text going to stderr"]) with pytest.raises(pytest.fail.Exception): - result.stdout.fnmatch_lines(['*- Captured *log call -*']) + result.stdout.fnmatch_lines(["*- Captured *log call -*"]) def test_messages_logged(testdir): - testdir.makepyfile(''' + testdir.makepyfile( + """ import sys import logging @@ -38,19 +39,18 @@ def test_messages_logged(testdir): sys.stderr.write('text going to stderr') logger.info('text going to logger') assert False - ''') - result = testdir.runpytest('--log-level=INFO') + """ + ) + result = testdir.runpytest("--log-level=INFO") assert result.ret == 1 - result.stdout.fnmatch_lines(['*- Captured *log call -*', - '*text going to logger*']) - result.stdout.fnmatch_lines(['*- Captured stdout call -*', - 'text going to stdout']) - result.stdout.fnmatch_lines(['*- Captured stderr call -*', - 'text going to stderr']) + result.stdout.fnmatch_lines(["*- Captured *log call -*", "*text going to logger*"]) + result.stdout.fnmatch_lines(["*- Captured stdout call -*", "text going to stdout"]) + result.stdout.fnmatch_lines(["*- Captured stderr call -*", "text going to stderr"]) def test_root_logger_affected(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import logging logger = logging.getLogger() def test_foo(): @@ -59,18 +59,19 @@ def 
test_root_logger_affected(testdir): logger.error('error text ' + 'going to logger') assert 0 - """) - log_file = testdir.tmpdir.join('pytest.log').strpath - result = testdir.runpytest('--log-level=ERROR', '--log-file=pytest.log') + """ + ) + log_file = testdir.tmpdir.join("pytest.log").strpath + result = testdir.runpytest("--log-level=ERROR", "--log-file=pytest.log") assert result.ret == 1 # the capture log calls in the stdout section only contain the # logger.error msg, because --log-level=ERROR - result.stdout.fnmatch_lines(['*error text going to logger*']) + result.stdout.fnmatch_lines(["*error text going to logger*"]) with pytest.raises(pytest.fail.Exception): - result.stdout.fnmatch_lines(['*warning text going to logger*']) + result.stdout.fnmatch_lines(["*warning text going to logger*"]) with pytest.raises(pytest.fail.Exception): - result.stdout.fnmatch_lines(['*info text going to logger*']) + result.stdout.fnmatch_lines(["*info text going to logger*"]) # the log file should contain the warning and the error log messages and # not the info one, because the default level of the root logger is @@ -84,7 +85,8 @@ def test_root_logger_affected(testdir): def test_log_cli_level_log_level_interaction(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import logging logger = logging.getLogger() @@ -94,23 +96,27 @@ def test_log_cli_level_log_level_interaction(testdir): logger.warning('warning text ' + 'going to logger') logger.error('error text ' + 'going to logger') assert 0 - """) + """ + ) - result = testdir.runpytest('--log-cli-level=INFO', '--log-level=ERROR') + result = testdir.runpytest("--log-cli-level=INFO", "--log-level=ERROR") assert result.ret == 1 - result.stdout.fnmatch_lines([ - '*-- live log call --*', - '*INFO*info text going to logger', - '*WARNING*warning text going to logger', - '*ERROR*error text going to logger', - '=* 1 failed in *=', - ]) - assert 'DEBUG' not in result.stdout.str() + result.stdout.fnmatch_lines( + [ + "*-- live log call --*", + "*INFO*info text going to logger", + "*WARNING*warning text going to logger", + "*ERROR*error text going to logger", + "=* 1 failed in *=", + ] + ) + assert "DEBUG" not in result.stdout.str() def test_setup_logging(testdir): - testdir.makepyfile(''' + testdir.makepyfile( + """ import logging logger = logging.getLogger(__name__) @@ -121,17 +127,23 @@ def test_setup_logging(testdir): def test_foo(): logger.info('text going to logger from call') assert False - ''') - result = testdir.runpytest('--log-level=INFO') + """ + ) + result = testdir.runpytest("--log-level=INFO") assert result.ret == 1 - result.stdout.fnmatch_lines(['*- Captured *log setup -*', - '*text going to logger from setup*', - '*- Captured *log call -*', - '*text going to logger from call*']) + result.stdout.fnmatch_lines( + [ + "*- Captured *log setup -*", + "*text going to logger from setup*", + "*- Captured *log call -*", + "*text going to logger from call*", + ] + ) def test_teardown_logging(testdir): - testdir.makepyfile(''' + testdir.makepyfile( + """ import logging logger = logging.getLogger(__name__) @@ -142,17 +154,23 @@ def test_teardown_logging(testdir): def teardown_function(function): logger.info('text going to logger from teardown') assert False - ''') - result = testdir.runpytest('--log-level=INFO') + """ + ) + result = testdir.runpytest("--log-level=INFO") assert result.ret == 1 - result.stdout.fnmatch_lines(['*- Captured *log call -*', - '*text going to logger from call*', - '*- Captured *log teardown -*', - '*text going to logger from 
teardown*']) + result.stdout.fnmatch_lines( + [ + "*- Captured *log call -*", + "*text going to logger from call*", + "*- Captured *log teardown -*", + "*text going to logger from teardown*", + ] + ) def test_disable_log_capturing(testdir): - testdir.makepyfile(''' + testdir.makepyfile( + """ import sys import logging @@ -163,26 +181,26 @@ def test_disable_log_capturing(testdir): logger.warning('catch me if you can!') sys.stderr.write('text going to stderr') assert False - ''') - result = testdir.runpytest('--no-print-logs') + """ + ) + result = testdir.runpytest("--no-print-logs") print(result.stdout) assert result.ret == 1 - result.stdout.fnmatch_lines(['*- Captured stdout call -*', - 'text going to stdout']) - result.stdout.fnmatch_lines(['*- Captured stderr call -*', - 'text going to stderr']) + result.stdout.fnmatch_lines(["*- Captured stdout call -*", "text going to stdout"]) + result.stdout.fnmatch_lines(["*- Captured stderr call -*", "text going to stderr"]) with pytest.raises(pytest.fail.Exception): - result.stdout.fnmatch_lines(['*- Captured *log call -*']) + result.stdout.fnmatch_lines(["*- Captured *log call -*"]) def test_disable_log_capturing_ini(testdir): testdir.makeini( - ''' + """ [pytest] log_print=False - ''' + """ ) - testdir.makepyfile(''' + testdir.makepyfile( + """ import sys import logging @@ -193,46 +211,54 @@ def test_disable_log_capturing_ini(testdir): logger.warning('catch me if you can!') sys.stderr.write('text going to stderr') assert False - ''') + """ + ) result = testdir.runpytest() print(result.stdout) assert result.ret == 1 - result.stdout.fnmatch_lines(['*- Captured stdout call -*', - 'text going to stdout']) - result.stdout.fnmatch_lines(['*- Captured stderr call -*', - 'text going to stderr']) + result.stdout.fnmatch_lines(["*- Captured stdout call -*", "text going to stdout"]) + result.stdout.fnmatch_lines(["*- Captured stderr call -*", "text going to stderr"]) with pytest.raises(pytest.fail.Exception): - result.stdout.fnmatch_lines(['*- Captured *log call -*']) + result.stdout.fnmatch_lines(["*- Captured *log call -*"]) -@pytest.mark.parametrize('enabled', [True, False]) +@pytest.mark.parametrize("enabled", [True, False]) def test_log_cli_enabled_disabled(testdir, enabled): - msg = 'critical message logged by test' - testdir.makepyfile(''' + msg = "critical message logged by test" + testdir.makepyfile( + """ import logging def test_log_cli(): logging.critical("{}") - '''.format(msg)) + """.format( + msg + ) + ) if enabled: - testdir.makeini(''' + testdir.makeini( + """ [pytest] log_cli=true - ''') + """ + ) result = testdir.runpytest() if enabled: - result.stdout.fnmatch_lines([ - 'test_log_cli_enabled_disabled.py::test_log_cli ', - '*-- live log call --*', - 'test_log_cli_enabled_disabled.py* CRITICAL critical message logged by test', - 'PASSED*', - ]) + result.stdout.fnmatch_lines( + [ + "test_log_cli_enabled_disabled.py::test_log_cli ", + "*-- live log call --*", + "test_log_cli_enabled_disabled.py* CRITICAL critical message logged by test", + "PASSED*", + ] + ) else: assert msg not in result.stdout.str() def test_log_cli_default_level(testdir): # Default log file level - testdir.makepyfile(''' + testdir.makepyfile( + """ import pytest import logging def test_log_cli(request): @@ -240,19 +266,24 @@ def test_log_cli_default_level(testdir): assert plugin.log_cli_handler.level == logging.NOTSET logging.getLogger('catchlog').info("INFO message won't be shown") logging.getLogger('catchlog').warning("WARNING message will be shown") - ''') - 
testdir.makeini('''
+    """
+    )
+    testdir.makeini(
+        """
         [pytest]
         log_cli=true
-    ''')
+    """
+    )

     result = testdir.runpytest()

     # fnmatch_lines does an assertion internally
-    result.stdout.fnmatch_lines([
-        'test_log_cli_default_level.py::test_log_cli ',
-        'test_log_cli_default_level.py*WARNING message will be shown*',
-    ])
+    result.stdout.fnmatch_lines(
+        [
+            "test_log_cli_default_level.py::test_log_cli ",
+            "test_log_cli_default_level.py*WARNING message will be shown*",
+        ]
+    )
     assert "INFO message won't be shown" not in result.stdout.str()
     # make sure that we get a '0' exit code for the testsuite
     assert result.ret == 0
@@ -260,8 +291,9 @@

 def test_log_cli_default_level_multiple_tests(testdir, request):
     """Ensure we reset the first newline added by the live logger between tests"""
-    filename = request.node.name + '.py'
-    testdir.makepyfile('''
+    filename = request.node.name + ".py"
+    testdir.makepyfile(
+        """
         import logging

         def test_log_1():
@@ -269,29 +301,35 @@
             logging.warning("log message from test_log_1")

         def test_log_2():
             logging.warning("log message from test_log_2")
-    ''')
-    testdir.makeini('''
+    """
+    )
+    testdir.makeini(
+        """
         [pytest]
         log_cli=true
-    ''')
+    """
+    )

     result = testdir.runpytest()
-    result.stdout.fnmatch_lines([
-        '{}::test_log_1 '.format(filename),
-        '*WARNING*log message from test_log_1*',
-        'PASSED *50%*',
-        '{}::test_log_2 '.format(filename),
-        '*WARNING*log message from test_log_2*',
-        'PASSED *100%*',
-        '=* 2 passed in *=',
-    ])
+    result.stdout.fnmatch_lines(
+        [
+            "{}::test_log_1 ".format(filename),
+            "*WARNING*log message from test_log_1*",
+            "PASSED *50%*",
+            "{}::test_log_2 ".format(filename),
+            "*WARNING*log message from test_log_2*",
+            "PASSED *100%*",
+            "=* 2 passed in *=",
+        ]
+    )


 def test_log_cli_default_level_sections(testdir, request):
     """Check that with live logging enabled we are printing the correct headers
     during start/setup/call/teardown/finish."""
-    filename = request.node.name + '.py'
-    testdir.makeconftest('''
+    filename = request.node.name + ".py"
+    testdir.makeconftest(
+        """
         import pytest
         import logging

@@ -300,9 +338,11 @@

         def pytest_runtest_logfinish():
             logging.warning('<<<<< END <<<<<<<')
-    ''')
+    """
+    )

-    testdir.makepyfile('''
+    testdir.makepyfile(
+        """
         import pytest
         import logging

@@ -317,48 +357,53 @@

         def test_log_2(fix):
             logging.warning("log message from test_log_2")
-    ''')
-    testdir.makeini('''
+    """
+    )
+    testdir.makeini(
+        """
         [pytest]
         log_cli=true
-    ''')
+    """
+    )

     result = testdir.runpytest()
-    result.stdout.fnmatch_lines([
-        '{}::test_log_1 '.format(filename),
-        '*-- live log start --*',
-        '*WARNING* >>>>> START >>>>>*',
-        '*-- live log setup --*',
-        '*WARNING*log message from setup of test_log_1*',
-        '*-- live log call --*',
-        '*WARNING*log message from test_log_1*',
-        'PASSED *50%*',
-        '*-- live log teardown --*',
-        '*WARNING*log message from teardown of test_log_1*',
-        '*-- live log finish --*',
-        '*WARNING* <<<<< END <<<<<<<*',
-
-        '{}::test_log_2 '.format(filename),
-        '*-- live log start --*',
-        '*WARNING* >>>>> START >>>>>*',
-        '*-- live log setup --*',
-        '*WARNING*log message from setup of test_log_2*',
-        '*-- live log call --*',
-        '*WARNING*log message from test_log_2*',
-        'PASSED *100%*',
-        '*-- live log teardown --*',
-        '*WARNING*log message from teardown of test_log_2*',
-        '*-- live log finish --*',
-        '*WARNING* <<<<< END <<<<<<<*',
-        '=* 2 passed in *=',
passed in *=', - ]) + result.stdout.fnmatch_lines( + [ + "{}::test_log_1 ".format(filename), + "*-- live log start --*", + "*WARNING* >>>>> START >>>>>*", + "*-- live log setup --*", + "*WARNING*log message from setup of test_log_1*", + "*-- live log call --*", + "*WARNING*log message from test_log_1*", + "PASSED *50%*", + "*-- live log teardown --*", + "*WARNING*log message from teardown of test_log_1*", + "*-- live log finish --*", + "*WARNING* <<<<< END <<<<<<<*", + "{}::test_log_2 ".format(filename), + "*-- live log start --*", + "*WARNING* >>>>> START >>>>>*", + "*-- live log setup --*", + "*WARNING*log message from setup of test_log_2*", + "*-- live log call --*", + "*WARNING*log message from test_log_2*", + "PASSED *100%*", + "*-- live log teardown --*", + "*WARNING*log message from teardown of test_log_2*", + "*-- live log finish --*", + "*WARNING* <<<<< END <<<<<<<*", + "=* 2 passed in *=", + ] + ) def test_live_logs_unknown_sections(testdir, request): """Check that with live logging enable we are printing the correct headers during start/setup/call/teardown/finish.""" - filename = request.node.name + '.py' - testdir.makeconftest(''' + filename = request.node.name + ".py" + testdir.makeconftest( + """ import pytest import logging @@ -370,9 +415,11 @@ def test_live_logs_unknown_sections(testdir, request): def pytest_runtest_logfinish(): logging.warning('<<<<< END <<<<<<<') - ''') + """ + ) - testdir.makepyfile(''' + testdir.makepyfile( + """ import pytest import logging @@ -385,34 +432,40 @@ def test_live_logs_unknown_sections(testdir, request): def test_log_1(fix): logging.warning("log message from test_log_1") - ''') - testdir.makeini(''' + """ + ) + testdir.makeini( + """ [pytest] log_cli=true - ''') + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - '*WARNING*Unknown Section*', - '{}::test_log_1 '.format(filename), - '*WARNING* >>>>> START >>>>>*', - '*-- live log setup --*', - '*WARNING*log message from setup of test_log_1*', - '*-- live log call --*', - '*WARNING*log message from test_log_1*', - 'PASSED *100%*', - '*-- live log teardown --*', - '*WARNING*log message from teardown of test_log_1*', - '*WARNING* <<<<< END <<<<<<<*', - '=* 1 passed in *=', - ]) + result.stdout.fnmatch_lines( + [ + "*WARNING*Unknown Section*", + "{}::test_log_1 ".format(filename), + "*WARNING* >>>>> START >>>>>*", + "*-- live log setup --*", + "*WARNING*log message from setup of test_log_1*", + "*-- live log call --*", + "*WARNING*log message from test_log_1*", + "PASSED *100%*", + "*-- live log teardown --*", + "*WARNING*log message from teardown of test_log_1*", + "*WARNING* <<<<< END <<<<<<<*", + "=* 1 passed in *=", + ] + ) def test_sections_single_new_line_after_test_outcome(testdir, request): """Check that only a single new line is written between log messages during teardown/finish.""" - filename = request.node.name + '.py' - testdir.makeconftest(''' + filename = request.node.name + ".py" + testdir.makeconftest( + """ import pytest import logging @@ -422,9 +475,11 @@ def test_sections_single_new_line_after_test_outcome(testdir, request): def pytest_runtest_logfinish(): logging.warning('<<<<< END <<<<<<<') logging.warning('<<<<< END <<<<<<<') - ''') + """ + ) - testdir.makepyfile(''' + testdir.makepyfile( + """ import pytest import logging @@ -437,38 +492,50 @@ def test_sections_single_new_line_after_test_outcome(testdir, request): def test_log_1(fix): logging.warning("log message from test_log_1") - ''') - testdir.makeini(''' + """ + ) + testdir.makeini( + """ [pytest] 
log_cli=true - ''') + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - '{}::test_log_1 '.format(filename), - '*-- live log start --*', - '*WARNING* >>>>> START >>>>>*', - '*-- live log setup --*', - '*WARNING*log message from setup of test_log_1*', - '*-- live log call --*', - '*WARNING*log message from test_log_1*', - 'PASSED *100%*', - '*-- live log teardown --*', - '*WARNING*log message from teardown of test_log_1*', - '*-- live log finish --*', - '*WARNING* <<<<< END <<<<<<<*', - '*WARNING* <<<<< END <<<<<<<*', - '=* 1 passed in *=', - ]) - assert re.search(r'(.+)live log teardown(.+)\n(.+)WARNING(.+)\n(.+)WARNING(.+)', - result.stdout.str(), re.MULTILINE) is not None - assert re.search(r'(.+)live log finish(.+)\n(.+)WARNING(.+)\n(.+)WARNING(.+)', - result.stdout.str(), re.MULTILINE) is not None + result.stdout.fnmatch_lines( + [ + "{}::test_log_1 ".format(filename), + "*-- live log start --*", + "*WARNING* >>>>> START >>>>>*", + "*-- live log setup --*", + "*WARNING*log message from setup of test_log_1*", + "*-- live log call --*", + "*WARNING*log message from test_log_1*", + "PASSED *100%*", + "*-- live log teardown --*", + "*WARNING*log message from teardown of test_log_1*", + "*-- live log finish --*", + "*WARNING* <<<<< END <<<<<<<*", + "*WARNING* <<<<< END <<<<<<<*", + "=* 1 passed in *=", + ] + ) + assert re.search( + r"(.+)live log teardown(.+)\n(.+)WARNING(.+)\n(.+)WARNING(.+)", + result.stdout.str(), + re.MULTILINE, + ) is not None + assert re.search( + r"(.+)live log finish(.+)\n(.+)WARNING(.+)\n(.+)WARNING(.+)", + result.stdout.str(), + re.MULTILINE, + ) is not None def test_log_cli_level(testdir): # Default log file level - testdir.makepyfile(''' + testdir.makepyfile( + """ import pytest import logging def test_log_cli(request): @@ -477,31 +544,38 @@ def test_log_cli_level(testdir): logging.getLogger('catchlog').debug("This log message won't be shown") logging.getLogger('catchlog').info("This log message will be shown") print('PASSED') - ''') - testdir.makeini(''' + """ + ) + testdir.makeini( + """ [pytest] log_cli=true - ''') + """ + ) - result = testdir.runpytest('-s', '--log-cli-level=INFO') + result = testdir.runpytest("-s", "--log-cli-level=INFO") # fnmatch_lines does an assertion internally - result.stdout.fnmatch_lines([ - 'test_log_cli_level.py*This log message will be shown', - 'PASSED', # 'PASSED' on its own line because the log message prints a new line - ]) + result.stdout.fnmatch_lines( + [ + "test_log_cli_level.py*This log message will be shown", + "PASSED", # 'PASSED' on its own line because the log message prints a new line + ] + ) assert "This log message won't be shown" not in result.stdout.str() # make sure that that we get a '0' exit code for the testsuite assert result.ret == 0 - result = testdir.runpytest('-s', '--log-level=INFO') + result = testdir.runpytest("-s", "--log-level=INFO") # fnmatch_lines does an assertion internally - result.stdout.fnmatch_lines([ - 'test_log_cli_level.py* This log message will be shown', - 'PASSED', # 'PASSED' on its own line because the log message prints a new line - ]) + result.stdout.fnmatch_lines( + [ + "test_log_cli_level.py* This log message will be shown", + "PASSED", # 'PASSED' on its own line because the log message prints a new line + ] + ) assert "This log message won't be shown" not in result.stdout.str() # make sure that that we get a '0' exit code for the testsuite @@ -514,8 +588,10 @@ def test_log_cli_ini_level(testdir): [pytest] log_cli=true log_cli_level = INFO - """) - 
testdir.makepyfile(''' + """ + ) + testdir.makepyfile( + """ import pytest import logging def test_log_cli(request): @@ -524,30 +600,34 @@ def test_log_cli_ini_level(testdir): logging.getLogger('catchlog').debug("This log message won't be shown") logging.getLogger('catchlog').info("This log message will be shown") print('PASSED') - ''') + """ + ) - result = testdir.runpytest('-s') + result = testdir.runpytest("-s") # fnmatch_lines does an assertion internally - result.stdout.fnmatch_lines([ - 'test_log_cli_ini_level.py* This log message will be shown', - 'PASSED', # 'PASSED' on its own line because the log message prints a new line - ]) + result.stdout.fnmatch_lines( + [ + "test_log_cli_ini_level.py* This log message will be shown", + "PASSED", # 'PASSED' on its own line because the log message prints a new line + ] + ) assert "This log message won't be shown" not in result.stdout.str() # make sure that that we get a '0' exit code for the testsuite assert result.ret == 0 -@pytest.mark.parametrize('cli_args', ['', - '--log-level=WARNING', - '--log-file-level=WARNING', - '--log-cli-level=WARNING']) +@pytest.mark.parametrize( + "cli_args", + ["", "--log-level=WARNING", "--log-file-level=WARNING", "--log-cli-level=WARNING"], +) def test_log_cli_auto_enable(testdir, request, cli_args): """Check that live logs are enabled if --log-level or --log-cli-level is passed on the CLI. It should not be auto enabled if the same configs are set on the INI file. """ - testdir.makepyfile(''' + testdir.makepyfile( + """ import pytest import logging @@ -555,35 +635,40 @@ def test_log_cli_auto_enable(testdir, request, cli_args): logging.info("log message from test_log_1 not to be shown") logging.warning("log message from test_log_1") - ''') - testdir.makeini(''' + """ + ) + testdir.makeini( + """ [pytest] log_level=INFO log_cli_level=INFO - ''') + """ + ) result = testdir.runpytest(cli_args) - if cli_args == '--log-cli-level=WARNING': - result.stdout.fnmatch_lines([ - '*::test_log_1 ', - '*-- live log call --*', - '*WARNING*log message from test_log_1*', - 'PASSED *100%*', - '=* 1 passed in *=', - ]) - assert 'INFO' not in result.stdout.str() + if cli_args == "--log-cli-level=WARNING": + result.stdout.fnmatch_lines( + [ + "*::test_log_1 ", + "*-- live log call --*", + "*WARNING*log message from test_log_1*", + "PASSED *100%*", + "=* 1 passed in *=", + ] + ) + assert "INFO" not in result.stdout.str() else: - result.stdout.fnmatch_lines([ - '*test_log_cli_auto_enable*100%*', - '=* 1 passed in *=', - ]) - assert 'INFO' not in result.stdout.str() - assert 'WARNING' not in result.stdout.str() + result.stdout.fnmatch_lines( + ["*test_log_cli_auto_enable*100%*", "=* 1 passed in *="] + ) + assert "INFO" not in result.stdout.str() + assert "WARNING" not in result.stdout.str() def test_log_file_cli(testdir): # Default log file level - testdir.makepyfile(''' + testdir.makepyfile( + """ import pytest import logging def test_log_file(request): @@ -592,16 +677,17 @@ def test_log_file_cli(testdir): logging.getLogger('catchlog').info("This log message won't be shown") logging.getLogger('catchlog').warning("This log message will be shown") print('PASSED') - ''') + """ + ) - log_file = testdir.tmpdir.join('pytest.log').strpath + log_file = testdir.tmpdir.join("pytest.log").strpath - result = testdir.runpytest('-s', '--log-file={0}'.format(log_file), '--log-file-level=WARNING') + result = testdir.runpytest( + "-s", "--log-file={}".format(log_file), "--log-file-level=WARNING" + ) # fnmatch_lines does an assertion internally - 
result.stdout.fnmatch_lines([ - 'test_log_file_cli.py PASSED', - ]) + result.stdout.fnmatch_lines(["test_log_file_cli.py PASSED"]) # make sure that that we get a '0' exit code for the testsuite assert result.ret == 0 @@ -614,7 +700,8 @@ def test_log_file_cli(testdir): def test_log_file_cli_level(testdir): # Default log file level - testdir.makepyfile(''' + testdir.makepyfile( + """ import pytest import logging def test_log_file(request): @@ -623,18 +710,17 @@ def test_log_file_cli_level(testdir): logging.getLogger('catchlog').debug("This log message won't be shown") logging.getLogger('catchlog').info("This log message will be shown") print('PASSED') - ''') + """ + ) - log_file = testdir.tmpdir.join('pytest.log').strpath + log_file = testdir.tmpdir.join("pytest.log").strpath - result = testdir.runpytest('-s', - '--log-file={0}'.format(log_file), - '--log-file-level=INFO') + result = testdir.runpytest( + "-s", "--log-file={}".format(log_file), "--log-file-level=INFO" + ) # fnmatch_lines does an assertion internally - result.stdout.fnmatch_lines([ - 'test_log_file_cli_level.py PASSED', - ]) + result.stdout.fnmatch_lines(["test_log_file_cli_level.py PASSED"]) # make sure that that we get a '0' exit code for the testsuite assert result.ret == 0 @@ -646,25 +732,31 @@ def test_log_file_cli_level(testdir): def test_log_level_not_changed_by_default(testdir): - testdir.makepyfile(''' + testdir.makepyfile( + """ import logging def test_log_file(): assert logging.getLogger().level == logging.WARNING - ''') - result = testdir.runpytest('-s') - result.stdout.fnmatch_lines('* 1 passed in *') + """ + ) + result = testdir.runpytest("-s") + result.stdout.fnmatch_lines("* 1 passed in *") def test_log_file_ini(testdir): - log_file = testdir.tmpdir.join('pytest.log').strpath + log_file = testdir.tmpdir.join("pytest.log").strpath testdir.makeini( """ [pytest] - log_file={0} + log_file={} log_file_level=WARNING - """.format(log_file)) - testdir.makepyfile(''' + """.format( + log_file + ) + ) + testdir.makepyfile( + """ import pytest import logging def test_log_file(request): @@ -673,14 +765,13 @@ def test_log_file_ini(testdir): logging.getLogger('catchlog').info("This log message won't be shown") logging.getLogger('catchlog').warning("This log message will be shown") print('PASSED') - ''') + """ + ) - result = testdir.runpytest('-s') + result = testdir.runpytest("-s") # fnmatch_lines does an assertion internally - result.stdout.fnmatch_lines([ - 'test_log_file_ini.py PASSED', - ]) + result.stdout.fnmatch_lines(["test_log_file_ini.py PASSED"]) # make sure that that we get a '0' exit code for the testsuite assert result.ret == 0 @@ -692,15 +783,19 @@ def test_log_file_ini(testdir): def test_log_file_ini_level(testdir): - log_file = testdir.tmpdir.join('pytest.log').strpath + log_file = testdir.tmpdir.join("pytest.log").strpath testdir.makeini( """ [pytest] - log_file={0} + log_file={} log_file_level = INFO - """.format(log_file)) - testdir.makepyfile(''' + """.format( + log_file + ) + ) + testdir.makepyfile( + """ import pytest import logging def test_log_file(request): @@ -709,14 +804,13 @@ def test_log_file_ini_level(testdir): logging.getLogger('catchlog').debug("This log message won't be shown") logging.getLogger('catchlog').info("This log message will be shown") print('PASSED') - ''') + """ + ) - result = testdir.runpytest('-s') + result = testdir.runpytest("-s") # fnmatch_lines does an assertion internally - result.stdout.fnmatch_lines([ - 'test_log_file_ini_level.py PASSED', - ]) + 
result.stdout.fnmatch_lines(["test_log_file_ini_level.py PASSED"]) # make sure that that we get a '0' exit code for the testsuite assert result.ret == 0 @@ -727,7 +821,7 @@ def test_log_file_ini_level(testdir): assert "This log message won't be shown" not in contents -@pytest.mark.parametrize('has_capture_manager', [True, False]) +@pytest.mark.parametrize("has_capture_manager", [True, False]) def test_live_logging_suspends_capture(has_capture_manager, request): """Test that capture manager is suspended when we emitting messages for live logging. @@ -746,10 +840,10 @@ def test_live_logging_suspends_capture(has_capture_manager, request): calls = [] def suspend_global_capture(self): - self.calls.append('suspend_global_capture') + self.calls.append("suspend_global_capture") def resume_global_capture(self): - self.calls.append('resume_global_capture') + self.calls.append("resume_global_capture") # sanity check assert CaptureManager.suspend_capture_item @@ -763,15 +857,18 @@ def test_live_logging_suspends_capture(has_capture_manager, request): out_file = DummyTerminal() capture_manager = MockCaptureManager() if has_capture_manager else None handler = _LiveLoggingStreamHandler(out_file, capture_manager) - handler.set_when('call') + handler.set_when("call") - logger = logging.getLogger(__name__ + '.test_live_logging_suspends_capture') + logger = logging.getLogger(__name__ + ".test_live_logging_suspends_capture") logger.addHandler(handler) request.addfinalizer(partial(logger.removeHandler, handler)) - logger.critical('some message') + logger.critical("some message") if has_capture_manager: - assert MockCaptureManager.calls == ['suspend_global_capture', 'resume_global_capture'] + assert ( + MockCaptureManager.calls + == ["suspend_global_capture", "resume_global_capture"] + ) else: assert MockCaptureManager.calls == [] - assert out_file.getvalue() == '\nsome message\n' + assert out_file.getvalue() == "\nsome message\n" diff --git a/testing/python/approx.py b/testing/python/approx.py index 9ca21bdf8..9e25feb0b 100644 --- a/testing/python/approx.py +++ b/testing/python/approx.py @@ -8,7 +8,8 @@ from pytest import approx from operator import eq, ne from decimal import Decimal from fractions import Fraction -inf, nan = float('inf'), float('nan') + +inf, nan = float("inf"), float("nan") class MyDocTestRunner(doctest.DocTestRunner): @@ -17,29 +18,47 @@ class MyDocTestRunner(doctest.DocTestRunner): doctest.DocTestRunner.__init__(self) def report_failure(self, out, test, example, got): - raise AssertionError("'{}' evaluates to '{}', not '{}'".format( - example.source.strip(), got.strip(), example.want.strip())) + raise AssertionError( + "'{}' evaluates to '{}', not '{}'".format( + example.source.strip(), got.strip(), example.want.strip() + ) + ) class TestApprox(object): def test_repr_string(self): - plus_minus = u'\u00b1' if sys.version_info[0] > 2 else u'+-' - tol1, tol2, infr = '1.0e-06', '2.0e-06', 'inf' - assert repr(approx(1.0)) == '1.0 {pm} {tol1}'.format(pm=plus_minus, tol1=tol1) - assert repr(approx([1.0, 2.0])) == 'approx([1.0 {pm} {tol1}, 2.0 {pm} {tol2}])'.format( - pm=plus_minus, tol1=tol1, tol2=tol2) - assert repr(approx((1.0, 2.0))) == 'approx((1.0 {pm} {tol1}, 2.0 {pm} {tol2}))'.format( - pm=plus_minus, tol1=tol1, tol2=tol2) - assert repr(approx(inf)) == 'inf' - assert repr(approx(1.0, rel=nan)) == '1.0 {pm} ???'.format(pm=plus_minus) - assert repr(approx(1.0, rel=inf)) == '1.0 {pm} {infr}'.format(pm=plus_minus, infr=infr) - assert repr(approx(1.0j, rel=inf)) == '1j' + plus_minus = u"\u00b1" 
if sys.version_info[0] > 2 else u"+-" + tol1, tol2, infr = "1.0e-06", "2.0e-06", "inf" + assert repr(approx(1.0)) == "1.0 {pm} {tol1}".format(pm=plus_minus, tol1=tol1) + assert ( + repr(approx([1.0, 2.0])) + == "approx([1.0 {pm} {tol1}, 2.0 {pm} {tol2}])".format( + pm=plus_minus, tol1=tol1, tol2=tol2 + ) + ) + assert ( + repr(approx((1.0, 2.0))) + == "approx((1.0 {pm} {tol1}, 2.0 {pm} {tol2}))".format( + pm=plus_minus, tol1=tol1, tol2=tol2 + ) + ) + assert repr(approx(inf)) == "inf" + assert repr(approx(1.0, rel=nan)) == "1.0 {pm} ???".format(pm=plus_minus) + assert ( + repr(approx(1.0, rel=inf)) + == "1.0 {pm} {infr}".format(pm=plus_minus, infr=infr) + ) + assert repr(approx(1.0j, rel=inf)) == "1j" # Dictionaries aren't ordered, so we need to check both orders. - assert repr(approx({'a': 1.0, 'b': 2.0})) in ( - "approx({{'a': 1.0 {pm} {tol1}, 'b': 2.0 {pm} {tol2}}})".format(pm=plus_minus, tol1=tol1, tol2=tol2), - "approx({{'b': 2.0 {pm} {tol2}, 'a': 1.0 {pm} {tol1}}})".format(pm=plus_minus, tol1=tol1, tol2=tol2), + assert repr(approx({"a": 1.0, "b": 2.0})) in ( + "approx({{'a': 1.0 {pm} {tol1}, 'b': 2.0 {pm} {tol2}}})".format( + pm=plus_minus, tol1=tol1, tol2=tol2 + ), + "approx({{'b': 2.0 {pm} {tol2}, 'a': 1.0 {pm} {tol1}}})".format( + pm=plus_minus, tol1=tol1, tol2=tol2 + ), ) def test_operator_overloading(self): @@ -56,25 +75,19 @@ class TestApprox(object): (12345, 12345.0), (0.0, -0.0), (345678, 345678), - (Decimal('1.0001'), Decimal('1.0001')), + (Decimal("1.0001"), Decimal("1.0001")), (Fraction(1, 3), Fraction(-1, -3)), ] for a, x in examples: assert a == approx(x) def test_opposite_sign(self): - examples = [ - (eq, 1e-100, -1e-100), - (ne, 1e100, -1e100), - ] + examples = [(eq, 1e-100, -1e-100), (ne, 1e100, -1e100)] for op, a, x in examples: assert op(a, approx(x)) def test_zero_tolerance(self): - within_1e10 = [ - (1.1e-100, 1e-100), - (-1.1e-100, -1e-100), - ] + within_1e10 = [(1.1e-100, 1e-100), (-1.1e-100, -1e-100)] for a, x in within_1e10: assert x == approx(x, rel=0.0, abs=0.0) assert a != approx(x, rel=0.0, abs=0.0) @@ -98,12 +111,7 @@ class TestApprox(object): def test_inf_tolerance(self): # Everything should be equal if the tolerance is infinite. - large_diffs = [ - (1, 1000), - (1e-50, 1e50), - (-1.0, -1e300), - (0.0, 10), - ] + large_diffs = [(1, 1000), (1e-50, 1e50), (-1.0, -1e300), (0.0, 10)] for a, x in large_diffs: assert a != approx(x, rel=0.0, abs=0.0) assert a == approx(x, rel=inf, abs=0.0) @@ -113,20 +121,13 @@ class TestApprox(object): def test_inf_tolerance_expecting_zero(self): # If the relative tolerance is zero but the expected value is infinite, # the actual tolerance is a NaN, which should be an error. - illegal_kwargs = [ - dict(rel=inf, abs=0.0), - dict(rel=inf, abs=inf), - ] + illegal_kwargs = [dict(rel=inf, abs=0.0), dict(rel=inf, abs=inf)] for kwargs in illegal_kwargs: with pytest.raises(ValueError): 1 == approx(0, **kwargs) def test_nan_tolerance(self): - illegal_kwargs = [ - dict(rel=nan), - dict(abs=nan), - dict(rel=nan, abs=nan), - ] + illegal_kwargs = [dict(rel=nan), dict(abs=nan), dict(rel=nan, abs=nan)] for kwargs in illegal_kwargs: with pytest.raises(ValueError): 1.1 == approx(1, **kwargs) @@ -148,8 +149,8 @@ class TestApprox(object): (eq, 1e0 + 1e-6, 1e0), (ne, 1e0 + 2e-6, 1e0), # Absolute tolerance used. 
- (eq, 1e-100, + 1e-106), - (eq, 1e-100, + 2e-106), + (eq, 1e-100, +1e-106), + (eq, 1e-100, +2e-106), (eq, 1e-100, 0), ] for op, a, x in examples: @@ -172,21 +173,13 @@ class TestApprox(object): assert 1e-8 + 1e-16 != approx(1e-8, rel=5e-9, abs=5e-17) def test_relative_tolerance(self): - within_1e8_rel = [ - (1e8 + 1e0, 1e8), - (1e0 + 1e-8, 1e0), - (1e-8 + 1e-16, 1e-8), - ] + within_1e8_rel = [(1e8 + 1e0, 1e8), (1e0 + 1e-8, 1e0), (1e-8 + 1e-16, 1e-8)] for a, x in within_1e8_rel: assert a == approx(x, rel=5e-8, abs=0.0) assert a != approx(x, rel=5e-9, abs=0.0) def test_absolute_tolerance(self): - within_1e8_abs = [ - (1e8 + 9e-9, 1e8), - (1e0 + 9e-9, 1e0), - (1e-8 + 9e-9, 1e-8), - ] + within_1e8_abs = [(1e8 + 9e-9, 1e8), (1e0 + 9e-9, 1e0), (1e-8 + 9e-9, 1e-8)] for a, x in within_1e8_abs: assert a == approx(x, rel=0, abs=5e-8) assert a != approx(x, rel=0, abs=5e-9) @@ -233,10 +226,7 @@ class TestApprox(object): assert op(a, approx(x, nan_ok=True)) def test_int(self): - within_1e6 = [ - (1000001, 1000000), - (-1000001, -1000000), - ] + within_1e6 = [(1000001, 1000000), (-1000001, -1000000)] for a, x in within_1e6: assert a == approx(x, rel=5e-6, abs=0) assert a != approx(x, rel=5e-7, abs=0) @@ -245,15 +235,15 @@ class TestApprox(object): def test_decimal(self): within_1e6 = [ - (Decimal('1.000001'), Decimal('1.0')), - (Decimal('-1.000001'), Decimal('-1.0')), + (Decimal("1.000001"), Decimal("1.0")), + (Decimal("-1.000001"), Decimal("-1.0")), ] for a, x in within_1e6: assert a == approx(x) - assert a == approx(x, rel=Decimal('5e-6'), abs=0) - assert a != approx(x, rel=Decimal('5e-7'), abs=0) - assert approx(x, rel=Decimal('5e-6'), abs=0) == a - assert approx(x, rel=Decimal('5e-7'), abs=0) != a + assert a == approx(x, rel=Decimal("5e-6"), abs=0) + assert a != approx(x, rel=Decimal("5e-7"), abs=0) + assert approx(x, rel=Decimal("5e-6"), abs=0) == a + assert approx(x, rel=Decimal("5e-7"), abs=0) != a def test_fraction(self): within_1e6 = [ @@ -308,10 +298,10 @@ class TestApprox(object): assert (1, 2) != approx((1, 2, 3)) def test_dict(self): - actual = {'a': 1 + 1e-7, 'b': 2 + 1e-8} + actual = {"a": 1 + 1e-7, "b": 2 + 1e-8} # Dictionaries became ordered in python3.6, so switch up the order here # to make sure it doesn't matter. - expected = {'b': 2, 'a': 1} + expected = {"b": 2, "a": 1} # Return false if any element is outside the tolerance. 
assert actual == approx(expected, rel=5e-7, abs=0) @@ -320,12 +310,12 @@ class TestApprox(object): assert approx(expected, rel=5e-8, abs=0) != actual def test_dict_wrong_len(self): - assert {'a': 1, 'b': 2} != approx({'a': 1}) - assert {'a': 1, 'b': 2} != approx({'a': 1, 'c': 2}) - assert {'a': 1, 'b': 2} != approx({'a': 1, 'b': 2, 'c': 3}) + assert {"a": 1, "b": 2} != approx({"a": 1}) + assert {"a": 1, "b": 2} != approx({"a": 1, "c": 2}) + assert {"a": 1, "b": 2} != approx({"a": 1, "b": 2, "c": 3}) def test_numpy_array(self): - np = pytest.importorskip('numpy') + np = pytest.importorskip("numpy") actual = np.array([1 + 1e-7, 2 + 1e-8]) expected = np.array([1, 2]) @@ -343,7 +333,7 @@ class TestApprox(object): assert actual != approx(list(expected), rel=5e-8, abs=0) def test_numpy_array_wrong_shape(self): - np = pytest.importorskip('numpy') + np = pytest.importorskip("numpy") a12 = np.array([[1, 2]]) a21 = np.array([[1], [2]]) @@ -354,10 +344,7 @@ class TestApprox(object): def test_doctests(self): parser = doctest.DocTestParser() test = parser.get_doctest( - approx.__doc__, - {'approx': approx}, - approx.__name__, - None, None, + approx.__doc__, {"approx": approx}, approx.__name__, None, None ) runner = MyDocTestRunner() runner.run(test) @@ -367,24 +354,28 @@ class TestApprox(object): Comparing approx instances inside lists should not produce an error in the detailed diff. Integration test for issue #2111. """ - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest def test_foo(): assert [3] == [pytest.approx(4)] - """) - expected = '4.0e-06' + """ + ) + expected = "4.0e-06" result = testdir.runpytest() - result.stdout.fnmatch_lines([ - '*At index 0 diff: 3 != 4 * {0}'.format(expected), - '=* 1 failed in *=', - ]) + result.stdout.fnmatch_lines( + ["*At index 0 diff: 3 != 4 * {}".format(expected), "=* 1 failed in *="] + ) - @pytest.mark.parametrize('op', [ - pytest.param(operator.le, id='<='), - pytest.param(operator.lt, id='<'), - pytest.param(operator.ge, id='>='), - pytest.param(operator.gt, id='>'), - ]) + @pytest.mark.parametrize( + "op", + [ + pytest.param(operator.le, id="<="), + pytest.param(operator.lt, id="<"), + pytest.param(operator.ge, id=">="), + pytest.param(operator.gt, id=">"), + ], + ) def test_comparison_operator_type_error(self, op): """ pytest.approx should raise TypeError for operators other than == and != (#2003). 
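Aside, for orientation while reading this file's hunks: the TestApprox suite exercises the
public pytest.approx API. A minimal sketch of that API (values chosen for illustration only):

    from pytest import approx

    assert 0.1 + 0.2 == approx(0.3)                  # default relative tolerance is 1e-6
    assert 1e-100 == approx(2e-100, abs=5e-100)      # explicit absolute tolerance
    assert {"a": 1.0 + 1e-7} == approx({"a": 1.0})   # dicts and sequences compare elementwise

Only == and != are supported; the ordering operators raise TypeError (#2003), which is what
test_comparison_operator_type_error, continued in the next hunk, asserts.
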
@@ -393,7 +384,7 @@ class TestApprox(object): op(1, approx(1, rel=1e-6, abs=1e-12)) def test_numpy_array_with_scalar(self): - np = pytest.importorskip('numpy') + np = pytest.importorskip("numpy") actual = np.array([1 + 1e-7, 1 - 1e-8]) expected = 1.0 @@ -404,7 +395,7 @@ class TestApprox(object): assert approx(expected, rel=5e-8, abs=0) != actual def test_numpy_scalar_with_array(self): - np = pytest.importorskip('numpy') + np = pytest.importorskip("numpy") actual = 1.0 expected = np.array([1 + 1e-7, 1 - 1e-8]) diff --git a/testing/python/collect.py b/testing/python/collect.py index de40486a8..724504b1a 100644 --- a/testing/python/collect.py +++ b/testing/python/collect.py @@ -8,10 +8,13 @@ import pytest from _pytest.main import EXIT_NOTESTSCOLLECTED from _pytest.nodes import Collector -ignore_parametrized_marks = pytest.mark.filterwarnings('ignore:Applying marks directly to parameters') +ignore_parametrized_marks = pytest.mark.filterwarnings( + "ignore:Applying marks directly to parameters" +) class TestModule(object): + def test_failing_import(self, testdir): modcol = testdir.getmodulecol("import alksdjalskdjalkjals") pytest.raises(Collector.CollectError, modcol.collect) @@ -21,17 +24,19 @@ class TestModule(object): b = testdir.mkdir("b") p = a.ensure("test_whatever.py") p.pyimport() - del sys.modules['test_whatever'] + del sys.modules["test_whatever"] b.ensure("test_whatever.py") result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*import*mismatch*", - "*imported*test_whatever*", - "*%s*" % a.join("test_whatever.py"), - "*not the same*", - "*%s*" % b.join("test_whatever.py"), - "*HINT*", - ]) + result.stdout.fnmatch_lines( + [ + "*import*mismatch*", + "*imported*test_whatever*", + "*%s*" % a.join("test_whatever.py"), + "*not the same*", + "*%s*" % b.join("test_whatever.py"), + "*HINT*", + ] + ) def test_import_prepend_append(self, testdir, monkeypatch): syspath = list(sys.path) @@ -42,11 +47,16 @@ class TestModule(object): root2.ensure("x456.py") p = root2.join("test_x456.py") monkeypatch.syspath_prepend(str(root1)) - p.write(dedent("""\ + p.write( + dedent( + """\ import x456 def test(): assert x456.__file__.startswith(%r) - """ % str(root2))) + """ + % str(root2) + ) + ) with root2.as_cwd(): reprec = testdir.inline_run("--import-mode=append") reprec.assertoutcome(passed=0, failed=1) @@ -63,15 +73,17 @@ class TestModule(object): pytest.raises(ImportError, lambda: modcol.obj) def test_invalid_test_module_name(self, testdir): - a = testdir.mkdir('a') - a.ensure('test_one.part1.py') + a = testdir.mkdir("a") + a.ensure("test_one.part1.py") result = testdir.runpytest("-rw") - result.stdout.fnmatch_lines([ - "ImportError while importing test module*test_one.part1*", - "Hint: make sure your test modules/packages have valid Python names.", - ]) + result.stdout.fnmatch_lines( + [ + "ImportError while importing test module*test_one.part1*", + "Hint: make sure your test modules/packages have valid Python names.", + ] + ) - @pytest.mark.parametrize('verbose', [0, 1, 2]) + @pytest.mark.parametrize("verbose", [0, 1, 2]) def test_show_traceback_import_error(self, testdir, verbose): """Import errors when collecting modules should display the traceback (#1976). 
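Aside: nearly every hunk in testing/python/collect.py (and in the logging tests earlier in
this diff) follows the same pytester/testdir pattern, so the reformatting is easier to read
once the pattern is clear. A minimal sketch:

    def test_example(testdir):
        testdir.makepyfile(
            """
            def test_hello():
                assert 1
            """
        )
        result = testdir.runpytest()
        # fnmatch_lines does an assertion internally, using glob-style patterns
        result.stdout.fnmatch_lines(["*1 passed*"])
        assert result.ret == 0

Black's effect on this pattern is purely cosmetic: the triple-quoted source moves onto its
own line inside the call, and argument lists that fit within the line length collapse onto
a single line.
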
@@ -83,21 +95,25 @@ class TestModule(object): """, bar_traceback_import_error="", ) - testdir.makepyfile(""" + testdir.makepyfile( + """ import foo_traceback_import_error - """) - args = ('-v',) * verbose + """ + ) + args = ("-v",) * verbose result = testdir.runpytest(*args) - result.stdout.fnmatch_lines([ - "ImportError while importing test module*", - "Traceback:", - "*from bar_traceback_import_error import NOT_AVAILABLE", - "*cannot import name *NOT_AVAILABLE*", - ]) + result.stdout.fnmatch_lines( + [ + "ImportError while importing test module*", + "Traceback:", + "*from bar_traceback_import_error import NOT_AVAILABLE", + "*cannot import name *NOT_AVAILABLE*", + ] + ) assert result.ret == 2 stdout = result.stdout.str() - for name in ('_pytest', os.path.join('py', '_path')): + for name in ("_pytest", os.path.join("py", "_path")): if verbose == 2: assert name in stdout else: @@ -107,44 +123,54 @@ class TestModule(object): """Check test modules collected which raise ImportError with unicode messages are handled properly (#2336). """ - testdir.makepyfile(u""" + testdir.makepyfile( + u""" # -*- coding: utf-8 -*- raise ImportError(u'Something bad happened ☺') - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "ImportError while importing test module*", - "Traceback:", - "*raise ImportError*Something bad happened*", - ]) + result.stdout.fnmatch_lines( + [ + "ImportError while importing test module*", + "Traceback:", + "*raise ImportError*Something bad happened*", + ] + ) assert result.ret == 2 class TestClass(object): + def test_class_with_init_warning(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ class TestClass1(object): def __init__(self): pass - """) + """ + ) result = testdir.runpytest("-rw") - result.stdout.fnmatch_lines([ - "*cannot collect test class 'TestClass1' because it has a __init__ constructor", - ]) + result.stdout.fnmatch_lines( + [ + "*cannot collect test class 'TestClass1' because it has a __init__ constructor" + ] + ) def test_class_subclassobject(self, testdir): - testdir.getmodulecol(""" + testdir.getmodulecol( + """ class test(object): pass - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*collected 0*", - ]) + result.stdout.fnmatch_lines(["*collected 0*"]) def test_static_method(self, testdir): """Support for collecting staticmethod tests (#2528, #2699)""" - testdir.getmodulecol(""" + testdir.getmodulecol( + """ import pytest class Test(object): @staticmethod @@ -158,15 +184,14 @@ class TestClass(object): @staticmethod def test_fix(fix): assert fix == 1 - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*collected 2 items*", - "*2 passed in*", - ]) + result.stdout.fnmatch_lines(["*collected 2 items*", "*2 passed in*"]) def test_setup_teardown_class_as_classmethod(self, testdir): - testdir.makepyfile(test_mod1=""" + testdir.makepyfile( + test_mod1=""" class TestClassMethod(object): @classmethod def setup_class(cls): @@ -176,55 +201,63 @@ class TestClass(object): @classmethod def teardown_class(cls): pass - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*1 passed*", - ]) + result.stdout.fnmatch_lines(["*1 passed*"]) def test_issue1035_obj_has_getattr(self, testdir): - modcol = testdir.getmodulecol(""" + modcol = testdir.getmodulecol( + """ class Chameleon(object): def __getattr__(self, name): return True chameleon = Chameleon() - """) + """ + ) colitems = modcol.collect() assert len(colitems) == 0 def 
test_issue1579_namedtuple(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import collections TestCase = collections.namedtuple('TestCase', ['a']) - """) - result = testdir.runpytest('-rw') + """ + ) + result = testdir.runpytest("-rw") result.stdout.fnmatch_lines( "*cannot collect test class 'TestCase' " "because it has a __new__ constructor*" ) def test_issue2234_property(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ class TestCase(object): @property def prop(self): raise NotImplementedError() - """) + """ + ) result = testdir.runpytest() assert result.ret == EXIT_NOTESTSCOLLECTED class TestGenerator(object): + def test_generative_functions(self, testdir): - modcol = testdir.getmodulecol(""" + modcol = testdir.getmodulecol( + """ def func1(arg, arg2): assert arg == arg2 def test_gen(): yield func1, 17, 3*5 yield func1, 42, 6*7 - """) + """ + ) colitems = modcol.collect() assert len(colitems) == 1 gencol = colitems[0] @@ -233,36 +266,40 @@ class TestGenerator(object): assert len(gencolitems) == 2 assert isinstance(gencolitems[0], pytest.Function) assert isinstance(gencolitems[1], pytest.Function) - assert gencolitems[0].name == '[0]' - assert gencolitems[0].obj.__name__ == 'func1' + assert gencolitems[0].name == "[0]" + assert gencolitems[0].obj.__name__ == "func1" def test_generative_methods(self, testdir): - modcol = testdir.getmodulecol(""" + modcol = testdir.getmodulecol( + """ def func1(arg, arg2): assert arg == arg2 class TestGenMethods(object): def test_gen(self): yield func1, 17, 3*5 yield func1, 42, 6*7 - """) + """ + ) gencol = modcol.collect()[0].collect()[0].collect()[0] assert isinstance(gencol, pytest.Generator) gencolitems = gencol.collect() assert len(gencolitems) == 2 assert isinstance(gencolitems[0], pytest.Function) assert isinstance(gencolitems[1], pytest.Function) - assert gencolitems[0].name == '[0]' - assert gencolitems[0].obj.__name__ == 'func1' + assert gencolitems[0].name == "[0]" + assert gencolitems[0].obj.__name__ == "func1" def test_generative_functions_with_explicit_names(self, testdir): - modcol = testdir.getmodulecol(""" + modcol = testdir.getmodulecol( + """ def func1(arg, arg2): assert arg == arg2 def test_gen(): yield "seventeen", func1, 17, 3*5 yield "fortytwo", func1, 42, 6*7 - """) + """ + ) colitems = modcol.collect() assert len(colitems) == 1 gencol = colitems[0] @@ -272,18 +309,20 @@ class TestGenerator(object): assert isinstance(gencolitems[0], pytest.Function) assert isinstance(gencolitems[1], pytest.Function) assert gencolitems[0].name == "['seventeen']" - assert gencolitems[0].obj.__name__ == 'func1' + assert gencolitems[0].obj.__name__ == "func1" assert gencolitems[1].name == "['fortytwo']" - assert gencolitems[1].obj.__name__ == 'func1' + assert gencolitems[1].obj.__name__ == "func1" def test_generative_functions_unique_explicit_names(self, testdir): # generative - modcol = testdir.getmodulecol(""" + modcol = testdir.getmodulecol( + """ def func(): pass def test_gen(): yield "name", func yield "name", func - """) + """ + ) colitems = modcol.collect() assert len(colitems) == 1 gencol = colitems[0] @@ -291,14 +330,16 @@ class TestGenerator(object): pytest.raises(ValueError, "gencol.collect()") def test_generative_methods_with_explicit_names(self, testdir): - modcol = testdir.getmodulecol(""" + modcol = testdir.getmodulecol( + """ def func1(arg, arg2): assert arg == arg2 class TestGenMethods(object): def test_gen(self): yield "m1", func1, 17, 3*5 yield "m2", func1, 42, 6*7 - """) + """ + ) gencol = 
modcol.collect()[0].collect()[0].collect()[0] assert isinstance(gencol, pytest.Generator) gencolitems = gencol.collect() @@ -306,12 +347,13 @@ class TestGenerator(object): assert isinstance(gencolitems[0], pytest.Function) assert isinstance(gencolitems[1], pytest.Function) assert gencolitems[0].name == "['m1']" - assert gencolitems[0].obj.__name__ == 'func1' + assert gencolitems[0].obj.__name__ == "func1" assert gencolitems[1].name == "['m2']" - assert gencolitems[1].obj.__name__ == 'func1' + assert gencolitems[1].obj.__name__ == "func1" def test_order_of_execution_generator_same_codeline(self, testdir, tmpdir): - o = testdir.makepyfile(""" + o = testdir.makepyfile( + """ from __future__ import print_function def test_generative_order_of_execution(): import py, pytest @@ -329,14 +371,16 @@ class TestGenerator(object): for i in expected_list: yield list_append, i yield assert_order_of_execution - """) + """ + ) reprec = testdir.inline_run(o) passed, skipped, failed = reprec.countoutcomes() assert passed == 7 assert not skipped and not failed def test_order_of_execution_generator_different_codeline(self, testdir): - o = testdir.makepyfile(""" + o = testdir.makepyfile( + """ from __future__ import print_function def test_generative_tests_different_codeline(): import py, pytest @@ -361,7 +405,8 @@ class TestGenerator(object): yield list_append_1 yield list_append_2 yield assert_order_of_execution - """) + """ + ) reprec = testdir.inline_run(o) passed, skipped, failed = reprec.countoutcomes() assert passed == 4 @@ -374,7 +419,8 @@ class TestGenerator(object): # that the old 1.3.4 behaviour is preserved such that all # yielded functions all share the same "self" instance that # has been used during collection. - o = testdir.makepyfile(""" + o = testdir.makepyfile( + """ setuplist = [] class TestClass(object): def setup_method(self, func): @@ -403,33 +449,38 @@ class TestGenerator(object): assert len(setuplist) == 3, len(setuplist) assert setuplist[0] == setuplist[2], setuplist assert setuplist[1] != setuplist[2], setuplist - """) - reprec = testdir.inline_run(o, '-v') + """ + ) + reprec = testdir.inline_run(o, "-v") passed, skipped, failed = reprec.countoutcomes() assert passed == 4 assert not skipped and not failed class TestFunction(object): + def test_getmodulecollector(self, testdir): item = testdir.getitem("def test_func(): pass") modcol = item.getparent(pytest.Module) assert isinstance(modcol, pytest.Module) - assert hasattr(modcol.obj, 'test_func') + assert hasattr(modcol.obj, "test_func") def test_function_as_object_instance_ignored(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ class A(object): def __call__(self, tmpdir): 0/0 test_a = A() - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome() def test_function_equality(self, testdir, tmpdir): from _pytest.fixtures import FixtureManager + config = testdir.parseconfigure() session = testdir.Session(config) session._fixturemanager = FixtureManager(session) @@ -440,35 +491,40 @@ class TestFunction(object): def func2(): pass - f1 = pytest.Function(name="name", parent=session, config=config, - args=(1,), callobj=func1) + f1 = pytest.Function( + name="name", parent=session, config=config, args=(1,), callobj=func1 + ) assert f1 == f1 - f2 = pytest.Function(name="name", config=config, - callobj=func2, parent=session) + f2 = pytest.Function(name="name", config=config, callobj=func2, parent=session) assert f1 != f2 def test_issue197_parametrize_emptyset(self, testdir): - testdir.makepyfile(""" + 
testdir.makepyfile( + """ import pytest @pytest.mark.parametrize('arg', []) def test_function(arg): pass - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(skipped=1) def test_single_tuple_unwraps_values(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.mark.parametrize(('arg',), [(1,)]) def test_function(arg): assert arg == 1 - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_issue213_parametrize_value_no_equal(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest class A(object): def __eq__(self, other): @@ -476,13 +532,15 @@ class TestFunction(object): @pytest.mark.parametrize('arg', [A()]) def test_function(arg): assert arg.__class__.__name__ == "A" - """) + """ + ) reprec = testdir.inline_run("--fulltrace") reprec.assertoutcome(passed=1) def test_parametrize_with_non_hashable_values(self, testdir): """Test parametrization with non-hashable values.""" - testdir.makepyfile(""" + testdir.makepyfile( + """ archival_mapping = { '1.0': {'tag': '1.0'}, '1.2.2a1': {'tag': 'release-1.2.2a1'}, @@ -494,13 +552,15 @@ class TestFunction(object): def test_archival_to_version(key, value): assert key in archival_mapping assert value == archival_mapping[key] - """) + """ + ) rec = testdir.inline_run() rec.assertoutcome(passed=2) def test_parametrize_with_non_hashable_values_indirect(self, testdir): """Test parametrization with non-hashable values with indirect parametrization.""" - testdir.makepyfile(""" + testdir.makepyfile( + """ archival_mapping = { '1.0': {'tag': '1.0'}, '1.2.2a1': {'tag': 'release-1.2.2a1'}, @@ -521,13 +581,15 @@ class TestFunction(object): def test_archival_to_version(key, value): assert key in archival_mapping assert value == archival_mapping[key] - """) + """ + ) rec = testdir.inline_run() rec.assertoutcome(passed=2) def test_parametrize_overrides_fixture(self, testdir): """Test parametrization when parameter overrides existing fixture with same name.""" - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture @@ -548,13 +610,15 @@ class TestFunction(object): def test_overridden_via_multiparam(other, value): assert other == 'foo' assert value == 'overridden' - """) + """ + ) rec = testdir.inline_run() rec.assertoutcome(passed=3) def test_parametrize_overrides_parametrized_fixture(self, testdir): """Test parametrization when parameter overrides existing parametrized fixture with same name.""" - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture(params=[1, 2]) @@ -565,13 +629,15 @@ class TestFunction(object): ['overridden']) def test_overridden_via_param(value): assert value == 'overridden' - """) + """ + ) rec = testdir.inline_run() rec.assertoutcome(passed=1) @ignore_parametrized_marks def test_parametrize_with_mark(self, testdir): - items = testdir.getitems(""" + items = testdir.getitems( + """ import pytest @pytest.mark.foo @pytest.mark.parametrize('arg', [ @@ -580,18 +646,25 @@ class TestFunction(object): ]) def test_function(arg): pass - """) + """ + ) keywords = [item.keywords for item in items] - assert 'foo' in keywords[0] and 'bar' not in keywords[0] and 'baz' not in keywords[0] - assert 'foo' in keywords[1] and 'bar' in keywords[1] and 'baz' in keywords[1] + assert ( + "foo" in keywords[0] + and "bar" not in keywords[0] + and "baz" not in keywords[0] + ) + assert "foo" in keywords[1] and "bar" in keywords[1] and "baz" in keywords[1] def test_function_equality_with_callspec(self, testdir, 
tmpdir): - items = testdir.getitems(""" + items = testdir.getitems( + """ import pytest @pytest.mark.parametrize('arg', [1,2]) def test_function(arg): pass - """) + """ + ) assert items[0] != items[1] assert not (items[0] == items[1]) @@ -600,10 +673,12 @@ class TestFunction(object): config = item.config class MyPlugin1(object): + def pytest_pyfunc_call(self, pyfuncitem): raise ValueError class MyPlugin2(object): + def pytest_pyfunc_call(self, pyfuncitem): return True @@ -613,21 +688,24 @@ class TestFunction(object): config.hook.pytest_pyfunc_call(pyfuncitem=item) def test_multiple_parametrize(self, testdir): - modcol = testdir.getmodulecol(""" + modcol = testdir.getmodulecol( + """ import pytest @pytest.mark.parametrize('x', [0, 1]) @pytest.mark.parametrize('y', [2, 3]) def test1(x, y): pass - """) + """ + ) colitems = modcol.collect() - assert colitems[0].name == 'test1[2-0]' - assert colitems[1].name == 'test1[2-1]' - assert colitems[2].name == 'test1[3-0]' - assert colitems[3].name == 'test1[3-1]' + assert colitems[0].name == "test1[2-0]" + assert colitems[1].name == "test1[2-1]" + assert colitems[2].name == "test1[3-0]" + assert colitems[3].name == "test1[3-1]" def test_issue751_multiple_parametrize_with_ids(self, testdir): - modcol = testdir.getmodulecol(""" + modcol = testdir.getmodulecol( + """ import pytest @pytest.mark.parametrize('x', [0], ids=['c']) @pytest.mark.parametrize('y', [0, 1], ids=['a', 'b']) @@ -636,16 +714,18 @@ class TestFunction(object): pass def test2(self, x, y): pass - """) + """ + ) colitems = modcol.collect()[0].collect()[0].collect() - assert colitems[0].name == 'test1[a-c]' - assert colitems[1].name == 'test1[b-c]' - assert colitems[2].name == 'test2[a-c]' - assert colitems[3].name == 'test2[b-c]' + assert colitems[0].name == "test1[a-c]" + assert colitems[1].name == "test1[b-c]" + assert colitems[2].name == "test2[a-c]" + assert colitems[3].name == "test2[b-c]" @ignore_parametrized_marks def test_parametrize_skipif(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest m = pytest.mark.skipif('True') @@ -653,13 +733,15 @@ class TestFunction(object): @pytest.mark.parametrize('x', [0, 1, m(2)]) def test_skip_if(x): assert x < 2 - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines('* 2 passed, 1 skipped in *') + result.stdout.fnmatch_lines("* 2 passed, 1 skipped in *") @ignore_parametrized_marks def test_parametrize_skip(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest m = pytest.mark.skip('') @@ -667,13 +749,15 @@ class TestFunction(object): @pytest.mark.parametrize('x', [0, 1, m(2)]) def test_skip(x): assert x < 2 - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines('* 2 passed, 1 skipped in *') + result.stdout.fnmatch_lines("* 2 passed, 1 skipped in *") @ignore_parametrized_marks def test_parametrize_skipif_no_skip(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest m = pytest.mark.skipif('False') @@ -681,13 +765,15 @@ class TestFunction(object): @pytest.mark.parametrize('x', [0, 1, m(2)]) def test_skipif_no_skip(x): assert x < 2 - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines('* 1 failed, 2 passed in *') + result.stdout.fnmatch_lines("* 1 failed, 2 passed in *") @ignore_parametrized_marks def test_parametrize_xfail(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest m = pytest.mark.xfail('True') @@ -695,13 +781,15 @@ class TestFunction(object): 
@pytest.mark.parametrize('x', [0, 1, m(2)]) def test_xfail(x): assert x < 2 - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines('* 2 passed, 1 xfailed in *') + result.stdout.fnmatch_lines("* 2 passed, 1 xfailed in *") @ignore_parametrized_marks def test_parametrize_passed(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest m = pytest.mark.xfail('True') @@ -709,13 +797,15 @@ class TestFunction(object): @pytest.mark.parametrize('x', [0, 1, m(2)]) def test_xfail(x): pass - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines('* 2 passed, 1 xpassed in *') + result.stdout.fnmatch_lines("* 2 passed, 1 xpassed in *") @ignore_parametrized_marks def test_parametrize_xfail_passed(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest m = pytest.mark.xfail('False') @@ -723,26 +813,32 @@ class TestFunction(object): @pytest.mark.parametrize('x', [0, 1, m(2)]) def test_passed(x): pass - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines('* 3 passed in *') + result.stdout.fnmatch_lines("* 3 passed in *") def test_function_original_name(self, testdir): - items = testdir.getitems(""" + items = testdir.getitems( + """ import pytest @pytest.mark.parametrize('arg', [1,2]) def test_func(arg): pass - """) - assert [x.originalname for x in items] == ['test_func', 'test_func'] + """ + ) + assert [x.originalname for x in items] == ["test_func", "test_func"] class TestSorting(object): + def test_check_equality(self, testdir): - modcol = testdir.getmodulecol(""" + modcol = testdir.getmodulecol( + """ def test_pass(): pass def test_fail(): assert 0 - """) + """ + ) fn1 = testdir.collect_by_name(modcol, "test_pass") assert isinstance(fn1, pytest.Function) fn2 = testdir.collect_by_name(modcol, "test_pass") @@ -751,7 +847,7 @@ class TestSorting(object): assert fn1 == fn2 assert fn1 != modcol if sys.version_info < (3, 0): - assert cmp(fn1, fn2) == 0 + assert cmp(fn1, fn2) == 0 # NOQA assert hash(fn1) == hash(fn2) fn3 = testdir.collect_by_name(modcol, "test_fail") @@ -767,7 +863,8 @@ class TestSorting(object): assert modcol != fn def test_allow_sane_sorting_for_decorators(self, testdir): - modcol = testdir.getmodulecol(""" + modcol = testdir.getmodulecol( + """ def dec(f): g = lambda: f(2) g.place_as = f @@ -781,50 +878,61 @@ class TestSorting(object): def test_a(y): pass test_a = dec(test_a) - """) + """ + ) colitems = modcol.collect() assert len(colitems) == 2 - assert [item.name for item in colitems] == ['test_b', 'test_a'] + assert [item.name for item in colitems] == ["test_b", "test_a"] class TestConftestCustomization(object): + def test_pytest_pycollect_module(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest class MyModule(pytest.Module): pass def pytest_pycollect_makemodule(path, parent): if path.basename == "test_xyz.py": return MyModule(path, parent) - """) + """ + ) testdir.makepyfile("def test_some(): pass") testdir.makepyfile(test_xyz="def test_func(): pass") result = testdir.runpytest("--collect-only") - result.stdout.fnmatch_lines([ - "* 3 def test_traceback_error_during_import(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ x = 1 x = 2 x = 17 asd - """) + """ + ) result = testdir.runpytest() assert result.ret != 0 out = result.stdout.str() assert "x = 1" not in out assert "x = 2" not in out - result.stdout.fnmatch_lines([ - " *asd*", - "E*NameError*", - ]) + result.stdout.fnmatch_lines([" *asd*", "E*NameError*"]) result = 
testdir.runpytest("--fulltrace") out = result.stdout.str() assert "x = 1" in out assert "x = 2" in out - result.stdout.fnmatch_lines([ - ">*asd*", - "E*NameError*", - ]) + result.stdout.fnmatch_lines([">*asd*", "E*NameError*"]) def test_traceback_filter_error_during_fixture_collection(self, testdir): """integration test for issue #995. """ - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest def fail_me(func): @@ -1005,15 +1133,13 @@ class TestTracebackCutting(object): def test_failing_fixture(fail_fixture): pass - """) + """ + ) result = testdir.runpytest() assert result.ret != 0 out = result.stdout.str() assert "INTERNALERROR>" not in out - result.stdout.fnmatch_lines([ - "*ValueError: fail me*", - "* 1 error in *", - ]) + result.stdout.fnmatch_lines(["*ValueError: fail me*", "* 1 error in *"]) def test_filter_traceback_generated_code(self): """test that filter_traceback() works with the fact that @@ -1024,10 +1150,11 @@ class TestTracebackCutting(object): This fixes #995. """ from _pytest.python import filter_traceback + try: ns = {} - exec('def foo(): raise ValueError', ns) - ns['foo']() + exec("def foo(): raise ValueError", ns) + ns["foo"]() except ValueError: _, _, tb = sys.exc_info() @@ -1042,26 +1169,32 @@ class TestTracebackCutting(object): This fixes #1133. """ from _pytest.python import filter_traceback + testdir.syspathinsert() - testdir.makepyfile(filter_traceback_entry_as_str=''' + testdir.makepyfile( + filter_traceback_entry_as_str=""" def foo(): raise ValueError - ''') + """ + ) try: import filter_traceback_entry_as_str + filter_traceback_entry_as_str.foo() except ValueError: _, _, tb = sys.exc_info() - testdir.tmpdir.join('filter_traceback_entry_as_str.py').remove() + testdir.tmpdir.join("filter_traceback_entry_as_str.py").remove() tb = _pytest._code.Traceback(tb) assert isinstance(tb[-1].path, str) assert filter_traceback(tb[-1]) class TestReportInfo(object): + def test_itemreport_reportinfo(self, testdir, linecomp): - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest class MyFunction(pytest.Function): def reportinfo(self): @@ -1069,7 +1202,8 @@ class TestReportInfo(object): def pytest_pycollect_makeitem(collector, name, obj): if name == "test_func": return MyFunction(name, parent=collector) - """) + """ + ) item = testdir.getitem("def test_func(): pass") item.config.pluginmanager.getplugin("runner") assert item.location == ("ABCDE", 42, "custom") @@ -1082,11 +1216,13 @@ class TestReportInfo(object): assert modpath == "test_func" def test_class_reportinfo(self, testdir): - modcol = testdir.getmodulecol(""" + modcol = testdir.getmodulecol( + """ # lineno 0 class TestClass(object): def test_hello(self): pass - """) + """ + ) classcol = testdir.collect_by_name(modcol, "TestClass") fspath, lineno, msg = classcol.reportinfo() assert fspath == modcol.fspath @@ -1094,13 +1230,15 @@ class TestReportInfo(object): assert msg == "TestClass" def test_generator_reportinfo(self, testdir): - modcol = testdir.getmodulecol(""" + modcol = testdir.getmodulecol( + """ # lineno 0 def test_gen(): def check(x): assert x yield check, 3 - """) + """ + ) gencol = testdir.collect_by_name(modcol, "test_gen") fspath, lineno, modpath = gencol.reportinfo() assert fspath == modcol.fspath @@ -1126,7 +1264,8 @@ class TestReportInfo(object): def test_reportinfo_with_nasty_getattr(self, testdir): # https://github.com/pytest-dev/pytest/issues/1204 - modcol = testdir.getmodulecol(""" + modcol = testdir.getmodulecol( + """ # lineno 0 class TestClass(object): def 
__getattr__(self, name): @@ -1134,85 +1273,88 @@ class TestReportInfo(object): def test_foo(self): pass - """) + """ + ) classcol = testdir.collect_by_name(modcol, "TestClass") instance = classcol.collect()[0] fspath, lineno, msg = instance.reportinfo() def test_customized_python_discovery(testdir): - testdir.makeini(""" + testdir.makeini( + """ [pytest] python_files=check_*.py python_classes=Check python_functions=check - """) - p = testdir.makepyfile(""" + """ + ) + p = testdir.makepyfile( + """ def check_simple(): pass class CheckMyApp(object): def check_meth(self): pass - """) + """ + ) p2 = p.new(basename=p.basename.replace("test", "check")) p.move(p2) result = testdir.runpytest("--collect-only", "-s") - result.stdout.fnmatch_lines([ - "*check_customized*", - "*check_simple*", - "*CheckMyApp*", - "*check_meth*", - ]) + result.stdout.fnmatch_lines( + ["*check_customized*", "*check_simple*", "*CheckMyApp*", "*check_meth*"] + ) result = testdir.runpytest() assert result.ret == 0 - result.stdout.fnmatch_lines([ - "*2 passed*", - ]) + result.stdout.fnmatch_lines(["*2 passed*"]) def test_customized_python_discovery_functions(testdir): - testdir.makeini(""" + testdir.makeini( + """ [pytest] python_functions=_test - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ def _test_underscore(): pass - """) + """ + ) result = testdir.runpytest("--collect-only", "-s") - result.stdout.fnmatch_lines([ - "*_test_underscore*", - ]) + result.stdout.fnmatch_lines(["*_test_underscore*"]) result = testdir.runpytest() assert result.ret == 0 - result.stdout.fnmatch_lines([ - "*1 passed*", - ]) + result.stdout.fnmatch_lines(["*1 passed*"]) def test_collector_attributes(testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest def pytest_pycollect_makeitem(collector): assert collector.Function == pytest.Function assert collector.Class == pytest.Class assert collector.Instance == pytest.Instance assert collector.Module == pytest.Module - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ def test_hello(): pass - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*1 passed*", - ]) + result.stdout.fnmatch_lines(["*1 passed*"]) def test_customize_through_attributes(testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest class MyFunction(pytest.Function): pass @@ -1224,22 +1366,24 @@ def test_customize_through_attributes(testdir): def pytest_pycollect_makeitem(collector, name, obj): if name.startswith("MyTestClass"): return MyClass(name, parent=collector) - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ class MyTestClass(object): def test_hello(self): pass - """) + """ + ) result = testdir.runpytest("--collect-only") - result.stdout.fnmatch_lines([ - "*MyClass*", - "*MyInstance*", - "*MyFunction*test_hello*", - ]) + result.stdout.fnmatch_lines( + ["*MyClass*", "*MyInstance*", "*MyFunction*test_hello*"] + ) def test_unorderable_types(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ class TestJoinEmpty(object): pass @@ -1249,7 +1393,8 @@ def test_unorderable_types(testdir): Test.__name__ = "TestFoo" return Test TestFoo = make_test() - """) + """ + ) result = testdir.runpytest() assert "TypeError" not in result.stdout.str() assert result.ret == EXIT_NOTESTSCOLLECTED @@ -1260,7 +1405,8 @@ def test_collect_functools_partial(testdir): Test that collection of functools.partial object works, and arguments to the wrapped functions are dealt correctly (see #811). 
""" - testdir.makepyfile(""" + testdir.makepyfile( + """ import functools import pytest @@ -1294,7 +1440,8 @@ def test_collect_functools_partial(testdir): test_fail_1 = functools.partial(check2, 2) test_fail_2 = functools.partial(check3, 2) - """) + """ + ) result = testdir.inline_run() result.assertoutcome(passed=6, failed=2) @@ -1305,7 +1452,8 @@ def test_dont_collect_non_function_callable(testdir): In this case an INTERNALERROR occurred trying to report the failure of a test like this one because py test failed to get the source lines. """ - testdir.makepyfile(""" + testdir.makepyfile( + """ class Oh(object): def __call__(self): pass @@ -1314,13 +1462,16 @@ def test_dont_collect_non_function_callable(testdir): def test_real(): pass - """) - result = testdir.runpytest('-rw') - result.stdout.fnmatch_lines([ - '*collected 1 item*', - "*cannot collect 'test_a' because it is not a function*", - '*1 passed, 1 warnings in *', - ]) + """ + ) + result = testdir.runpytest("-rw") + result.stdout.fnmatch_lines( + [ + "*collected 1 item*", + "*cannot collect 'test_a' because it is not a function*", + "*1 passed, 1 warnings in *", + ] + ) def test_class_injection_does_not_break_collection(testdir): @@ -1330,36 +1481,38 @@ def test_class_injection_does_not_break_collection(testdir): is modified during collection time, and the original method list is still used for collection. """ - testdir.makeconftest(""" + testdir.makeconftest( + """ from test_inject import TestClass def pytest_generate_tests(metafunc): TestClass.changed_var = {} - """) - testdir.makepyfile(test_inject=''' + """ + ) + testdir.makepyfile( + test_inject=''' class TestClass(object): def test_injection(self): """Test being parametrized.""" pass - ''') + ''' + ) result = testdir.runpytest() assert "RuntimeError: dictionary changed size during iteration" not in result.stdout.str() - result.stdout.fnmatch_lines(['*1 passed*']) + result.stdout.fnmatch_lines(["*1 passed*"]) def test_syntax_error_with_non_ascii_chars(testdir): """Fix decoding issue while formatting SyntaxErrors during collection (#578) """ - testdir.makepyfile(u""" + testdir.makepyfile( + u""" # -*- coding: UTF-8 -*- ☃ - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - '*ERROR collecting*', - '*SyntaxError*', - '*1 error in*', - ]) + result.stdout.fnmatch_lines(["*ERROR collecting*", "*SyntaxError*", "*1 error in*"]) def test_skip_duplicates_by_default(testdir): @@ -1369,15 +1522,17 @@ def test_skip_duplicates_by_default(testdir): """ a = testdir.mkdir("a") fh = a.join("test_a.py") - fh.write(_pytest._code.Source(""" + fh.write( + _pytest._code.Source( + """ import pytest def test_real(): pass - """)) + """ + ) + ) result = testdir.runpytest(a.strpath, a.strpath) - result.stdout.fnmatch_lines([ - '*collected 1 item*', - ]) + result.stdout.fnmatch_lines(["*collected 1 item*"]) def test_keep_duplicates(testdir): @@ -1387,12 +1542,14 @@ def test_keep_duplicates(testdir): """ a = testdir.mkdir("a") fh = a.join("test_a.py") - fh.write(_pytest._code.Source(""" + fh.write( + _pytest._code.Source( + """ import pytest def test_real(): pass - """)) + """ + ) + ) result = testdir.runpytest("--keep-duplicates", a.strpath, a.strpath) - result.stdout.fnmatch_lines([ - '*collected 2 item*', - ]) + result.stdout.fnmatch_lines(["*collected 2 item*"]) diff --git a/testing/python/fixture.py b/testing/python/fixture.py index d9f08a3f0..023a40d84 100644 --- a/testing/python/fixture.py +++ b/testing/python/fixture.py @@ -8,23 +8,29 @@ from _pytest import fixtures def 
test_getfuncargnames(): + def f(): pass + assert not fixtures.getfuncargnames(f) def g(arg): pass - assert fixtures.getfuncargnames(g) == ('arg',) + + assert fixtures.getfuncargnames(g) == ("arg",) def h(arg1, arg2="hello"): pass - assert fixtures.getfuncargnames(h) == ('arg1',) + + assert fixtures.getfuncargnames(h) == ("arg1",) def h(arg1, arg2, arg3="hello"): pass - assert fixtures.getfuncargnames(h) == ('arg1', 'arg2') + + assert fixtures.getfuncargnames(h) == ("arg1", "arg2") class A(object): + def f(self, arg1, arg2="hello"): pass @@ -32,17 +38,19 @@ def test_getfuncargnames(): def static(arg1, arg2): pass - assert fixtures.getfuncargnames(A().f) == ('arg1',) - assert fixtures.getfuncargnames(A.static, cls=A) == ('arg1', 'arg2') + assert fixtures.getfuncargnames(A().f) == ("arg1",) + assert fixtures.getfuncargnames(A.static, cls=A) == ("arg1", "arg2") class TestFillFixtures(object): + def test_fillfuncargs_exposed(self): # used by oejskit, kept for compatibility assert pytest._fillfuncargs == fixtures.fillfixtures def test_funcarg_lookupfails(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture @@ -51,17 +59,17 @@ class TestFillFixtures(object): def test_func(some): pass - """) + """ + ) result = testdir.runpytest() # "--collect-only") assert result.ret != 0 - result.stdout.fnmatch_lines([ - "*def test_func(some)*", - "*fixture*some*not found*", - "*xyzsomething*", - ]) + result.stdout.fnmatch_lines( + ["*def test_func(some)*", "*fixture*some*not found*", "*xyzsomething*"] + ) def test_funcarg_basic(self, testdir): - item = testdir.getitem(""" + item = testdir.getitem( + """ import pytest @pytest.fixture @@ -72,15 +80,17 @@ class TestFillFixtures(object): return 42 def test_func(some, other): pass - """) + """ + ) fixtures.fillfixtures(item) del item.funcargs["request"] assert len(get_public_names(item.funcargs)) == 2 - assert item.funcargs['some'] == "test_func" - assert item.funcargs['other'] == 42 + assert item.funcargs["some"] == "test_func" + assert item.funcargs["other"] == 42 def test_funcarg_lookup_modulelevel(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture @@ -92,12 +102,14 @@ class TestFillFixtures(object): assert something == "test_method" def test_func(something): assert something == "test_func" - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) def test_funcarg_lookup_classlevel(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import pytest class TestClass(object): @@ -107,27 +119,34 @@ class TestFillFixtures(object): def test_method(self, something): assert something is self - """) + """ + ) result = testdir.runpytest(p) - result.stdout.fnmatch_lines([ - "*1 passed*" - ]) + result.stdout.fnmatch_lines(["*1 passed*"]) def test_conftest_funcargs_only_available_in_subdir(self, testdir): sub1 = testdir.mkpydir("sub1") sub2 = testdir.mkpydir("sub2") - sub1.join("conftest.py").write(_pytest._code.Source(""" + sub1.join("conftest.py").write( + _pytest._code.Source( + """ import pytest @pytest.fixture def arg1(request): pytest.raises(Exception, "request.getfixturevalue('arg2')") - """)) - sub2.join("conftest.py").write(_pytest._code.Source(""" + """ + ) + ) + sub2.join("conftest.py").write( + _pytest._code.Source( + """ import pytest @pytest.fixture def arg2(request): pytest.raises(Exception, "request.getfixturevalue('arg1')") - """)) + """ + ) + ) sub1.join("test_in_sub1.py").write("def test_1(arg1): pass") 
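        # each fixture is visible only below its own conftest; the fixtures
        # themselves assert that the sibling directory's fixture cannot be resolved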
sub2.join("test_in_sub2.py").write("def test_2(arg2): pass") @@ -135,7 +154,8 @@ class TestFillFixtures(object): result.assert_outcomes(passed=2) def test_extend_fixture_module_class(self, testdir): - testfile = testdir.makepyfile(""" + testfile = testdir.makepyfile( + """ import pytest @pytest.fixture @@ -150,21 +170,25 @@ class TestFillFixtures(object): def test_spam(self, spam): assert spam == 'spamspam' - """) + """ + ) result = testdir.runpytest() result.stdout.fnmatch_lines(["*1 passed*"]) result = testdir.runpytest(testfile) result.stdout.fnmatch_lines(["*1 passed*"]) def test_extend_fixture_conftest_module(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest @pytest.fixture def spam(): return 'spam' - """) - testfile = testdir.makepyfile(""" + """ + ) + testfile = testdir.makepyfile( + """ import pytest @pytest.fixture @@ -173,48 +197,62 @@ class TestFillFixtures(object): def test_spam(spam): assert spam == 'spamspam' - """) + """ + ) result = testdir.runpytest() result.stdout.fnmatch_lines(["*1 passed*"]) result = testdir.runpytest(testfile) result.stdout.fnmatch_lines(["*1 passed*"]) def test_extend_fixture_conftest_conftest(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest @pytest.fixture def spam(): return 'spam' - """) + """ + ) pkg = testdir.mkpydir("pkg") - pkg.join("conftest.py").write(_pytest._code.Source(""" + pkg.join("conftest.py").write( + _pytest._code.Source( + """ import pytest @pytest.fixture def spam(spam): return spam * 2 - """)) + """ + ) + ) testfile = pkg.join("test_spam.py") - testfile.write(_pytest._code.Source(""" + testfile.write( + _pytest._code.Source( + """ def test_spam(spam): assert spam == "spamspam" - """)) + """ + ) + ) result = testdir.runpytest() result.stdout.fnmatch_lines(["*1 passed*"]) result = testdir.runpytest(testfile) result.stdout.fnmatch_lines(["*1 passed*"]) def test_extend_fixture_conftest_plugin(self, testdir): - testdir.makepyfile(testplugin=""" + testdir.makepyfile( + testplugin=""" import pytest @pytest.fixture def foo(): return 7 - """) + """ + ) testdir.syspathinsert() - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest pytest_plugins = 'testplugin' @@ -222,50 +260,62 @@ class TestFillFixtures(object): @pytest.fixture def foo(foo): return foo + 7 - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ def test_foo(foo): assert foo == 14 - """) - result = testdir.runpytest('-s') + """ + ) + result = testdir.runpytest("-s") assert result.ret == 0 def test_extend_fixture_plugin_plugin(self, testdir): # Two plugins should extend each order in loading order - testdir.makepyfile(testplugin0=""" + testdir.makepyfile( + testplugin0=""" import pytest @pytest.fixture def foo(): return 7 - """) - testdir.makepyfile(testplugin1=""" + """ + ) + testdir.makepyfile( + testplugin1=""" import pytest @pytest.fixture def foo(foo): return foo + 7 - """) + """ + ) testdir.syspathinsert() - testdir.makepyfile(""" + testdir.makepyfile( + """ pytest_plugins = ['testplugin0', 'testplugin1'] def test_foo(foo): assert foo == 14 - """) + """ + ) result = testdir.runpytest() assert result.ret == 0 def test_override_parametrized_fixture_conftest_module(self, testdir): """Test override of the parametrized fixture with non-parametrized one on the test module level.""" - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest @pytest.fixture(params=[1, 2, 3]) def spam(request): return request.param - """) - testfile = testdir.makepyfile(""" + """ 
+ ) + testfile = testdir.makepyfile( + """ import pytest @pytest.fixture @@ -274,7 +324,8 @@ class TestFillFixtures(object): def test_spam(spam): assert spam == 'spam' - """) + """ + ) result = testdir.runpytest() result.stdout.fnmatch_lines(["*1 passed*"]) result = testdir.runpytest(testfile) @@ -282,26 +333,36 @@ class TestFillFixtures(object): def test_override_parametrized_fixture_conftest_conftest(self, testdir): """Test override of the parametrized fixture with non-parametrized one on the conftest level.""" - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest @pytest.fixture(params=[1, 2, 3]) def spam(request): return request.param - """) - subdir = testdir.mkpydir('subdir') - subdir.join("conftest.py").write(_pytest._code.Source(""" + """ + ) + subdir = testdir.mkpydir("subdir") + subdir.join("conftest.py").write( + _pytest._code.Source( + """ import pytest @pytest.fixture def spam(): return 'spam' - """)) + """ + ) + ) testfile = subdir.join("test_spam.py") - testfile.write(_pytest._code.Source(""" + testfile.write( + _pytest._code.Source( + """ def test_spam(spam): assert spam == "spam" - """)) + """ + ) + ) result = testdir.runpytest() result.stdout.fnmatch_lines(["*1 passed*"]) result = testdir.runpytest(testfile) @@ -309,14 +370,17 @@ class TestFillFixtures(object): def test_override_non_parametrized_fixture_conftest_module(self, testdir): """Test override of the non-parametrized fixture with parametrized one on the test module level.""" - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest @pytest.fixture def spam(): return 'spam' - """) - testfile = testdir.makepyfile(""" + """ + ) + testfile = testdir.makepyfile( + """ import pytest @pytest.fixture(params=[1, 2, 3]) @@ -328,7 +392,8 @@ class TestFillFixtures(object): def test_spam(spam): assert spam == params['spam'] params['spam'] += 1 - """) + """ + ) result = testdir.runpytest() result.stdout.fnmatch_lines(["*3 passed*"]) result = testdir.runpytest(testfile) @@ -336,61 +401,83 @@ class TestFillFixtures(object): def test_override_non_parametrized_fixture_conftest_conftest(self, testdir): """Test override of the non-parametrized fixture with parametrized one on the conftest level.""" - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest @pytest.fixture def spam(): return 'spam' - """) - subdir = testdir.mkpydir('subdir') - subdir.join("conftest.py").write(_pytest._code.Source(""" + """ + ) + subdir = testdir.mkpydir("subdir") + subdir.join("conftest.py").write( + _pytest._code.Source( + """ import pytest @pytest.fixture(params=[1, 2, 3]) def spam(request): return request.param - """)) + """ + ) + ) testfile = subdir.join("test_spam.py") - testfile.write(_pytest._code.Source(""" + testfile.write( + _pytest._code.Source( + """ params = {'spam': 1} def test_spam(spam): assert spam == params['spam'] params['spam'] += 1 - """)) + """ + ) + ) result = testdir.runpytest() result.stdout.fnmatch_lines(["*3 passed*"]) result = testdir.runpytest(testfile) result.stdout.fnmatch_lines(["*3 passed*"]) - def test_override_autouse_fixture_with_parametrized_fixture_conftest_conftest(self, testdir): + def test_override_autouse_fixture_with_parametrized_fixture_conftest_conftest( + self, testdir + ): """Test override of the autouse fixture with parametrized one on the conftest level. 
This test covers the issue explained in issue 1601 """ - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest @pytest.fixture(autouse=True) def spam(): return 'spam' - """) - subdir = testdir.mkpydir('subdir') - subdir.join("conftest.py").write(_pytest._code.Source(""" + """ + ) + subdir = testdir.mkpydir("subdir") + subdir.join("conftest.py").write( + _pytest._code.Source( + """ import pytest @pytest.fixture(params=[1, 2, 3]) def spam(request): return request.param - """)) + """ + ) + ) testfile = subdir.join("test_spam.py") - testfile.write(_pytest._code.Source(""" + testfile.write( + _pytest._code.Source( + """ params = {'spam': 1} def test_spam(spam): assert spam == params['spam'] params['spam'] += 1 - """)) + """ + ) + ) result = testdir.runpytest() result.stdout.fnmatch_lines(["*3 passed*"]) result = testdir.runpytest(testfile) @@ -399,25 +486,30 @@ class TestFillFixtures(object): def test_autouse_fixture_plugin(self, testdir): # A fixture from a plugin has no baseid set, which screwed up # the autouse fixture handling. - testdir.makepyfile(testplugin=""" + testdir.makepyfile( + testplugin=""" import pytest @pytest.fixture(autouse=True) def foo(request): request.function.foo = 7 - """) + """ + ) testdir.syspathinsert() - testdir.makepyfile(""" + testdir.makepyfile( + """ pytest_plugins = 'testplugin' def test_foo(request): assert request.function.foo == 7 - """) + """ + ) result = testdir.runpytest() assert result.ret == 0 def test_funcarg_lookup_error(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest @pytest.fixture @@ -431,25 +523,31 @@ class TestFillFixtures(object): @pytest.fixture def d_fixture(): pass - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ def test_lookup_error(unknown): pass - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*ERROR at setup of test_lookup_error*", - " def test_lookup_error(unknown):*", - "E fixture 'unknown' not found", - "> available fixtures:*a_fixture,*b_fixture,*c_fixture,*d_fixture*monkeypatch,*", # sorted - "> use 'py*test --fixtures *' for help on them.", - "*1 error*", - ]) + result.stdout.fnmatch_lines( + [ + "*ERROR at setup of test_lookup_error*", + " def test_lookup_error(unknown):*", + "E fixture 'unknown' not found", + "> available fixtures:*a_fixture,*b_fixture,*c_fixture,*d_fixture*monkeypatch,*", # sorted + "> use 'py*test --fixtures *' for help on them.", + "*1 error*", + ] + ) assert "INTERNAL" not in result.stdout.str() def test_fixture_excinfo_leak(self, testdir): # on python2 sys.excinfo would leak into fixture executions - testdir.makepyfile(""" + testdir.makepyfile( + """ import sys import traceback import pytest @@ -465,31 +563,36 @@ class TestFillFixtures(object): if sys.exc_info()[0]: # python3 bug :) traceback.print_exc() assert sys.exc_info() == (None, None, None) - """) + """ + ) result = testdir.runpytest() assert result.ret == 0 class TestRequestBasic(object): + def test_request_attributes(self, testdir): - item = testdir.getitem(""" + item = testdir.getitem( + """ import pytest @pytest.fixture def something(request): pass def test_func(something): pass - """) + """ + ) req = fixtures.FixtureRequest(item) assert req.function == item.obj assert req.keywords == item.keywords - assert hasattr(req.module, 'test_func') + assert hasattr(req.module, "test_func") assert req.cls is None assert req.function.__name__ == "test_func" assert req.config == item.config assert repr(req).find(req.function.__name__) != -1 def 
test_request_attributes_method(self, testdir): - item, = testdir.getitems(""" + item, = testdir.getitems( + """ import pytest class TestB(object): @@ -498,13 +601,15 @@ class TestRequestBasic(object): return 1 def test_func(self, something): pass - """) + """ + ) req = item._request assert req.cls.__name__ == "TestB" assert req.instance.__class__ == req.cls def test_request_contains_funcarg_arg2fixturedefs(self, testdir): - modcol = testdir.getmodulecol(""" + modcol = testdir.getmodulecol( + """ import pytest @pytest.fixture def something(request): @@ -512,15 +617,17 @@ class TestRequestBasic(object): class TestClass(object): def test_method(self, something): pass - """) + """ + ) item1, = testdir.genitems([modcol]) assert item1.name == "test_method" arg2fixturedefs = fixtures.FixtureRequest(item1)._arg2fixturedefs assert len(arg2fixturedefs) == 1 - assert arg2fixturedefs['something'][0].argname == "something" + assert arg2fixturedefs["something"][0].argname == "something" def test_request_garbage(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import sys import pytest from _pytest.compat import safe_str @@ -551,19 +658,23 @@ class TestRequestBasic(object): def test_func(): pass - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_getfixturevalue_recursive(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest @pytest.fixture def something(request): return 1 - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ import pytest @pytest.fixture @@ -571,14 +682,15 @@ class TestRequestBasic(object): return request.getfixturevalue("something") + 1 def test_func(something): assert something == 2 - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) - @pytest.mark.parametrize( - 'getfixmethod', ('getfixturevalue', 'getfuncargvalue')) + @pytest.mark.parametrize("getfixmethod", ("getfixturevalue", "getfuncargvalue")) def test_getfixturevalue(self, testdir, getfixmethod): - item = testdir.getitem(""" + item = testdir.getitem( + """ import pytest values = [2] @pytest.fixture @@ -587,9 +699,11 @@ class TestRequestBasic(object): def other(request): return values.pop() def test_func(something): pass - """) + """ + ) import contextlib - if getfixmethod == 'getfuncargvalue': + + if getfixmethod == "getfuncargvalue": warning_expectation = pytest.warns(DeprecationWarning) else: # see #1830 for a cleaner way to accomplish this @@ -618,14 +732,16 @@ class TestRequestBasic(object): assert "request" in item.funcargs def test_request_addfinalizer(self, testdir): - item = testdir.getitem(""" + item = testdir.getitem( + """ import pytest teardownlist = [] @pytest.fixture def something(request): request.addfinalizer(lambda: teardownlist.append(1)) def test_func(something): pass - """) + """ + ) item.session._setupstate.prepare(item) pytest._fillfuncargs(item) # successively check finalization calls @@ -637,22 +753,27 @@ class TestRequestBasic(object): assert teardownlist == [1] def test_mark_as_fixture_with_prefix_and_decorator_fails(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest @pytest.fixture def pytest_funcarg__marked_with_prefix_and_decorator(): pass - """) + """ + ) result = testdir.runpytest_subprocess() assert result.ret != 0 - result.stdout.fnmatch_lines([ - "*AssertionError: fixtures cannot have*@pytest.fixture*", - "*pytest_funcarg__marked_with_prefix_and_decorator*" - ]) + result.stdout.fnmatch_lines( + [ + "*AssertionError: fixtures cannot 
have*@pytest.fixture*", + "*pytest_funcarg__marked_with_prefix_and_decorator*", + ] + ) def test_request_addfinalizer_failing_setup(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest values = [1] @pytest.fixture @@ -663,12 +784,14 @@ class TestRequestBasic(object): pass def test_finalizer_ran(): assert not values - """) + """ + ) reprec = testdir.inline_run("-s") reprec.assertoutcome(failed=1, passed=1) def test_request_addfinalizer_failing_setup_module(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest values = [1, 2] @pytest.fixture(scope="module") @@ -678,13 +801,15 @@ class TestRequestBasic(object): assert 0 def test_fix(myfix): pass - """) + """ + ) reprec = testdir.inline_run("-s") mod = reprec.getcalls("pytest_runtest_setup")[0].item.module assert not mod.values def test_request_addfinalizer_partial_setup_failure(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import pytest values = [] @pytest.fixture @@ -694,18 +819,20 @@ class TestRequestBasic(object): pass def test_second(): assert len(values) == 1 - """) + """ + ) result = testdir.runpytest(p) - result.stdout.fnmatch_lines([ - "*1 error*" # XXX the whole module collection fails - ]) + result.stdout.fnmatch_lines( + ["*1 error*"] # XXX the whole module collection fails + ) def test_request_subrequest_addfinalizer_exceptions(self, testdir): """ Ensure exceptions raised during teardown by a finalizer are suppressed until all finalizers are called, re-raising the first exception (#2440) """ - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest values = [] def _excepts(where): @@ -726,12 +853,12 @@ class TestRequestBasic(object): pass def test_second(): assert values == [3, 2, 1] - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - '*Exception: Error in excepts fixture', - '* 2 passed, 1 error in *', - ]) + result.stdout.fnmatch_lines( + ["*Exception: Error in excepts fixture", "* 2 passed, 1 error in *"] + ) def test_request_getmodulepath(self, testdir): modcol = testdir.getmodulecol("def test_somefunc(): pass") @@ -740,7 +867,8 @@ class TestRequestBasic(object): assert req.fspath == modcol.fspath def test_request_fixturenames(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest from _pytest.pytester import get_public_names @pytest.fixture() @@ -756,12 +884,14 @@ class TestRequestBasic(object): assert set(get_public_names(request.fixturenames)) == \ set(["tmpdir", "sarg", "arg1", "request", "farg", "tmpdir_factory"]) - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_funcargnames_compatattr(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest def pytest_generate_tests(metafunc): assert metafunc.funcargnames == metafunc.fixturenames @@ -773,12 +903,14 @@ class TestRequestBasic(object): def test_hello(fn): assert fn[0] == fn[1] - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_setupdecorator_and_xunit(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest values = [] @pytest.fixture(scope='module', autouse=True) @@ -803,47 +935,57 @@ class TestRequestBasic(object): def test_all(): assert values == ["module", "function", "class", "function", "method", "function"] - """) + """ + ) reprec = testdir.inline_run("-v") reprec.assertoutcome(passed=3) def test_fixtures_sub_subdir_normalize_sep(self, testdir): # this tests that normalization of nodeids 
takes place b = testdir.mkdir("tests").mkdir("unit") - b.join("conftest.py").write(_pytest._code.Source(""" + b.join("conftest.py").write( + _pytest._code.Source( + """ import pytest @pytest.fixture def arg1(): pass - """)) + """ + ) + ) p = b.join("test_module.py") p.write("def test_func(arg1): pass") result = testdir.runpytest(p, "--fixtures") assert result.ret == 0 - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ *fixtures defined*conftest* *arg1* - """) + """ + ) def test_show_fixtures_color_yes(self, testdir): testdir.makepyfile("def test_this(): assert 1") - result = testdir.runpytest('--color=yes', '--fixtures') - assert '\x1b[32mtmpdir' in result.stdout.str() + result = testdir.runpytest("--color=yes", "--fixtures") + assert "\x1b[32mtmpdir" in result.stdout.str() def test_newstyle_with_request(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture() def arg(request): pass def test_1(arg): pass - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_setupcontext_no_param(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture(params=[1,2]) def arg(request): @@ -854,14 +996,17 @@ class TestRequestBasic(object): assert not hasattr(request, "param") def test_1(arg): assert arg in (1,2) - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) class TestRequestMarking(object): + def test_applymarker(self, testdir): - item1, item2 = testdir.getitems(""" + item1, item2 = testdir.getitems( + """ import pytest @pytest.fixture @@ -872,18 +1017,20 @@ class TestRequestMarking(object): pass def test_func2(self, something): pass - """) + """ + ) req1 = fixtures.FixtureRequest(item1) - assert 'xfail' not in item1.keywords + assert "xfail" not in item1.keywords req1.applymarker(pytest.mark.xfail) - assert 'xfail' in item1.keywords - assert 'skipif' not in item1.keywords + assert "xfail" in item1.keywords + assert "skipif" not in item1.keywords req1.applymarker(pytest.mark.skipif) - assert 'skipif' in item1.keywords + assert "skipif" in item1.keywords pytest.raises(ValueError, "req1.applymarker(42)") def test_accesskeywords(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture() def keywords(request): @@ -892,12 +1039,14 @@ class TestRequestMarking(object): def test_function(keywords): assert keywords["XYZ"] assert "abc" not in keywords - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_accessmarker_dynamic(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest @pytest.fixture() def keywords(request): @@ -906,8 +1055,10 @@ class TestRequestMarking(object): @pytest.fixture(scope="class", autouse=True) def marking(request): request.applymarker(pytest.mark.XYZ("hello")) - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ import pytest def test_fun1(keywords): assert keywords["XYZ"] is not None @@ -915,14 +1066,17 @@ class TestRequestMarking(object): def test_fun2(keywords): assert keywords["XYZ"] is not None assert "abc" not in keywords - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) class TestRequestCachedSetup(object): + def test_request_cachedsetup_defaultmodule(self, testdir): - reprec = testdir.inline_runsource(""" + reprec = testdir.inline_runsource( + """ mysetup = ["hello",].pop import pytest @@ -936,11 +1090,13 @@ class TestRequestCachedSetup(object): class TestClass(object): 
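                # module-scoped cached_setup: the single-element list's pop can
                # only succeed once, so both tests passing proves the value is cached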
def test_func1a(self, something): assert something == "hello" - """) + """ + ) reprec.assertoutcome(passed=2) def test_request_cachedsetup_class(self, testdir): - reprec = testdir.inline_runsource(""" + reprec = testdir.inline_runsource( + """ mysetup = ["hello", "hello2", "hello3"].pop import pytest @@ -956,7 +1112,8 @@ class TestRequestCachedSetup(object): assert something == "hello" def test_func2b(self, something): assert something == "hello" - """) + """ + ) reprec.assertoutcome(passed=4) def test_request_cachedsetup_extrakey(self, testdir): @@ -988,7 +1145,7 @@ class TestRequestCachedSetup(object): values.append("teardown") req1.cached_setup(setup, teardown, scope="function") - assert values == ['setup'] + assert values == ["setup"] # artificial call of finalizer setupstate = req1._pyfuncitem.session._setupstate setupstate._callfinalizers(item1) @@ -999,7 +1156,8 @@ class TestRequestCachedSetup(object): assert values == ["setup", "teardown", "setup", "teardown"] def test_request_cached_setup_two_args(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture @@ -1010,14 +1168,14 @@ class TestRequestCachedSetup(object): return request.cached_setup(lambda: 17) def test_two_different_setups(arg1, arg2): assert arg1 != arg2 - """) + """ + ) result = testdir.runpytest("-v") - result.stdout.fnmatch_lines([ - "*1 passed*" - ]) + result.stdout.fnmatch_lines(["*1 passed*"]) def test_request_cached_setup_getfixturevalue(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture @@ -1029,14 +1187,14 @@ class TestRequestCachedSetup(object): return request.cached_setup(lambda: 10) def test_two_funcarg(arg1): assert arg1 == 11 - """) + """ + ) result = testdir.runpytest("-v") - result.stdout.fnmatch_lines([ - "*1 passed*" - ]) + result.stdout.fnmatch_lines(["*1 passed*"]) def test_request_cached_setup_functional(self, testdir): - testdir.makepyfile(test_0=""" + testdir.makepyfile( + test_0=""" import pytest values = [] @pytest.fixture @@ -1053,19 +1211,21 @@ class TestRequestCachedSetup(object): assert something == [1] def test_list_twice(something): assert something == [1] - """) - testdir.makepyfile(test_1=""" + """ + ) + testdir.makepyfile( + test_1=""" import test_0 # should have run already def test_check_test0_has_teardown_correct(): assert test_0.values == [2] - """) + """ + ) result = testdir.runpytest("-v") - result.stdout.fnmatch_lines([ - "*3 passed*" - ]) + result.stdout.fnmatch_lines(["*3 passed*"]) def test_issue117_sessionscopeteardown(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture @@ -1077,18 +1237,18 @@ class TestRequestCachedSetup(object): return app def test_func(app): pass - """) + """ + ) result = testdir.runpytest() assert result.ret != 0 - result.stdout.fnmatch_lines([ - "*3/x*", - "*ZeroDivisionError*", - ]) + result.stdout.fnmatch_lines(["*3/x*", "*ZeroDivisionError*"]) class TestFixtureUsages(object): + def test_noargfixturedec(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture def arg1(): @@ -1096,12 +1256,14 @@ class TestFixtureUsages(object): def test_func(arg1): assert arg1 == 1 - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_receives_funcargs(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture() def arg1(): @@ -1116,12 +1278,14 @@ class TestFixtureUsages(object): def test_all(arg1, arg2): assert arg1 == 1 
assert arg2 == 2 - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) def test_receives_funcargs_scope_mismatch(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture(scope="function") def arg1(): @@ -1133,17 +1297,21 @@ class TestFixtureUsages(object): def test_add(arg2): assert arg2 == 2 - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*ScopeMismatch*involved factories*", - "* def arg2*", - "* def arg1*", - "*1 error*" - ]) + result.stdout.fnmatch_lines( + [ + "*ScopeMismatch*involved factories*", + "* def arg2*", + "* def arg1*", + "*1 error*", + ] + ) def test_receives_funcargs_scope_mismatch_issue660(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture(scope="function") def arg1(): @@ -1155,16 +1323,16 @@ class TestFixtureUsages(object): def test_add(arg1, arg2): assert arg2 == 2 - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*ScopeMismatch*involved factories*", - "* def arg2*", - "*1 error*" - ]) + result.stdout.fnmatch_lines( + ["*ScopeMismatch*involved factories*", "* def arg2*", "*1 error*"] + ) def test_invalid_scope(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture(scope="functions") def badscope(): @@ -1172,15 +1340,19 @@ class TestFixtureUsages(object): def test_nothing(badscope): pass - """) + """ + ) result = testdir.runpytest_inprocess() result.stdout.fnmatch_lines( - ("*ValueError: fixture badscope from test_invalid_scope.py has an unsupported" - " scope value 'functions'") + ( + "*ValueError: fixture badscope from test_invalid_scope.py has an unsupported" + " scope value 'functions'" + ) ) def test_funcarg_parametrized_and_used_twice(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest values = [] @pytest.fixture(params=[1,2]) @@ -1195,14 +1367,14 @@ class TestFixtureUsages(object): def test_add(arg1, arg2): assert arg2 == arg1 + 1 assert len(values) == arg1 - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*2 passed*" - ]) + result.stdout.fnmatch_lines(["*2 passed*"]) def test_factory_uses_unknown_funcarg_as_dependency_error(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture() @@ -1215,31 +1387,37 @@ class TestFixtureUsages(object): def test_missing(call_fail): pass - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ *pytest.fixture()* *def call_fail(fail)* *pytest.fixture()* *def fail* *fixture*'missing'*not found* - """) + """ + ) def test_factory_setup_as_classes_fails(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest class arg1(object): def __init__(self, request): self.x = 1 arg1 = pytest.fixture()(arg1) - """) + """ + ) reprec = testdir.inline_run() values = reprec.getfailedcollections() assert len(values) == 1 def test_request_can_be_overridden(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture() def request(request): @@ -1247,12 +1425,14 @@ class TestFixtureUsages(object): return request def test_request(request): assert request.a == 1 - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_usefixtures_marker(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest values = [] @@ -1270,41 +1450,51 @@ class 
TestFixtureUsages(object): assert self.hello == "world" assert len(values) == 1 pytest.mark.usefixtures("myfix")(TestClass) - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) def test_usefixtures_ini(self, testdir): - testdir.makeini(""" + testdir.makeini( + """ [pytest] usefixtures = myfix - """) - testdir.makeconftest(""" + """ + ) + testdir.makeconftest( + """ import pytest @pytest.fixture(scope="class") def myfix(request): request.cls.hello = "world" - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ class TestClass(object): def test_one(self): assert self.hello == "world" def test_two(self): assert self.hello == "world" - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) def test_usefixtures_seen_in_showmarkers(self, testdir): result = testdir.runpytest("--markers") - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ *usefixtures(fixturename1*mark tests*fixtures* - """) + """ + ) def test_request_instance_issue203(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest class TestClass(object): @@ -1314,12 +1504,14 @@ class TestFixtureUsages(object): self.arg1 = 1 def test_hello(self, setup1): assert self.arg1 == 1 - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_fixture_parametrized_with_iterator(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest values = [] @@ -1339,7 +1531,8 @@ class TestFixtureUsages(object): values.append(arg) def test_2(arg2): values.append(arg2*10) - """) + """ + ) reprec = testdir.inline_run("-v") reprec.assertoutcome(passed=4) values = reprec.getcalls("pytest_runtest_call")[0].item.module.values @@ -1351,7 +1544,8 @@ class TestFixtureManagerParseFactories(object): @pytest.fixture def testdir(self, request): testdir = request.getfixturevalue("testdir") - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest @pytest.fixture @@ -1365,11 +1559,13 @@ class TestFixtureManagerParseFactories(object): @pytest.fixture def item(request): return request._pyfuncitem - """) + """ + ) return testdir def test_parsefactories_evil_objects_issue214(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ class A(object): def __call__(self): pass @@ -1378,24 +1574,28 @@ class TestFixtureManagerParseFactories(object): a = A() def test_hello(): pass - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1, failed=0) def test_parsefactories_conftest(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_hello(item, fm): for name in ("fm", "hello", "item"): faclist = fm.getfixturedefs(name, item.nodeid) assert len(faclist) == 1 fac = faclist[0] assert fac.func.__name__ == name - """) + """ + ) reprec = testdir.inline_run("-s") reprec.assertoutcome(passed=1) def test_parsefactories_conftest_and_module_and_class(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture @@ -1412,7 +1612,8 @@ class TestFixtureManagerParseFactories(object): assert faclist[0].func(item._request) == "conftest" assert faclist[1].func(item._request) == "module" assert faclist[2].func(item._request) == "class" - """) + """ + ) reprec = testdir.inline_run("-s") reprec.assertoutcome(passed=1) @@ -1421,28 +1622,44 @@ class TestFixtureManagerParseFactories(object): # https://mail.python.org/pipermail/pytest-dev/2014-September/002617.html runner = testdir.mkdir("runner") package = testdir.mkdir("package") - 
package.join("conftest.py").write(dedent("""\ + package.join("conftest.py").write( + dedent( + """\ import pytest @pytest.fixture def one(): return 1 - """)) - package.join("test_x.py").write(dedent("""\ + """ + ) + ) + package.join("test_x.py").write( + dedent( + """\ def test_x(one): assert one == 1 - """)) + """ + ) + ) sub = package.mkdir("sub") sub.join("__init__.py").ensure() - sub.join("conftest.py").write(dedent("""\ + sub.join("conftest.py").write( + dedent( + """\ import pytest @pytest.fixture def one(): return 2 - """)) - sub.join("test_y.py").write(dedent("""\ + """ + ) + ) + sub.join("test_y.py").write( + dedent( + """\ def test_x(one): assert one == 2 - """)) + """ + ) + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) with runner.as_cwd(): @@ -1454,7 +1671,8 @@ class TestAutouseDiscovery(object): @pytest.fixture def testdir(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest @pytest.fixture(autouse=True) def perfunction(request, tmpdir): @@ -1474,23 +1692,27 @@ class TestAutouseDiscovery(object): @pytest.fixture def item(request): return request._pyfuncitem - """) + """ + ) return testdir def test_parsefactories_conftest(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ from _pytest.pytester import get_public_names def test_check_setup(item, fm): autousenames = fm._getautousenames(item.nodeid) assert len(get_public_names(autousenames)) == 2 assert "perfunction2" in autousenames assert "perfunction" in autousenames - """) + """ + ) reprec = testdir.inline_run("-s") reprec.assertoutcome(passed=1) def test_two_classes_separated_autouse(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest class TestA(object): values = [] @@ -1506,12 +1728,14 @@ class TestAutouseDiscovery(object): self.values.append(1) def test_setup2(self): assert self.values == [1] - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) def test_setup_at_classlevel(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest class TestClass(object): @pytest.fixture(autouse=True) @@ -1521,13 +1745,15 @@ class TestAutouseDiscovery(object): assert self.funcname == "test_method1" def test_method2(self): assert self.funcname == "test_method2" - """) + """ + ) reprec = testdir.inline_run("-s") reprec.assertoutcome(passed=2) @pytest.mark.xfail(reason="'enabled' feature not implemented") def test_setup_enabled_functionnode(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest def enabled(parentnode, markers): @@ -1547,7 +1773,8 @@ class TestAutouseDiscovery(object): @pytest.mark.needsdb def test_func2(request): assert "db" in request.fixturenames - """) + """ + ) reprec = testdir.inline_run("-s") reprec.assertoutcome(passed=2) @@ -1556,7 +1783,8 @@ class TestAutouseDiscovery(object): an imported mock.call would break setup/factory discovery due to it being callable and __code__ not being a code object """ - testdir.makepyfile(""" + testdir.makepyfile( + """ class _call(tuple): def __call__(self, *k, **kw): pass @@ -1564,29 +1792,35 @@ class TestAutouseDiscovery(object): return self call = _call() - """) + """ + ) reprec = testdir.inline_run("-s") reprec.assertoutcome(failed=0, passed=0) def test_autouse_in_conftests(self, testdir): a = testdir.mkdir("a") b = testdir.mkdir("a1") - conftest = testdir.makeconftest(""" + conftest = testdir.makeconftest( + """ import pytest @pytest.fixture(autouse=True) def hello(): xxx - """) + """ + ) 
conftest.move(a.join(conftest.basename)) a.join("test_something.py").write("def test_func(): pass") b.join("test_otherthing.py").write("def test_func(): pass") result = testdir.runpytest() - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ *1 passed*1 error* - """) + """ + ) def test_autouse_in_module_and_two_classes(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest values = [] @pytest.fixture(autouse=True) @@ -1604,32 +1838,43 @@ class TestAutouseDiscovery(object): class TestA2(object): def test_world(self): assert values == ["module", "module", "A", "module"], values - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=3) class TestAutouseManagement(object): + def test_autouse_conftest_mid_directory(self, testdir): pkgdir = testdir.mkpydir("xyz123") - pkgdir.join("conftest.py").write(_pytest._code.Source(""" + pkgdir.join("conftest.py").write( + _pytest._code.Source( + """ import pytest @pytest.fixture(autouse=True) def app(): import sys sys._myapp = "hello" - """)) + """ + ) + ) t = pkgdir.ensure("tests", "test_app.py") - t.write(_pytest._code.Source(""" + t.write( + _pytest._code.Source( + """ import sys def test_app(): assert sys._myapp == "hello" - """)) + """ + ) + ) reprec = testdir.inline_run("-s") reprec.assertoutcome(passed=1) def test_autouse_honored_for_yield(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture(autouse=True) def tst(): @@ -1640,12 +1885,14 @@ class TestAutouseManagement(object): assert x == abs(hello) yield f, 3 yield f, -3 - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) def test_funcarg_and_setup(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest values = [] @pytest.fixture(scope="module") @@ -1665,12 +1912,14 @@ class TestAutouseManagement(object): assert len(values) == 2 assert values == [1,2] assert arg == 0 - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) def test_uses_parametrized_resource(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest values = [] @pytest.fixture(params=[1,2]) @@ -1689,12 +1938,14 @@ class TestAutouseManagement(object): else: 0/0 - """) + """ + ) reprec = testdir.inline_run("-s") reprec.assertoutcome(passed=2) def test_session_parametrized_function(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest values = [] @@ -1714,12 +1965,14 @@ class TestAutouseManagement(object): def test_result(arg): assert len(values) == arg assert values[:arg] == [1,2][:arg] - """) + """ + ) reprec = testdir.inline_run("-v", "-s") reprec.assertoutcome(passed=4) def test_class_function_parametrization_finalization(self, testdir): - p = testdir.makeconftest(""" + p = testdir.makeconftest( + """ import pytest import pprint @@ -1738,8 +1991,10 @@ class TestAutouseManagement(object): def fin(): values.append("fin_%s%s" % (carg, farg)) request.addfinalizer(fin) - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ import pytest class TestClass(object): @@ -1748,8 +2003,9 @@ class TestAutouseManagement(object): class TestClass2(object): def test_2(self): pass - """) - confcut = "--confcutdir={0}".format(testdir.tmpdir) + """ + ) + confcut = "--confcutdir={}".format(testdir.tmpdir) reprec = testdir.inline_run("-v", "-s", confcut) reprec.assertoutcome(passed=8) config = reprec.getcalls("pytest_unconfigure")[0].config @@ -1757,7 +2013,8 @@ class TestAutouseManagement(object): 
assert values == ["fin_a1", "fin_a2", "fin_b1", "fin_b2"] * 2 def test_scope_ordering(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest values = [] @pytest.fixture(scope="function", autouse=True) @@ -1773,12 +2030,14 @@ class TestAutouseManagement(object): class TestHallo(object): def test_method(self): assert values == [1,3,2] - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_parametrization_setup_teardown_ordering(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest values = [] def pytest_generate_tests(metafunc): @@ -1800,12 +2059,14 @@ class TestAutouseManagement(object): print (values) assert values == ["setup-1", "step1-1", "step2-1", "teardown-1", "setup-2", "step1-2", "step2-2", "teardown-2",] - """) - reprec = testdir.inline_run('-s') + """ + ) + reprec = testdir.inline_run("-s") reprec.assertoutcome(passed=5) def test_ordering_autouse_before_explicit(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest values = [] @@ -1817,7 +2078,8 @@ class TestAutouseManagement(object): values.append(2) def test_hello(arg1): assert values == [1,2] - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) @@ -1825,7 +2087,8 @@ class TestAutouseManagement(object): @pytest.mark.parametrize("param1", ["", "params=[1]"], ids=["p00", "p01"]) @pytest.mark.parametrize("param2", ["", "params=[1]"], ids=["p10", "p11"]) def test_ordering_dependencies_torndown_first(self, testdir, param1, param2): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest values = [] @pytest.fixture(%(param1)s) @@ -1841,14 +2104,18 @@ class TestAutouseManagement(object): pass def test_check(): assert values == ["new1", "new2", "fin2", "fin1"] - """ % locals()) + """ + % locals() + ) reprec = testdir.inline_run("-s") reprec.assertoutcome(passed=2) class TestFixtureMarker(object): + def test_parametrize(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture(params=["a", "b", "c"]) def arg(request): @@ -1858,12 +2125,14 @@ class TestFixtureMarker(object): values.append(arg) def test_result(): assert values == list("abc") - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=4) def test_multiple_parametrization_issue_736(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture(params=[1,2,3]) @@ -1874,17 +2143,22 @@ class TestFixtureMarker(object): def test_issue(foo, foobar): assert foo in [1,2,3] assert foobar in [4,5,6] - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=9) - @pytest.mark.parametrize('param_args', ["'fixt, val'", "'fixt,val'", "['fixt', 'val']", "('fixt', 'val')"]) + @pytest.mark.parametrize( + "param_args", + ["'fixt, val'", "'fixt,val'", "['fixt', 'val']", "('fixt', 'val')"], + ) def test_override_parametrized_fixture_issue_979(self, testdir, param_args): """Make sure a parametrized argument can override a parametrized fixture. This was a regression introduced in the fix for #736. 
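        A @pytest.mark.parametrize on the test itself takes precedence over the
        fixture's own params, regardless of how the argument names are spelled.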
""" - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture(params=[1, 2]) @@ -1894,12 +2168,15 @@ class TestFixtureMarker(object): @pytest.mark.parametrize(%s, [(3, 'x'), (4, 'x')]) def test_foo(fixt, val): pass - """ % param_args) + """ + % param_args + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) def test_scope_session(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest values = [] @pytest.fixture(scope="module") @@ -1916,12 +2193,14 @@ class TestFixtureMarker(object): def test3(self, arg): assert arg == 1 assert len(values) == 1 - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=3) def test_scope_session_exc(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest values = [] @pytest.fixture(scope="session") @@ -1935,12 +2214,14 @@ class TestFixtureMarker(object): pass def test_last(): assert values == [1] - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(skipped=2, passed=1) def test_scope_session_exc_two_fix(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest values = [] m = [] @@ -1959,7 +2240,8 @@ class TestFixtureMarker(object): def test_last(): assert values == [1] assert m == [] - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(skipped=2, passed=1) @@ -1987,13 +2269,14 @@ class TestFixtureMarker(object): test_real=""" def test_last(req_list): assert req_list == [1] - """ + """, ) reprec = testdir.inline_run() reprec.assertoutcome(skipped=2, passed=1) def test_scope_module_uses_session(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest values = [] @pytest.fixture(scope="module") @@ -2010,12 +2293,14 @@ class TestFixtureMarker(object): def test3(self, arg): assert arg == 1 assert len(values) == 1 - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=3) def test_scope_module_and_finalizer(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest finalized_list = [] created_list = [] @@ -2030,7 +2315,8 @@ class TestFixtureMarker(object): @pytest.fixture def finalized(request): return len(finalized_list) - """) + """ + ) testdir.makepyfile( test_mod1=""" def test_1(arg, created, finalized): @@ -2047,23 +2333,30 @@ class TestFixtureMarker(object): def test_4(arg, created, finalized): assert created == 3 assert finalized == 2 - """) + """, + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=4) - @pytest.mark.parametrize("method", [ - 'request.getfixturevalue("arg")', - 'request.cached_setup(lambda: None, scope="function")', - ], ids=["getfixturevalue", "cached_setup"]) + @pytest.mark.parametrize( + "method", + [ + 'request.getfixturevalue("arg")', + 'request.cached_setup(lambda: None, scope="function")', + ], + ids=["getfixturevalue", "cached_setup"], + ) def test_scope_mismatch_various(self, testdir, method): - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest finalized = [] created = [] @pytest.fixture(scope="function") def arg(request): pass - """) + """ + ) testdir.makepyfile( test_mod1=""" import pytest @@ -2072,20 +2365,24 @@ class TestFixtureMarker(object): %s def test_1(arg): pass - """ % method) + """ + % method + ) result = testdir.runpytest() assert result.ret != 0 - result.stdout.fnmatch_lines([ - "*ScopeMismatch*You tried*function*session*request*", - ]) + result.stdout.fnmatch_lines( + ["*ScopeMismatch*You tried*function*session*request*"] + ) def 
test_register_only_with_mark(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest @pytest.fixture() def arg(): return 1 - """) + """ + ) testdir.makepyfile( test_mod1=""" import pytest @@ -2094,12 +2391,14 @@ class TestFixtureMarker(object): return arg + 1 def test_1(arg): assert arg == 2 - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_parametrize_and_scope(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture(scope="module", params=["a", "b", "c"]) def arg(request): @@ -2107,7 +2406,8 @@ class TestFixtureMarker(object): values = [] def test_param(arg): values.append(arg) - """) + """ + ) reprec = testdir.inline_run("-v") reprec.assertoutcome(passed=3) values = reprec.getcalls("pytest_runtest_call")[0].item.module.values @@ -2117,28 +2417,30 @@ class TestFixtureMarker(object): assert "c" in values def test_scope_mismatch(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest @pytest.fixture(scope="function") def arg(request): pass - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ import pytest @pytest.fixture(scope="session") def arg(arg): pass def test_mismatch(arg): pass - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*ScopeMismatch*", - "*1 error*", - ]) + result.stdout.fnmatch_lines(["*ScopeMismatch*", "*1 error*"]) def test_parametrize_separated_order(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture(scope="module", params=[1, 2]) @@ -2150,18 +2452,22 @@ class TestFixtureMarker(object): values.append(arg) def test_2(arg): values.append(arg) - """) + """ + ) reprec = testdir.inline_run("-v") reprec.assertoutcome(passed=4) values = reprec.getcalls("pytest_runtest_call")[0].item.module.values assert values == [1, 1, 2, 2] def test_module_parametrized_ordering(self, testdir): - testdir.makeini(""" + testdir.makeini( + """ [pytest] console_output_style=classic - """) - testdir.makeconftest(""" + """ + ) + testdir.makeconftest( + """ import pytest @pytest.fixture(scope="session", params="s1 s2".split()) @@ -2170,13 +2476,16 @@ class TestFixtureMarker(object): @pytest.fixture(scope="module", params="m1 m2".split()) def marg(): pass - """) - testdir.makepyfile(test_mod1=""" + """ + ) + testdir.makepyfile( + test_mod1=""" def test_func(sarg): pass def test_func1(marg): pass - """, test_mod2=""" + """, + test_mod2=""" def test_func2(sarg): pass def test_func3(sarg, marg): @@ -2185,9 +2494,11 @@ class TestFixtureMarker(object): pass def test_func4(marg): pass - """) + """, + ) result = testdir.runpytest("-v") - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ test_mod1.py::test_func[s1] PASSED test_mod2.py::test_func2[s1] PASSED test_mod2.py::test_func3[s1-m1] PASSED @@ -2204,14 +2515,18 @@ class TestFixtureMarker(object): test_mod2.py::test_func4[m2] PASSED test_mod1.py::test_func1[m1] PASSED test_mod1.py::test_func1[m2] PASSED - """) + """ + ) def test_dynamic_parametrized_ordering(self, testdir): - testdir.makeini(""" + testdir.makeini( + """ [pytest] console_output_style=classic - """) - testdir.makeconftest(""" + """ + ) + testdir.makeconftest( + """ import pytest def pytest_configure(config): @@ -2228,15 +2543,19 @@ class TestFixtureMarker(object): @pytest.fixture(scope='session', autouse='True') def reprovision(request, flavor, encap): pass - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ def 
test(reprovision): pass def test2(reprovision): pass - """) + """ + ) result = testdir.runpytest("-v") - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ test_dynamic_parametrized_ordering.py::test[flavor1-vxlan] PASSED test_dynamic_parametrized_ordering.py::test2[flavor1-vxlan] PASSED test_dynamic_parametrized_ordering.py::test[flavor2-vxlan] PASSED @@ -2245,14 +2564,18 @@ class TestFixtureMarker(object): test_dynamic_parametrized_ordering.py::test2[flavor2-vlan] PASSED test_dynamic_parametrized_ordering.py::test[flavor1-vlan] PASSED test_dynamic_parametrized_ordering.py::test2[flavor1-vlan] PASSED - """) + """ + ) def test_class_ordering(self, testdir): - testdir.makeini(""" + testdir.makeini( + """ [pytest] console_output_style=classic - """) - testdir.makeconftest(""" + """ + ) + testdir.makeconftest( + """ import pytest values = [] @@ -2270,8 +2593,10 @@ class TestFixtureMarker(object): def fin(): values.append("fin_%s%s" % (carg, farg)) request.addfinalizer(fin) - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ import pytest class TestClass2(object): @@ -2282,9 +2607,11 @@ class TestFixtureMarker(object): class TestClass(object): def test_3(self): pass - """) + """ + ) result = testdir.runpytest("-vs") - result.stdout.re_match_lines(r""" + result.stdout.re_match_lines( + r""" test_class_ordering.py::TestClass2::test_1\[a-1\] PASSED test_class_ordering.py::TestClass2::test_1\[a-2\] PASSED test_class_ordering.py::TestClass2::test_2\[a-1\] PASSED @@ -2297,10 +2624,12 @@ class TestFixtureMarker(object): test_class_ordering.py::TestClass::test_3\[a-2\] PASSED test_class_ordering.py::TestClass::test_3\[b-1\] PASSED test_class_ordering.py::TestClass::test_3\[b-2\] PASSED - """) + """ + ) def test_parametrize_separated_order_higher_scope_first(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture(scope="function", params=[1, 2]) @@ -2326,25 +2655,57 @@ class TestFixtureMarker(object): values.append("test3") def test_4(modarg, arg): values.append("test4") - """) + """ + ) reprec = testdir.inline_run("-v") reprec.assertoutcome(passed=12) values = reprec.getcalls("pytest_runtest_call")[0].item.module.values expected = [ - 'create:1', 'test1', 'fin:1', 'create:2', 'test1', - 'fin:2', 'create:mod1', 'test2', 'create:1', 'test3', - 'fin:1', 'create:2', 'test3', 'fin:2', 'create:1', - 'test4', 'fin:1', 'create:2', 'test4', 'fin:2', - 'fin:mod1', 'create:mod2', 'test2', 'create:1', 'test3', - 'fin:1', 'create:2', 'test3', 'fin:2', 'create:1', - 'test4', 'fin:1', 'create:2', 'test4', 'fin:2', - 'fin:mod2'] + "create:1", + "test1", + "fin:1", + "create:2", + "test1", + "fin:2", + "create:mod1", + "test2", + "create:1", + "test3", + "fin:1", + "create:2", + "test3", + "fin:2", + "create:1", + "test4", + "fin:1", + "create:2", + "test4", + "fin:2", + "fin:mod1", + "create:mod2", + "test2", + "create:1", + "test3", + "fin:1", + "create:2", + "test3", + "fin:2", + "create:1", + "test4", + "fin:1", + "create:2", + "test4", + "fin:2", + "fin:mod2", + ] import pprint + pprint.pprint(list(zip(values, expected))) assert values == expected def test_parametrized_fixture_teardown_order(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture(params=[1,2], scope="class") def param1(request): @@ -2373,15 +2734,19 @@ class TestFixtureMarker(object): def test_finish(): assert not values - """) + """ + ) result = testdir.runpytest("-v") - result.stdout.fnmatch_lines(""" + 
result.stdout.fnmatch_lines( + """ *3 passed* - """) + """ + ) assert "error" not in result.stdout.str() def test_fixture_finalizer(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest import sys @@ -2392,9 +2757,12 @@ class TestFixtureMarker(object): sys.stdout.write('Finalized') request.addfinalizer(finalize) return {} - """) + """ + ) b = testdir.mkdir("subdir") - b.join("test_overridden_fixture_finalizer.py").write(dedent(""" + b.join("test_overridden_fixture_finalizer.py").write( + dedent( + """ import pytest @pytest.fixture def browser(browser): @@ -2403,13 +2771,16 @@ class TestFixtureMarker(object): def test_browser(browser): assert browser['visited'] is True - """)) + """ + ) + ) reprec = testdir.runpytest("-s") - for test in ['test_browser']: - reprec.stdout.fnmatch_lines('*Finalized*') + for test in ["test_browser"]: + reprec.stdout.fnmatch_lines("*Finalized*") def test_class_scope_with_normal_tests(self, testdir): - testpath = testdir.makepyfile(""" + testpath = testdir.makepyfile( + """ import pytest class Box(object): @@ -2429,13 +2800,15 @@ class TestFixtureMarker(object): class Test2(object): def test_c(self, a): - assert a == 3""") + assert a == 3""" + ) reprec = testdir.inline_run(testpath) - for test in ['test_a', 'test_b', 'test_c']: + for test in ["test_a", "test_b", "test_c"]: assert reprec.matchreport(test).passed def test_request_is_clean(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest values = [] @pytest.fixture(params=[1, 2]) @@ -2443,13 +2816,15 @@ class TestFixtureMarker(object): request.addfinalizer(lambda: values.append(request.param)) def test_fix(fix): pass - """) + """ + ) reprec = testdir.inline_run("-s") values = reprec.getcalls("pytest_runtest_call")[0].item.module.values assert values == [1, 2] def test_parametrize_separated_lifecycle(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest values = [] @@ -2462,11 +2837,13 @@ class TestFixtureMarker(object): values.append(arg) def test_2(arg): values.append(arg) - """) + """ + ) reprec = testdir.inline_run("-vs") reprec.assertoutcome(passed=4) values = reprec.getcalls("pytest_runtest_call")[0].item.module.values import pprint + pprint.pprint(values) # assert len(values) == 6 assert values[0] == values[1] == 1 @@ -2475,7 +2852,8 @@ class TestFixtureMarker(object): assert values[5] == "fin2" def test_parametrize_function_scoped_finalizers_called(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture(scope="function", params=[1, 2]) @@ -2492,14 +2870,16 @@ class TestFixtureMarker(object): def test_3(): assert len(values) == 8 assert values == [1, "fin1", 2, "fin2", 1, "fin1", 2, "fin2"] - """) + """ + ) reprec = testdir.inline_run("-v") reprec.assertoutcome(passed=5) @pytest.mark.issue246 @pytest.mark.parametrize("scope", ["session", "function", "module"]) def test_finalizer_order_on_parametrization(self, scope, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest values = [] @@ -2526,13 +2906,16 @@ class TestFixtureMarker(object): pass def test_other(): pass - """ % {"scope": scope}) + """ + % {"scope": scope} + ) reprec = testdir.inline_run("-lvs") reprec.assertoutcome(passed=3) @pytest.mark.issue396 def test_class_scope_parametrization_ordering(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest values = [] @pytest.fixture(params=["John", "Doe"], scope="class") @@ -2550,16 +2933,30 @@ class 
TestFixtureMarker(object): def test_population(self, human): values.append("test_population") - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=6) values = reprec.getcalls("pytest_runtest_call")[0].item.module.values - assert values == ["test_hello", "fin John", "test_hello", "fin Doe", - "test_name", "test_population", "fin John", - "test_name", "test_population", "fin Doe"] + assert ( + values + == [ + "test_hello", + "fin John", + "test_hello", + "fin Doe", + "test_name", + "test_population", + "fin John", + "test_name", + "test_population", + "fin Doe", + ] + ) def test_parametrize_setup_function(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture(scope="module", params=[1, 2]) @@ -2585,12 +2982,14 @@ class TestFixtureMarker(object): assert values == ["setup1", 1, 1, "fin1", "setup2", 2, 2, ] - """) + """ + ) reprec = testdir.inline_run("-v") reprec.assertoutcome(passed=6) def test_fixture_marked_function_not_collected_as_test(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture def test_app(): @@ -2598,12 +2997,14 @@ class TestFixtureMarker(object): def test_something(test_app): assert test_app == 1 - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_params_and_ids(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture(params=[object(), object()], @@ -2613,14 +3014,14 @@ class TestFixtureMarker(object): def test_foo(fix): assert 1 - """) - res = testdir.runpytest('-v') - res.stdout.fnmatch_lines([ - '*test_foo*alpha*', - '*test_foo*beta*']) + """ + ) + res = testdir.runpytest("-v") + res.stdout.fnmatch_lines(["*test_foo*alpha*", "*test_foo*beta*"]) def test_params_and_ids_yieldfixture(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.yield_fixture(params=[object(), object()], @@ -2630,15 +3031,15 @@ class TestFixtureMarker(object): def test_foo(fix): assert 1 - """) - res = testdir.runpytest('-v') - res.stdout.fnmatch_lines([ - '*test_foo*alpha*', - '*test_foo*beta*']) + """ + ) + res = testdir.runpytest("-v") + res.stdout.fnmatch_lines(["*test_foo*alpha*", "*test_foo*beta*"]) @pytest.mark.issue920 def test_deterministic_fixture_collection(self, testdir, monkeypatch): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture(scope="module", @@ -2659,27 +3060,40 @@ class TestFixtureMarker(object): # sometimes we get unexpected passes. hashing B seems # to help? 
assert hash(B) or True - """) + """ + ) monkeypatch.setenv("PYTHONHASHSEED", "1") out1 = testdir.runpytest_subprocess("-v") monkeypatch.setenv("PYTHONHASHSEED", "2") out2 = testdir.runpytest_subprocess("-v") - out1 = [line for line in out1.outlines if line.startswith("test_deterministic_fixture_collection.py::test_foo")] - out2 = [line for line in out2.outlines if line.startswith("test_deterministic_fixture_collection.py::test_foo")] + out1 = [ + line + for line in out1.outlines + if line.startswith("test_deterministic_fixture_collection.py::test_foo") + ] + out2 = [ + line + for line in out2.outlines + if line.startswith("test_deterministic_fixture_collection.py::test_foo") + ] assert len(out1) == 12 assert out1 == out2 class TestRequestScopeAccess(object): - pytestmark = pytest.mark.parametrize(("scope", "ok", "error"), [ - ["session", "", "fspath class function module"], - ["module", "module fspath", "cls function"], - ["class", "module fspath cls", "function"], - ["function", "module fspath cls function", ""] - ]) + pytestmark = pytest.mark.parametrize( + ("scope", "ok", "error"), + [ + ["session", "", "fspath class function module"], + ["module", "module fspath", "cls function"], + ["class", "module fspath cls", "function"], + ["function", "module fspath cls function", ""], + ], + ) def test_setup(self, testdir, scope, ok, error): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture(scope=%r, autouse=True) def myscoped(request): @@ -2692,12 +3106,15 @@ class TestRequestScopeAccess(object): assert request.config def test_func(): pass - """ % (scope, ok.split(), error.split())) + """ + % (scope, ok.split(), error.split()) + ) reprec = testdir.inline_run("-l") reprec.assertoutcome(passed=1) def test_funcarg(self, testdir, scope, ok, error): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture(scope=%r) def arg(request): @@ -2710,31 +3127,35 @@ class TestRequestScopeAccess(object): assert request.config def test_func(arg): pass - """ % (scope, ok.split(), error.split())) + """ + % (scope, ok.split(), error.split()) + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) class TestErrors(object): + def test_subfactory_missing_funcarg(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture() def gen(qwe123): return 1 def test_something(gen): pass - """) + """ + ) result = testdir.runpytest() assert result.ret != 0 - result.stdout.fnmatch_lines([ - "*def gen(qwe123):*", - "*fixture*qwe123*not found*", - "*1 error*", - ]) + result.stdout.fnmatch_lines( + ["*def gen(qwe123):*", "*fixture*qwe123*not found*", "*1 error*"] + ) def test_issue498_fixture_finalizer_failing(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture def fix1(request): @@ -2750,55 +3171,54 @@ class TestErrors(object): values.append(fix1) def test_3(): assert values[0] != values[1] - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ *ERROR*teardown*test_1* *KeyError* *ERROR*teardown*test_2* *KeyError* *3 pass*2 error* - """) + """ + ) def test_setupfunc_missing_funcarg(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture(autouse=True) def gen(qwe123): return 1 def test_something(): pass - """) + """ + ) result = testdir.runpytest() assert result.ret != 0 - result.stdout.fnmatch_lines([ - "*def gen(qwe123):*", - "*fixture*qwe123*not found*", - "*1 error*", - ]) + 
result.stdout.fnmatch_lines( + ["*def gen(qwe123):*", "*fixture*qwe123*not found*", "*1 error*"] + ) class TestShowFixtures(object): + def test_funcarg_compat(self, testdir): config = testdir.parseconfigure("--funcargs") assert config.option.showfixtures def test_show_fixtures(self, testdir): result = testdir.runpytest("--fixtures") - result.stdout.fnmatch_lines([ - "*tmpdir*", - "*temporary directory*", - ]) + result.stdout.fnmatch_lines(["*tmpdir*", "*temporary directory*"]) def test_show_fixtures_verbose(self, testdir): result = testdir.runpytest("--fixtures", "-v") - result.stdout.fnmatch_lines([ - "*tmpdir*--*tmpdir.py*", - "*temporary directory*", - ]) + result.stdout.fnmatch_lines(["*tmpdir*--*tmpdir.py*", "*temporary directory*"]) def test_show_fixtures_testmodule(self, testdir): - p = testdir.makepyfile(''' + p = testdir.makepyfile( + ''' import pytest @pytest.fixture def _arg0(): @@ -2806,39 +3226,50 @@ class TestShowFixtures(object): @pytest.fixture def arg1(): """ hello world """ - ''') + ''' + ) result = testdir.runpytest("--fixtures", p) - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ *tmpdir *fixtures defined from* *arg1* *hello world* - """) + """ + ) assert "arg0" not in result.stdout.str() @pytest.mark.parametrize("testmod", [True, False]) def test_show_fixtures_conftest(self, testdir, testmod): - testdir.makeconftest(''' + testdir.makeconftest( + ''' import pytest @pytest.fixture def arg1(): """ hello world """ - ''') + ''' + ) if testmod: - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_hello(): pass - """) + """ + ) result = testdir.runpytest("--fixtures") - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ *tmpdir* *fixtures defined from*conftest* *arg1* *hello world* - """) + """ + ) def test_show_fixtures_trimmed_doc(self, testdir): - p = testdir.makepyfile(dedent(''' + p = testdir.makepyfile( + dedent( + ''' import pytest @pytest.fixture def arg1(): @@ -2854,9 +3285,13 @@ class TestShowFixtures(object): line2 """ - ''')) + ''' + ) + ) result = testdir.runpytest("--fixtures", p) - result.stdout.fnmatch_lines(dedent(""" + result.stdout.fnmatch_lines( + dedent( + """ * fixtures defined from test_show_fixtures_trimmed_doc * arg2 line1 @@ -2865,10 +3300,14 @@ class TestShowFixtures(object): line1 line2 - """)) + """ + ) + ) def test_show_fixtures_indented_doc(self, testdir): - p = testdir.makepyfile(dedent(''' + p = testdir.makepyfile( + dedent( + ''' import pytest @pytest.fixture def fixture1(): @@ -2876,17 +3315,25 @@ class TestShowFixtures(object): line1 indented line """ - ''')) + ''' + ) + ) result = testdir.runpytest("--fixtures", p) - result.stdout.fnmatch_lines(dedent(""" + result.stdout.fnmatch_lines( + dedent( + """ * fixtures defined from test_show_fixtures_indented_doc * fixture1 line1 indented line - """)) + """ + ) + ) def test_show_fixtures_indented_doc_first_line_unindented(self, testdir): - p = testdir.makepyfile(dedent(''' + p = testdir.makepyfile( + dedent( + ''' import pytest @pytest.fixture def fixture1(): @@ -2894,18 +3341,26 @@ class TestShowFixtures(object): line2 indented line """ - ''')) + ''' + ) + ) result = testdir.runpytest("--fixtures", p) - result.stdout.fnmatch_lines(dedent(""" + result.stdout.fnmatch_lines( + dedent( + """ * fixtures defined from test_show_fixtures_indented_doc_first_line_unindented * fixture1 line1 line2 indented line - """)) + """ + ) + ) def test_show_fixtures_indented_in_class(self, testdir): - p = testdir.makepyfile(dedent(''' + p = testdir.makepyfile( + 
dedent( + ''' import pytest class TestClass(object): @pytest.fixture @@ -2914,21 +3369,28 @@ class TestShowFixtures(object): line2 indented line """ - ''')) + ''' + ) + ) result = testdir.runpytest("--fixtures", p) - result.stdout.fnmatch_lines(dedent(""" + result.stdout.fnmatch_lines( + dedent( + """ * fixtures defined from test_show_fixtures_indented_in_class * fixture1 line1 line2 indented line - """)) + """ + ) + ) def test_show_fixtures_different_files(self, testdir): """ #833: --fixtures only shows fixtures from first file """ - testdir.makepyfile(test_a=''' + testdir.makepyfile( + test_a=''' import pytest @pytest.fixture @@ -2938,8 +3400,10 @@ class TestShowFixtures(object): def test_a(fix_a): pass - ''') - testdir.makepyfile(test_b=''' + ''' + ) + testdir.makepyfile( + test_b=''' import pytest @pytest.fixture @@ -2949,9 +3413,11 @@ class TestShowFixtures(object): def test_b(fix_b): pass - ''') + ''' + ) result = testdir.runpytest("--fixtures") - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ * fixtures defined from test_a * fix_a Fixture A @@ -2959,21 +3425,27 @@ class TestShowFixtures(object): * fixtures defined from test_b * fix_b Fixture B - """) + """ + ) def test_show_fixtures_with_same_name(self, testdir): - testdir.makeconftest(''' + testdir.makeconftest( + ''' import pytest @pytest.fixture def arg1(): """Hello World in conftest.py""" return "Hello World" - ''') - testdir.makepyfile(''' + ''' + ) + testdir.makepyfile( + """ def test_foo(arg1): assert arg1 == "Hello World" - ''') - testdir.makepyfile(''' + """ + ) + testdir.makepyfile( + ''' import pytest @pytest.fixture def arg1(): @@ -2981,9 +3453,11 @@ class TestShowFixtures(object): return "Hi" def test_bar(arg1): assert arg1 == "Hi" - ''') + ''' + ) result = testdir.runpytest("--fixtures") - result.stdout.fnmatch_lines(''' + result.stdout.fnmatch_lines( + """ * fixtures defined from conftest * arg1 Hello World in conftest.py @@ -2991,22 +3465,25 @@ class TestShowFixtures(object): * fixtures defined from test_show_fixtures_with_same_name * arg1 Hi from test module - ''') + """ + ) def test_fixture_disallow_twice(self): """Test that applying @pytest.fixture twice generates an error (#2334).""" with pytest.raises(ValueError): + @pytest.fixture @pytest.fixture def foo(): pass -@pytest.mark.parametrize('flavor', ['fixture', 'yield_fixture']) +@pytest.mark.parametrize("flavor", ["fixture", "yield_fixture"]) class TestContextManagerFixtureFuncs(object): def test_simple(self, testdir, flavor): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.{flavor} def arg1(): @@ -3018,19 +3495,25 @@ class TestContextManagerFixtureFuncs(object): def test_2(arg1): print ("test2 %s" % arg1) assert 0 - """.format(flavor=flavor)) + """.format( + flavor=flavor + ) + ) result = testdir.runpytest("-s") - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ *setup* *test1 1* *teardown* *setup* *test2 1* *teardown* - """) + """ + ) def test_scoped(self, testdir, flavor): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.{flavor}(scope="module") def arg1(): @@ -3041,17 +3524,23 @@ class TestContextManagerFixtureFuncs(object): print ("test1 %s" % arg1) def test_2(arg1): print ("test2 %s" % arg1) - """.format(flavor=flavor)) + """.format( + flavor=flavor + ) + ) result = testdir.runpytest("-s") - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ *setup* *test1 1* *test2 1* *teardown* - """) + """ + ) def test_setup_exception(self, testdir, flavor): 
- testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.{flavor}(scope="module") def arg1(): @@ -3059,15 +3548,21 @@ class TestContextManagerFixtureFuncs(object): yield 1 def test_1(arg1): pass - """.format(flavor=flavor)) + """.format( + flavor=flavor + ) + ) result = testdir.runpytest("-s") - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ *pytest.fail*setup* *1 error* - """) + """ + ) def test_teardown_exception(self, testdir, flavor): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.{flavor}(scope="module") def arg1(): @@ -3075,15 +3570,21 @@ class TestContextManagerFixtureFuncs(object): pytest.fail("teardown") def test_1(arg1): pass - """.format(flavor=flavor)) + """.format( + flavor=flavor + ) + ) result = testdir.runpytest("-s") - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ *pytest.fail*teardown* *1 passed*1 error* - """) + """ + ) def test_yields_more_than_one(self, testdir, flavor): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.{flavor}(scope="module") def arg1(): @@ -3091,29 +3592,40 @@ class TestContextManagerFixtureFuncs(object): yield 2 def test_1(arg1): pass - """.format(flavor=flavor)) + """.format( + flavor=flavor + ) + ) result = testdir.runpytest("-s") - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ *fixture function* *test_yields*:2* - """) + """ + ) def test_custom_name(self, testdir, flavor): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.{flavor}(name='meow') def arg1(): return 'mew' def test_1(meow): print(meow) - """.format(flavor=flavor)) + """.format( + flavor=flavor + ) + ) result = testdir.runpytest("-s") result.stdout.fnmatch_lines("*mew*") class TestParameterizedSubRequest(object): + def test_call_from_fixture(self, testdir): - testfile = testdir.makepyfile(""" + testfile = testdir.makepyfile( + """ import pytest @pytest.fixture(params=[0, 1, 2]) @@ -3126,20 +3638,26 @@ class TestParameterizedSubRequest(object): def test_foo(request, get_named_fixture): pass - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ E*Failed: The requested fixture has no parameter defined for the current test. E* E*Requested fixture 'fix_with_param' defined in: - E*{0}:4 + E*{}:4 E*Requested here: - E*{1}:9 + E*{}:9 *1 error* - """.format(testfile.basename, testfile.basename)) + """.format( + testfile.basename, testfile.basename + ) + ) def test_call_from_test(self, testdir): - testfile = testdir.makepyfile(""" + testfile = testdir.makepyfile( + """ import pytest @pytest.fixture(params=[0, 1, 2]) @@ -3148,93 +3666,121 @@ class TestParameterizedSubRequest(object): def test_foo(request): request.getfixturevalue('fix_with_param') - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ E*Failed: The requested fixture has no parameter defined for the current test. 
E* E*Requested fixture 'fix_with_param' defined in: - E*{0}:4 + E*{}:4 E*Requested here: - E*{1}:8 + E*{}:8 *1 failed* - """.format(testfile.basename, testfile.basename)) + """.format( + testfile.basename, testfile.basename + ) + ) def test_external_fixture(self, testdir): - conffile = testdir.makeconftest(""" + conffile = testdir.makeconftest( + """ import pytest @pytest.fixture(params=[0, 1, 2]) def fix_with_param(request): return request.param - """) + """ + ) - testfile = testdir.makepyfile(""" + testfile = testdir.makepyfile( + """ def test_foo(request): request.getfixturevalue('fix_with_param') - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ E*Failed: The requested fixture has no parameter defined for the current test. E* E*Requested fixture 'fix_with_param' defined in: - E*{0}:4 + E*{}:4 E*Requested here: - E*{1}:2 + E*{}:2 *1 failed* - """.format(conffile.basename, testfile.basename)) + """.format( + conffile.basename, testfile.basename + ) + ) def test_non_relative_path(self, testdir): - tests_dir = testdir.mkdir('tests') - fixdir = testdir.mkdir('fixtures') + tests_dir = testdir.mkdir("tests") + fixdir = testdir.mkdir("fixtures") fixfile = fixdir.join("fix.py") - fixfile.write(_pytest._code.Source(""" + fixfile.write( + _pytest._code.Source( + """ import pytest @pytest.fixture(params=[0, 1, 2]) def fix_with_param(request): return request.param - """)) + """ + ) + ) testfile = tests_dir.join("test_foos.py") - testfile.write(_pytest._code.Source(""" + testfile.write( + _pytest._code.Source( + """ from fix import fix_with_param def test_foo(request): request.getfixturevalue('fix_with_param') - """)) + """ + ) + ) tests_dir.chdir() testdir.syspathinsert(fixdir) result = testdir.runpytest() - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ E*Failed: The requested fixture has no parameter defined for the current test. 
E* E*Requested fixture 'fix_with_param' defined in: - E*{0}:5 + E*{}:5 E*Requested here: - E*{1}:5 + E*{}:5 *1 failed* - """.format(fixfile.strpath, testfile.basename)) + """.format( + fixfile.strpath, testfile.basename + ) + ) def test_pytest_fixture_setup_and_post_finalizer_hook(testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ from __future__ import print_function def pytest_fixture_setup(fixturedef, request): print('ROOT setup hook called for {0} from {1}'.format(fixturedef.argname, request.node.name)) def pytest_fixture_post_finalizer(fixturedef, request): print('ROOT finalizer hook called for {0} from {1}'.format(fixturedef.argname, request.node.name)) - """) - testdir.makepyfile(**{ - 'tests/conftest.py': """ + """ + ) + testdir.makepyfile( + **{ + "tests/conftest.py": """ from __future__ import print_function def pytest_fixture_setup(fixturedef, request): print('TESTS setup hook called for {0} from {1}'.format(fixturedef.argname, request.node.name)) def pytest_fixture_post_finalizer(fixturedef, request): print('TESTS finalizer hook called for {0} from {1}'.format(fixturedef.argname, request.node.name)) """, - 'tests/test_hooks.py': """ + "tests/test_hooks.py": """ from __future__ import print_function import pytest @@ -3245,26 +3791,30 @@ def test_pytest_fixture_setup_and_post_finalizer_hook(testdir): def test_func(my_fixture): print('TEST test_func') assert my_fixture == 'some' - """ - }) + """, + } + ) result = testdir.runpytest("-s") assert result.ret == 0 - result.stdout.fnmatch_lines([ - "*TESTS setup hook called for my_fixture from test_func*", - "*ROOT setup hook called for my_fixture from test_func*", - "*TEST test_func*", - "*TESTS finalizer hook called for my_fixture from test_func*", - "*ROOT finalizer hook called for my_fixture from test_func*", - ]) + result.stdout.fnmatch_lines( + [ + "*TESTS setup hook called for my_fixture from test_func*", + "*ROOT setup hook called for my_fixture from test_func*", + "*TEST test_func*", + "*TESTS finalizer hook called for my_fixture from test_func*", + "*ROOT finalizer hook called for my_fixture from test_func*", + ] + ) class TestScopeOrdering(object): """Class of tests that ensure fixtures are ordered based on their scopes (#2405)""" - @pytest.mark.parametrize('use_mark', [True, False]) + @pytest.mark.parametrize("use_mark", [True, False]) def test_func_closure_module_auto(self, testdir, use_mark): """Semantically identical to the example posted in #2405 when ``use_mark=True``""" - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture(scope='module', autouse={autouse}) @@ -3278,17 +3828,21 @@ class TestScopeOrdering(object): def test_func(m1): pass - """.format(autouse=not use_mark, use_mark=use_mark)) + """.format( + autouse=not use_mark, use_mark=use_mark + ) + ) items, _ = testdir.inline_genitems() request = FixtureRequest(items[0]) - assert request.fixturenames == 'm1 f1'.split() + assert request.fixturenames == "m1 f1".split() def test_func_closure_with_native_fixtures(self, testdir, monkeypatch): """Sanity check that verifies the order returned by the closures and the actual fixture execution order: The execution order may differ because of fixture inter-dependencies. 
""" - monkeypatch.setattr(pytest, 'FIXTURE_ORDER', [], raising=False) - testdir.makepyfile(""" + monkeypatch.setattr(pytest, "FIXTURE_ORDER", [], raising=False) + testdir.makepyfile( + """ import pytest FIXTURE_ORDER = pytest.FIXTURE_ORDER @@ -3318,17 +3872,19 @@ class TestScopeOrdering(object): FIXTURE_ORDER.append('f2') def test_foo(f1, m1, f2, s1): pass - """) + """ + ) items, _ = testdir.inline_genitems() request = FixtureRequest(items[0]) # order of fixtures based on their scope and position in the parameter list - assert request.fixturenames == 's1 my_tmpdir_factory m1 f1 f2 my_tmpdir'.split() + assert request.fixturenames == "s1 my_tmpdir_factory m1 f1 f2 my_tmpdir".split() testdir.runpytest() # actual fixture execution differs: dependent fixtures must be created first ("my_tmpdir") - assert pytest.FIXTURE_ORDER == 's1 my_tmpdir_factory m1 my_tmpdir f1 f2'.split() + assert pytest.FIXTURE_ORDER == "s1 my_tmpdir_factory m1 my_tmpdir f1 f2".split() def test_func_closure_module(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture(scope='module') @@ -3339,16 +3895,18 @@ class TestScopeOrdering(object): def test_func(f1, m1): pass - """) + """ + ) items, _ = testdir.inline_genitems() request = FixtureRequest(items[0]) - assert request.fixturenames == 'm1 f1'.split() + assert request.fixturenames == "m1 f1".split() def test_func_closure_scopes_reordered(self, testdir): """Test ensures that fixtures are ordered by scope regardless of the order of the parameters, although fixtures of same scope keep the declared order """ - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture(scope='session') @@ -3370,27 +3928,31 @@ class TestScopeOrdering(object): def test_func(self, f2, f1, c1, m1, s1): pass - """) + """ + ) items, _ = testdir.inline_genitems() request = FixtureRequest(items[0]) - assert request.fixturenames == 's1 m1 c1 f2 f1'.split() + assert request.fixturenames == "s1 m1 c1 f2 f1".split() def test_func_closure_same_scope_closer_root_first(self, testdir): """Auto-use fixtures of same scope are ordered by closer-to-root first""" - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest @pytest.fixture(scope='module', autouse=True) def m_conf(): pass - """) - testdir.makepyfile(**{ - 'sub/conftest.py': """ + """ + ) + testdir.makepyfile( + **{ + "sub/conftest.py": """ import pytest @pytest.fixture(scope='module', autouse=True) def m_sub(): pass """, - 'sub/test_func.py': """ + "sub/test_func.py": """ import pytest @pytest.fixture(scope='module', autouse=True) @@ -3401,20 +3963,25 @@ class TestScopeOrdering(object): def test_func(m_test, f1): pass - """}) + """, + } + ) items, _ = testdir.inline_genitems() request = FixtureRequest(items[0]) - assert request.fixturenames == 'm_conf m_sub m_test f1'.split() + assert request.fixturenames == "m_conf m_sub m_test f1".split() def test_func_closure_all_scopes_complex(self, testdir): """Complex test involving all scopes and mixing autouse with normal fixtures""" - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest @pytest.fixture(scope='session') def s1(): pass - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ import pytest @pytest.fixture(scope='module', autouse=True) @@ -3437,7 +4004,8 @@ class TestScopeOrdering(object): def test_func(self, f2, f1, m2): pass - """) + """ + ) items, _ = testdir.inline_genitems() request = FixtureRequest(items[0]) - assert request.fixturenames == 's1 m1 m2 c1 f2 f1'.split() + assert 
request.fixturenames == "s1 m1 m2 c1 f2 f1".split() diff --git a/testing/python/integration.py b/testing/python/integration.py index aade04fa9..2705bdc49 100644 --- a/testing/python/integration.py +++ b/testing/python/integration.py @@ -4,8 +4,10 @@ from _pytest import runner class TestOEJSKITSpecials(object): + def test_funcarg_non_pycollectobj(self, testdir): # rough jstests usage - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest def pytest_pycollect_makeitem(collector, name, obj): if name == "MyClass": @@ -13,25 +15,29 @@ class TestOEJSKITSpecials(object): class MyCollector(pytest.Collector): def reportinfo(self): return self.fspath, 3, "xyz" - """) - modcol = testdir.getmodulecol(""" + """ + ) + modcol = testdir.getmodulecol( + """ import pytest @pytest.fixture def arg1(request): return 42 class MyClass(object): pass - """) + """ + ) # this hook finds funcarg factories rep = runner.collect_one_node(collector=modcol) clscol = rep.result[0] clscol.obj = lambda arg1: None clscol.funcargs = {} pytest._fillfuncargs(clscol) - assert clscol.funcargs['arg1'] == 42 + assert clscol.funcargs["arg1"] == 42 def test_autouse_fixture(self, testdir): # rough jstests usage - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest def pytest_pycollect_makeitem(collector, name, obj): if name == "MyClass": @@ -39,8 +45,10 @@ class TestOEJSKITSpecials(object): class MyCollector(pytest.Collector): def reportinfo(self): return self.fspath, 3, "xyz" - """) - modcol = testdir.getmodulecol(""" + """ + ) + modcol = testdir.getmodulecol( + """ import pytest @pytest.fixture(autouse=True) def hello(): @@ -50,7 +58,8 @@ class TestOEJSKITSpecials(object): return 42 class MyClass(object): pass - """) + """ + ) # this hook finds funcarg factories rep = runner.collect_one_node(modcol) clscol = rep.result[0] @@ -61,6 +70,7 @@ class TestOEJSKITSpecials(object): def test_wrapped_getfslineno(): + def func(): pass @@ -72,12 +82,14 @@ def test_wrapped_getfslineno(): @wrap def wrapped_func(x, y, z): pass + fs, lineno = python.getfslineno(wrapped_func) fs2, lineno2 = python.getfslineno(wrap) assert lineno > lineno2, "getfslineno does not unwrap correctly" class TestMockDecoration(object): + def test_wrapped_getfuncargnames(self): from _pytest.compat import getfuncargnames @@ -100,8 +112,10 @@ class TestMockDecoration(object): from _pytest.compat import getfuncargnames def wrap(f): + def func(): pass + func.__wrapped__ = f func.patchings = ["qwe"] return func @@ -115,7 +129,8 @@ class TestMockDecoration(object): def test_unittest_mock(self, testdir): pytest.importorskip("unittest.mock") - testdir.makepyfile(""" + testdir.makepyfile( + """ import unittest.mock class T(unittest.TestCase): @unittest.mock.patch("os.path.abspath") @@ -123,13 +138,15 @@ class TestMockDecoration(object): import os os.path.abspath("hello") abspath.assert_any_call("hello") - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_unittest_mock_and_fixture(self, testdir): pytest.importorskip("unittest.mock") - testdir.makepyfile(""" + testdir.makepyfile( + """ import os.path import unittest.mock import pytest @@ -143,14 +160,16 @@ class TestMockDecoration(object): def test_hello(inject_me): import os os.path.abspath("hello") - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_unittest_mock_and_pypi_mock(self, testdir): pytest.importorskip("unittest.mock") pytest.importorskip("mock", "1.0.1") - testdir.makepyfile(""" + testdir.makepyfile( + """ import mock 
import unittest.mock class TestBoth(object): @@ -165,13 +184,15 @@ class TestMockDecoration(object): import os os.path.abspath("hello") abspath.assert_any_call("hello") - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) def test_mock(self, testdir): pytest.importorskip("mock", "1.0.1") - testdir.makepyfile(""" + testdir.makepyfile( + """ import os import unittest import mock @@ -191,17 +212,20 @@ class TestMockDecoration(object): os.path.normpath(os.path.abspath("hello")) normpath.assert_any_call("this") assert os.path.basename("123") == "mock_basename" - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) calls = reprec.getcalls("pytest_runtest_logreport") - funcnames = [call.report.location[2] for call in calls - if call.report.when == "call"] + funcnames = [ + call.report.location[2] for call in calls if call.report.when == "call" + ] assert funcnames == ["T.test_hello", "test_someting"] def test_mock_sorting(self, testdir): pytest.importorskip("mock", "1.0.1") - testdir.makepyfile(""" + testdir.makepyfile( + """ import os import mock @@ -214,7 +238,8 @@ class TestMockDecoration(object): @mock.patch("os.path.abspath") def test_three(abspath): pass - """) + """ + ) reprec = testdir.inline_run() calls = reprec.getreports("pytest_runtest_logreport") calls = [x for x in calls if x.when == "call"] @@ -223,7 +248,8 @@ class TestMockDecoration(object): def test_mock_double_patch_issue473(self, testdir): pytest.importorskip("mock", "1.0.1") - testdir.makepyfile(""" + testdir.makepyfile( + """ from mock import patch from pytest import mark @@ -233,20 +259,25 @@ class TestMockDecoration(object): class TestSimple(object): def test_simple_thing(self, mock_path, mock_getcwd): pass - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) class TestReRunTests(object): + def test_rerun(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ from _pytest.runner import runtestprotocol def pytest_runtest_protocol(item, nextitem): runtestprotocol(item, log=False, nextitem=nextitem) runtestprotocol(item, log=True, nextitem=nextitem) - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ import pytest count = 0 req = None @@ -259,36 +290,46 @@ class TestReRunTests(object): count += 1 def test_fix(fix): pass - """) + """ + ) result = testdir.runpytest("-s") - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ *fix count 0* *fix count 1* - """) - result.stdout.fnmatch_lines(""" + """ + ) + result.stdout.fnmatch_lines( + """ *2 passed* - """) + """ + ) def test_pytestconfig_is_session_scoped(): from _pytest.fixtures import pytestconfig + assert pytestconfig._pytestfixturefunction.scope == "session" class TestNoselikeTestAttribute(object): + def test_module_with_global_test(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ __test__ = False def test_hello(): pass - """) + """ + ) reprec = testdir.inline_run() assert not reprec.getfailedcollections() calls = reprec.getreports("pytest_runtest_logreport") assert not calls def test_class_and_method(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ __test__ = True def test_func(): pass @@ -298,14 +339,16 @@ class TestNoselikeTestAttribute(object): __test__ = False def test_method(self): pass - """) + """ + ) reprec = testdir.inline_run() assert not reprec.getfailedcollections() calls = reprec.getreports("pytest_runtest_logreport") assert not calls def test_unittest_class(self, testdir): - testdir.makepyfile(""" 
+ testdir.makepyfile( + """ import unittest class TC(unittest.TestCase): def test_1(self): @@ -314,7 +357,8 @@ class TestNoselikeTestAttribute(object): __test__ = False def test_2(self): pass - """) + """ + ) reprec = testdir.inline_run() assert not reprec.getfailedcollections() call = reprec.getcalls("pytest_collection_modifyitems")[0] @@ -328,7 +372,8 @@ class TestNoselikeTestAttribute(object): RPC wrapper), we shouldn't assume this meant "__test__ = True". """ # https://github.com/pytest-dev/pytest/issues/1204 - testdir.makepyfile(""" + testdir.makepyfile( + """ class MetaModel(type): def __getattr__(cls, key): @@ -344,7 +389,8 @@ class TestNoselikeTestAttribute(object): def test_blah(self): pass - """) + """ + ) reprec = testdir.inline_run() assert not reprec.getfailedcollections() call = reprec.getcalls("pytest_collection_modifyitems")[0] @@ -355,7 +401,8 @@ class TestNoselikeTestAttribute(object): class TestParameterize(object): def test_idfn_marker(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest def idfn(param): @@ -369,15 +416,14 @@ class TestParameterize(object): @pytest.mark.parametrize('a,b', [(0, 2), (1, 2)], ids=idfn) def test_params(a, b): pass - """) - res = testdir.runpytest('--collect-only') - res.stdout.fnmatch_lines([ - "*spam-2*", - "*ham-2*", - ]) + """ + ) + res = testdir.runpytest("--collect-only") + res.stdout.fnmatch_lines(["*spam-2*", "*ham-2*"]) def test_idfn_fixture(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest def idfn(param): @@ -398,9 +444,7 @@ class TestParameterize(object): def test_params(a, b): pass - """) - res = testdir.runpytest('--collect-only') - res.stdout.fnmatch_lines([ - "*spam-2*", - "*ham-2*", - ]) + """ + ) + res = testdir.runpytest("--collect-only") + res.stdout.fnmatch_lines(["*spam-2*", "*ham-2*"]) diff --git a/testing/python/metafunc.py b/testing/python/metafunc.py index db6dae318..e181d3131 100644 --- a/testing/python/metafunc.py +++ b/testing/python/metafunc.py @@ -14,6 +14,7 @@ PY3 = sys.version_info >= (3, 0) class TestMetafunc(object): + def Metafunc(self, func, config=None): # the unit tests of this class check if things work correctly # on the funcarg level, so we don't need a full blown @@ -34,6 +35,7 @@ class TestMetafunc(object): return python.Metafunc(definition, fixtureinfo, config) def test_no_funcargs(self, testdir): + def function(): pass @@ -42,27 +44,33 @@ class TestMetafunc(object): repr(metafunc._calls) def test_function_basic(self): + def func(arg1, arg2="qwe"): pass + metafunc = self.Metafunc(func) assert len(metafunc.fixturenames) == 1 - assert 'arg1' in metafunc.fixturenames + assert "arg1" in metafunc.fixturenames assert metafunc.function is func assert metafunc.cls is None def test_addcall_no_args(self): + def func(arg1): pass + metafunc = self.Metafunc(func) metafunc.addcall() assert len(metafunc._calls) == 1 call = metafunc._calls[0] assert call.id == "0" - assert not hasattr(call, 'param') + assert not hasattr(call, "param") def test_addcall_id(self): + def func(arg1): pass + metafunc = self.Metafunc(func) pytest.raises(ValueError, "metafunc.addcall(id=None)") @@ -75,8 +83,10 @@ class TestMetafunc(object): assert metafunc._calls[1].id == "2" def test_addcall_param(self): + def func(arg1): pass + metafunc = self.Metafunc(func) class obj(object): @@ -91,6 +101,7 @@ class TestMetafunc(object): assert metafunc._calls[2].getparam("arg1") == 1 def test_addcall_funcargs(self): + def func(x): pass @@ -103,13 +114,15 @@ class TestMetafunc(object): 
metafunc.addcall(funcargs={"x": 3}) pytest.raises(pytest.fail.Exception, "metafunc.addcall({'xyz': 0})") assert len(metafunc._calls) == 2 - assert metafunc._calls[0].funcargs == {'x': 2} - assert metafunc._calls[1].funcargs == {'x': 3} - assert not hasattr(metafunc._calls[1], 'param') + assert metafunc._calls[0].funcargs == {"x": 2} + assert metafunc._calls[1].funcargs == {"x": 3} + assert not hasattr(metafunc._calls[1], "param") def test_parametrize_error(self): + def func(x, y): pass + metafunc = self.Metafunc(func) metafunc.parametrize("x", [1, 2]) pytest.raises(ValueError, lambda: metafunc.parametrize("x", [5, 6])) @@ -119,53 +132,67 @@ class TestMetafunc(object): pytest.raises(ValueError, lambda: metafunc.parametrize("y", [5, 6])) def test_parametrize_bad_scope(self, testdir): + def func(x): pass + metafunc = self.Metafunc(func) try: - metafunc.parametrize("x", [1], scope='doggy') + metafunc.parametrize("x", [1], scope="doggy") except ValueError as ve: assert "has an unsupported scope value 'doggy'" in str(ve) def test_parametrize_and_id(self): + def func(x, y): pass + metafunc = self.Metafunc(func) - metafunc.parametrize("x", [1, 2], ids=['basic', 'advanced']) + metafunc.parametrize("x", [1, 2], ids=["basic", "advanced"]) metafunc.parametrize("y", ["abc", "def"]) ids = [x.id for x in metafunc._calls] assert ids == ["basic-abc", "basic-def", "advanced-abc", "advanced-def"] def test_parametrize_and_id_unicode(self): """Allow unicode strings for "ids" parameter in Python 2 (##1905)""" + def func(x): pass + metafunc = self.Metafunc(func) - metafunc.parametrize("x", [1, 2], ids=[u'basic', u'advanced']) + metafunc.parametrize("x", [1, 2], ids=[u"basic", u"advanced"]) ids = [x.id for x in metafunc._calls] assert ids == [u"basic", u"advanced"] def test_parametrize_with_wrong_number_of_ids(self, testdir): + def func(x, y): pass + metafunc = self.Metafunc(func) - pytest.raises(ValueError, lambda: - metafunc.parametrize("x", [1, 2], ids=['basic'])) + pytest.raises( + ValueError, lambda: metafunc.parametrize("x", [1, 2], ids=["basic"]) + ) - pytest.raises(ValueError, lambda: - metafunc.parametrize(("x", "y"), [("abc", "def"), - ("ghi", "jkl")], ids=["one"])) + pytest.raises( + ValueError, + lambda: metafunc.parametrize( + ("x", "y"), [("abc", "def"), ("ghi", "jkl")], ids=["one"] + ), + ) @pytest.mark.issue510 def test_parametrize_empty_list(self): + def func(y): pass class MockConfig(object): + def getini(self, name): - return '' + return "" @property def hook(self): @@ -176,11 +203,13 @@ class TestMetafunc(object): metafunc = self.Metafunc(func, MockConfig()) metafunc.parametrize("y", []) - assert 'skip' == metafunc._calls[0].marks[0].name + assert "skip" == metafunc._calls[0].marks[0].name def test_parametrize_with_userobjects(self): + def func(x, y): pass + metafunc = self.Metafunc(func) class A(object): @@ -194,15 +223,18 @@ class TestMetafunc(object): assert metafunc._calls[3].id == "x1-b" @hypothesis.given(strategies.text() | strategies.binary()) - @hypothesis.settings(deadline=400.0) # very close to std deadline and CI boxes are not reliable in CPU power + @hypothesis.settings( + deadline=400.0 + ) # very close to std deadline and CI boxes are not reliable in CPU power def test_idval_hypothesis(self, value): from _pytest.python import _idval - escaped = _idval(value, 'a', 6, None) + + escaped = _idval(value, "a", 6, None) assert isinstance(escaped, str) if PY3: - escaped.encode('ascii') + escaped.encode("ascii") else: - escaped.decode('ascii') + escaped.decode("ascii") def 
test_unicode_idval(self): """This tests that Unicode strings outside the ASCII character set get @@ -211,31 +243,20 @@ class TestMetafunc(object): """ from _pytest.python import _idval + values = [ + (u"", ""), + (u"ascii", "ascii"), + (u"ação", "a\\xe7\\xe3o"), + (u"josé@blah.com", "jos\\xe9@blah.com"), ( - u'', - '' - ), - ( - u'ascii', - 'ascii' - ), - ( - u'ação', - 'a\\xe7\\xe3o' - ), - ( - u'josé@blah.com', - 'jos\\xe9@blah.com' - ), - ( - u'δοκ.ιμή@παράδειγμα.δοκιμή', - '\\u03b4\\u03bf\\u03ba.\\u03b9\\u03bc\\u03ae@\\u03c0\\u03b1\\u03c1\\u03ac\\u03b4\\u03b5\\u03b9\\u03b3' - '\\u03bc\\u03b1.\\u03b4\\u03bf\\u03ba\\u03b9\\u03bc\\u03ae' + u"δοκ.ιμή@παράδειγμα.δοκιμή", + "\\u03b4\\u03bf\\u03ba.\\u03b9\\u03bc\\u03ae@\\u03c0\\u03b1\\u03c1\\u03ac\\u03b4\\u03b5\\u03b9\\u03b3" + "\\u03bc\\u03b1.\\u03b4\\u03bf\\u03ba\\u03b9\\u03bc\\u03ae", ), ] for val, expected in values: - assert _idval(val, 'a', 6, None) == expected + assert _idval(val, "a", 6, None) == expected def test_bytes_idval(self): """unittest for the expected behavior to obtain ids for parametrized @@ -245,14 +266,15 @@ class TestMetafunc(object): - python3: bytes objects are always escaped using "binary escape". """ from _pytest.python import _idval + values = [ - (b'', ''), - (b'\xc3\xb4\xff\xe4', '\\xc3\\xb4\\xff\\xe4'), - (b'ascii', 'ascii'), - (u'αρά'.encode('utf-8'), '\\xce\\xb1\\xcf\\x81\\xce\\xac'), + (b"", ""), + (b"\xc3\xb4\xff\xe4", "\\xc3\\xb4\\xff\\xe4"), + (b"ascii", "ascii"), + (u"αρά".encode("utf-8"), "\\xce\\xb1\\xcf\\x81\\xce\\xac"), ] for val, expected in values: - assert _idval(val, 'a', 6, None) == expected + assert _idval(val, "a", 6, None) == expected def test_class_or_function_idval(self): """unittest for the expected behavior to obtain ids for parametrized @@ -266,67 +288,77 @@ class TestMetafunc(object): def test_function(): pass - values = [ - (TestClass, "TestClass"), - (test_function, "test_function"), - ] + values = [(TestClass, "TestClass"), (test_function, "test_function")] for val, expected in values: - assert _idval(val, 'a', 6, None) == expected + assert _idval(val, "a", 6, None) == expected @pytest.mark.issue250 def test_idmaker_autoname(self): from _pytest.python import idmaker - result = idmaker(("a", "b"), [pytest.param("string", 1.0), - pytest.param("st-ring", 2.0)]) + + result = idmaker( + ("a", "b"), [pytest.param("string", 1.0), pytest.param("st-ring", 2.0)] + ) assert result == ["string-1.0", "st-ring-2.0"] - result = idmaker(("a", "b"), [pytest.param(object(), 1.0), - pytest.param(object(), object())]) + result = idmaker( + ("a", "b"), [pytest.param(object(), 1.0), pytest.param(object(), object())] + ) assert result == ["a0-1.0", "a1-b1"] # unicode mixing, issue250 result = idmaker( - (py.builtin._totext("a"), "b"), - [pytest.param({}, b'\xc3\xb4')]) - assert result == ['a0-\\xc3\\xb4'] + (py.builtin._totext("a"), "b"), [pytest.param({}, b"\xc3\xb4")] + ) + assert result == ["a0-\\xc3\\xb4"] def test_idmaker_with_bytes_regex(self): from _pytest.python import idmaker - result = idmaker(("a"), [pytest.param(re.compile(b'foo'), 1.0)]) + + result = idmaker(("a"), [pytest.param(re.compile(b"foo"), 1.0)]) assert result == ["foo"] def test_idmaker_native_strings(self): from _pytest.python import idmaker + totext = py.builtin._totext - result = idmaker(("a", "b"), [ - pytest.param(1.0, -1.1), - pytest.param(2, -202), - pytest.param("three", "three hundred"), - pytest.param(True, False), - pytest.param(None, None), - pytest.param(re.compile('foo'), re.compile('bar')), - pytest.param(str, int), - 
pytest.param(list("six"), [66, 66]), - pytest.param(set([7]), set("seven")), - pytest.param(tuple("eight"), (8, -8, 8)), - pytest.param(b'\xc3\xb4', b"name"), - pytest.param(b'\xc3\xb4', totext("other")), - ]) - assert result == ["1.0--1.1", - "2--202", - "three-three hundred", - "True-False", - "None-None", - "foo-bar", - "str-int", - "a7-b7", - "a8-b8", - "a9-b9", - "\\xc3\\xb4-name", - "\\xc3\\xb4-other", - ] + result = idmaker( + ("a", "b"), + [ + pytest.param(1.0, -1.1), + pytest.param(2, -202), + pytest.param("three", "three hundred"), + pytest.param(True, False), + pytest.param(None, None), + pytest.param(re.compile("foo"), re.compile("bar")), + pytest.param(str, int), + pytest.param(list("six"), [66, 66]), + pytest.param({7}, set("seven")), + pytest.param(tuple("eight"), (8, -8, 8)), + pytest.param(b"\xc3\xb4", b"name"), + pytest.param(b"\xc3\xb4", totext("other")), + ], + ) + assert ( + result + == [ + "1.0--1.1", + "2--202", + "three-three hundred", + "True-False", + "None-None", + "foo-bar", + "str-int", + "a7-b7", + "a8-b8", + "a9-b9", + "\\xc3\\xb4-name", + "\\xc3\\xb4-other", + ] + ) def test_idmaker_enum(self): from _pytest.python import idmaker + enum = pytest.importorskip("enum") e = enum.Enum("Foo", "one, two") result = idmaker(("a", "b"), [pytest.param(e.one, e.two)]) @@ -340,31 +372,34 @@ class TestMetafunc(object): if isinstance(val, Exception): return repr(val) - result = idmaker(("a", "b"), [ - pytest.param(10.0, IndexError()), - pytest.param(20, KeyError()), - pytest.param("three", [1, 2, 3]), - ], idfn=ids) - assert result == ["10.0-IndexError()", - "20-KeyError()", - "three-b2", - ] + result = idmaker( + ("a", "b"), + [ + pytest.param(10.0, IndexError()), + pytest.param(20, KeyError()), + pytest.param("three", [1, 2, 3]), + ], + idfn=ids, + ) + assert result == ["10.0-IndexError()", "20-KeyError()", "three-b2"] @pytest.mark.issue351 def test_idmaker_idfn_unique_names(self): from _pytest.python import idmaker def ids(val): - return 'a' + return "a" - result = idmaker(("a", "b"), [pytest.param(10.0, IndexError()), - pytest.param(20, KeyError()), - pytest.param("three", [1, 2, 3]), - ], idfn=ids) - assert result == ["a-a0", - "a-a1", - "a-a2", - ] + result = idmaker( + ("a", "b"), + [ + pytest.param(10.0, IndexError()), + pytest.param(20, KeyError()), + pytest.param("three", [1, 2, 3]), + ], + idfn=ids, + ) + assert result == ["a-a0", "a-a1", "a-a2"] @pytest.mark.issue351 def test_idmaker_idfn_exception(self): @@ -379,33 +414,41 @@ class TestMetafunc(object): rec = WarningsRecorder() with rec: - idmaker(("a", "b"), [ - pytest.param(10.0, IndexError()), - pytest.param(20, KeyError()), - pytest.param("three", [1, 2, 3]), - ], idfn=ids) + idmaker( + ("a", "b"), + [ + pytest.param(10.0, IndexError()), + pytest.param(20, KeyError()), + pytest.param("three", [1, 2, 3]), + ], + idfn=ids, + ) - assert [str(i.message) for i in rec.list] == [ - "Raised while trying to determine id of parameter a at position 0." - "\nUpdate your code as this will raise an error in pytest-4.0.", - "Raised while trying to determine id of parameter b at position 0." - "\nUpdate your code as this will raise an error in pytest-4.0.", - "Raised while trying to determine id of parameter a at position 1." - "\nUpdate your code as this will raise an error in pytest-4.0.", - "Raised while trying to determine id of parameter b at position 1." - "\nUpdate your code as this will raise an error in pytest-4.0.", - "Raised while trying to determine id of parameter a at position 2." 
- "\nUpdate your code as this will raise an error in pytest-4.0.", - "Raised while trying to determine id of parameter b at position 2." - "\nUpdate your code as this will raise an error in pytest-4.0.", - ] + assert ( + [str(i.message) for i in rec.list] + == [ + "Raised while trying to determine id of parameter a at position 0." + "\nUpdate your code as this will raise an error in pytest-4.0.", + "Raised while trying to determine id of parameter b at position 0." + "\nUpdate your code as this will raise an error in pytest-4.0.", + "Raised while trying to determine id of parameter a at position 1." + "\nUpdate your code as this will raise an error in pytest-4.0.", + "Raised while trying to determine id of parameter b at position 1." + "\nUpdate your code as this will raise an error in pytest-4.0.", + "Raised while trying to determine id of parameter a at position 2." + "\nUpdate your code as this will raise an error in pytest-4.0.", + "Raised while trying to determine id of parameter b at position 2." + "\nUpdate your code as this will raise an error in pytest-4.0.", + ] + ) def test_parametrize_ids_exception(self, testdir): """ :param testdir: the instance of Testdir class, a temporary test directory. """ - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest def ids(arg): @@ -414,54 +457,67 @@ class TestMetafunc(object): @pytest.mark.parametrize("arg", ["a", "b"], ids=ids) def test_foo(arg): pass - """) + """ + ) with pytest.warns(DeprecationWarning): result = testdir.runpytest("--collect-only") - result.stdout.fnmatch_lines([ - "", - " ", - " ", - ]) + result.stdout.fnmatch_lines( + [ + "", + " ", + " ", + ] + ) def test_idmaker_with_ids(self): from _pytest.python import idmaker - result = idmaker(("a", "b"), [pytest.param(1, 2), - pytest.param(3, 4)], - ids=["a", None]) + + result = idmaker( + ("a", "b"), [pytest.param(1, 2), pytest.param(3, 4)], ids=["a", None] + ) assert result == ["a", "3-4"] def test_idmaker_with_paramset_id(self): from _pytest.python import idmaker - result = idmaker(("a", "b"), [pytest.param(1, 2, id="me"), - pytest.param(3, 4, id="you")], - ids=["a", None]) + + result = idmaker( + ("a", "b"), + [pytest.param(1, 2, id="me"), pytest.param(3, 4, id="you")], + ids=["a", None], + ) assert result == ["me", "you"] def test_idmaker_with_ids_unique_names(self): from _pytest.python import idmaker - result = idmaker(("a"), map(pytest.param, [1, 2, 3, 4, 5]), - ids=["a", "a", "b", "c", "b"]) + + result = idmaker( + ("a"), map(pytest.param, [1, 2, 3, 4, 5]), ids=["a", "a", "b", "c", "b"] + ) assert result == ["a0", "a1", "b0", "c", "b1"] def test_addcall_and_parametrize(self): + def func(x, y): pass + metafunc = self.Metafunc(func) - metafunc.addcall({'x': 1}) - metafunc.parametrize('y', [2, 3]) + metafunc.addcall({"x": 1}) + metafunc.parametrize("y", [2, 3]) assert len(metafunc._calls) == 2 - assert metafunc._calls[0].funcargs == {'x': 1, 'y': 2} - assert metafunc._calls[1].funcargs == {'x': 1, 'y': 3} + assert metafunc._calls[0].funcargs == {"x": 1, "y": 2} + assert metafunc._calls[1].funcargs == {"x": 1, "y": 3} assert metafunc._calls[0].id == "0-2" assert metafunc._calls[1].id == "0-3" @pytest.mark.issue714 def test_parametrize_indirect(self): + def func(x, y): pass + metafunc = self.Metafunc(func) - metafunc.parametrize('x', [1], indirect=True) - metafunc.parametrize('y', [2, 3], indirect=True) + metafunc.parametrize("x", [1], indirect=True) + metafunc.parametrize("y", [2, 3], indirect=True) assert len(metafunc._calls) == 2 assert 
metafunc._calls[0].funcargs == {} assert metafunc._calls[1].funcargs == {} @@ -470,29 +526,35 @@ class TestMetafunc(object): @pytest.mark.issue714 def test_parametrize_indirect_list(self): + def func(x, y): pass + metafunc = self.Metafunc(func) - metafunc.parametrize('x, y', [('a', 'b')], indirect=['x']) - assert metafunc._calls[0].funcargs == dict(y='b') - assert metafunc._calls[0].params == dict(x='a') + metafunc.parametrize("x, y", [("a", "b")], indirect=["x"]) + assert metafunc._calls[0].funcargs == dict(y="b") + assert metafunc._calls[0].params == dict(x="a") @pytest.mark.issue714 def test_parametrize_indirect_list_all(self): + def func(x, y): pass + metafunc = self.Metafunc(func) - metafunc.parametrize('x, y', [('a', 'b')], indirect=['x', 'y']) + metafunc.parametrize("x, y", [("a", "b")], indirect=["x", "y"]) assert metafunc._calls[0].funcargs == {} - assert metafunc._calls[0].params == dict(x='a', y='b') + assert metafunc._calls[0].params == dict(x="a", y="b") @pytest.mark.issue714 def test_parametrize_indirect_list_empty(self): + def func(x, y): pass + metafunc = self.Metafunc(func) - metafunc.parametrize('x, y', [('a', 'b')], indirect=[]) - assert metafunc._calls[0].funcargs == dict(x='a', y='b') + metafunc.parametrize("x, y", [("a", "b")], indirect=[]) + assert metafunc._calls[0].funcargs == dict(x="a", y="b") assert metafunc._calls[0].params == {} @pytest.mark.issue714 @@ -506,7 +568,8 @@ class TestMetafunc(object): :param testdir: the instance of Testdir class, a temporary test directory. """ - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture(scope='function') def x(request): @@ -518,20 +581,20 @@ class TestMetafunc(object): def test_simple(x,y): assert len(x) == 3 assert len(y) == 1 - """) + """ + ) result = testdir.runpytest("-v") - result.stdout.fnmatch_lines([ - "*test_simple*a-b*", - "*1 passed*", - ]) + result.stdout.fnmatch_lines(["*test_simple*a-b*", "*1 passed*"]) @pytest.mark.issue714 def test_parametrize_indirect_list_error(self, testdir): + def func(x, y): pass + metafunc = self.Metafunc(func) with pytest.raises(ValueError): - metafunc.parametrize('x, y', [('a', 'b')], indirect=['x', 'z']) + metafunc.parametrize("x, y", [("a", "b")], indirect=["x", "z"]) @pytest.mark.issue714 def test_parametrize_uses_no_fixture_error_indirect_false(self, testdir): @@ -540,21 +603,22 @@ class TestMetafunc(object): fixtures in their test function, rather than silently ignoring this and letting the test potentially pass. 
""" - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.mark.parametrize('x, y', [('a', 'b')], indirect=False) def test_simple(x): assert len(x) == 3 - """) + """ + ) result = testdir.runpytest("--collect-only") - result.stdout.fnmatch_lines([ - "*uses no argument 'y'*", - ]) + result.stdout.fnmatch_lines(["*uses no argument 'y'*"]) @pytest.mark.issue714 def test_parametrize_uses_no_fixture_error_indirect_true(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture(scope='function') def x(request): @@ -566,15 +630,15 @@ class TestMetafunc(object): @pytest.mark.parametrize('x, y', [('a', 'b')], indirect=True) def test_simple(x): assert len(x) == 3 - """) + """ + ) result = testdir.runpytest("--collect-only") - result.stdout.fnmatch_lines([ - "*uses no fixture 'y'*", - ]) + result.stdout.fnmatch_lines(["*uses no fixture 'y'*"]) @pytest.mark.issue714 def test_parametrize_indirect_uses_no_fixture_error_indirect_string(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture(scope='function') def x(request): @@ -583,15 +647,15 @@ class TestMetafunc(object): @pytest.mark.parametrize('x, y', [('a', 'b')], indirect='y') def test_simple(x): assert len(x) == 3 - """) + """ + ) result = testdir.runpytest("--collect-only") - result.stdout.fnmatch_lines([ - "*uses no fixture 'y'*", - ]) + result.stdout.fnmatch_lines(["*uses no fixture 'y'*"]) @pytest.mark.issue714 def test_parametrize_indirect_uses_no_fixture_error_indirect_list(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture(scope='function') def x(request): @@ -600,15 +664,15 @@ class TestMetafunc(object): @pytest.mark.parametrize('x, y', [('a', 'b')], indirect=['y']) def test_simple(x): assert len(x) == 3 - """) + """ + ) result = testdir.runpytest("--collect-only") - result.stdout.fnmatch_lines([ - "*uses no fixture 'y'*", - ]) + result.stdout.fnmatch_lines(["*uses no fixture 'y'*"]) @pytest.mark.issue714 def test_parametrize_argument_not_in_indirect_list(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture(scope='function') def x(request): @@ -617,32 +681,37 @@ class TestMetafunc(object): @pytest.mark.parametrize('x, y', [('a', 'b')], indirect=['x']) def test_simple(x): assert len(x) == 3 - """) + """ + ) result = testdir.runpytest("--collect-only") - result.stdout.fnmatch_lines([ - "*uses no argument 'y'*", - ]) + result.stdout.fnmatch_lines(["*uses no argument 'y'*"]) - def test_parametrize_gives_indicative_error_on_function_with_default_argument(self, testdir): - testdir.makepyfile(""" + def test_parametrize_gives_indicative_error_on_function_with_default_argument( + self, testdir + ): + testdir.makepyfile( + """ import pytest @pytest.mark.parametrize('x, y', [('a', 'b')]) def test_simple(x, y=1): assert len(x) == 1 - """) + """ + ) result = testdir.runpytest("--collect-only") - result.stdout.fnmatch_lines([ - "*already takes an argument 'y' with a default value", - ]) + result.stdout.fnmatch_lines( + ["*already takes an argument 'y' with a default value"] + ) def test_addcalls_and_parametrize_indirect(self): + def func(x, y): pass + metafunc = self.Metafunc(func) metafunc.addcall(param="123") - metafunc.parametrize('x', [1], indirect=True) - metafunc.parametrize('y', [2, 3], indirect=True) + metafunc.parametrize("x", [1], indirect=True) + metafunc.parametrize("y", [2, 3], indirect=True) assert len(metafunc._calls) == 2 assert 
metafunc._calls[0].funcargs == {} assert metafunc._calls[1].funcargs == {} @@ -650,7 +719,8 @@ class TestMetafunc(object): assert metafunc._calls[1].params == dict(x=1, y=3) def test_parametrize_functional(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest def pytest_generate_tests(metafunc): metafunc.parametrize('x', [1,2], indirect=True) @@ -662,13 +732,12 @@ class TestMetafunc(object): def test_simple(x,y): assert x in (10,20) assert y == 2 - """) + """ + ) result = testdir.runpytest("-v") - result.stdout.fnmatch_lines([ - "*test_simple*1-2*", - "*test_simple*2-2*", - "*2 passed*", - ]) + result.stdout.fnmatch_lines( + ["*test_simple*1-2*", "*test_simple*2-2*", "*2 passed*"] + ) def test_parametrize_onearg(self): metafunc = self.Metafunc(lambda x: None) @@ -697,7 +766,8 @@ class TestMetafunc(object): assert metafunc._calls[1].id == "3-4" def test_parametrize_multiple_times(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest pytestmark = pytest.mark.parametrize("x", [1,2]) def test_func(x): @@ -706,23 +776,27 @@ class TestMetafunc(object): pytestmark = pytest.mark.parametrize("y", [3,4]) def test_meth(self, x, y): assert 0, x - """) + """ + ) result = testdir.runpytest() assert result.ret == 1 result.assert_outcomes(failed=6) def test_parametrize_CSV(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.mark.parametrize("x, y,", [(1,2), (2,3)]) def test_func(x, y): assert x+1 == y - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) def test_parametrize_class_scenarios(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ # same as doc/en/example/parametrize scenario example def pytest_generate_tests(metafunc): idlist = [] @@ -746,10 +820,12 @@ class TestMetafunc(object): def test_3(self, arg, arg2): pass - """) + """ + ) result = testdir.runpytest("-v") assert result.ret == 0 - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ *test_1*1* *test_2*1* *test_3*1* @@ -757,29 +833,37 @@ class TestMetafunc(object): *test_2*2* *test_3*2* *6 passed* - """) + """ + ) def test_format_args(self): + def function1(): pass - assert fixtures._format_args(function1) == '()' + + assert fixtures._format_args(function1) == "()" def function2(arg1): pass + assert fixtures._format_args(function2) == "(arg1)" def function3(arg1, arg2="qwe"): pass + assert fixtures._format_args(function3) == "(arg1, arg2='qwe')" def function4(arg1, *args, **kwargs): pass + assert fixtures._format_args(function4) == "(arg1, *args, **kwargs)" class TestMetafuncFunctional(object): + def test_attributes(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ # assumes that generate/provide runs in the same process import sys, pytest def pytest_generate_tests(metafunc): @@ -807,33 +891,37 @@ class TestMetafuncFunctional(object): # XXX actually have an unbound test function here? 
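# A minimal sketch (illustrative names, not part of this changeset) of the
# pytest_generate_tests hook that the TestMetafuncFunctional cases below
# drive end-to-end: a conftest or test module can parametrize any test that
# requests a given fixture name.
def pytest_generate_tests(metafunc):
    if "stringinput" in metafunc.fixturenames:
        metafunc.parametrize("stringinput", ["ab", "cd"])

def test_is_alpha(stringinput):
    assert stringinput.isalpha()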
assert metafunc.function == unbound assert metafunc.cls == TestClass - """) + """ + ) result = testdir.runpytest(p, "-v") result.assert_outcomes(passed=2) def test_addcall_with_two_funcargs_generators(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_generate_tests(metafunc): assert "arg1" in metafunc.fixturenames metafunc.addcall(funcargs=dict(arg1=1, arg2=2)) - """) - p = testdir.makepyfile(""" + """ + ) + p = testdir.makepyfile( + """ def pytest_generate_tests(metafunc): metafunc.addcall(funcargs=dict(arg1=1, arg2=1)) class TestClass(object): def test_myfunc(self, arg1, arg2): assert arg1 == arg2 - """) + """ + ) result = testdir.runpytest("-v", p) - result.stdout.fnmatch_lines([ - "*test_myfunc*0*PASS*", - "*test_myfunc*1*FAIL*", - "*1 failed, 1 passed*" - ]) + result.stdout.fnmatch_lines( + ["*test_myfunc*0*PASS*", "*test_myfunc*1*FAIL*", "*1 failed, 1 passed*"] + ) def test_two_functions(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ def pytest_generate_tests(metafunc): metafunc.addcall(param=10) metafunc.addcall(param=20) @@ -847,34 +935,42 @@ class TestMetafuncFunctional(object): assert arg1 == 10 def test_func2(arg1): assert arg1 in (10, 20) - """) + """ + ) result = testdir.runpytest("-v", p) - result.stdout.fnmatch_lines([ - "*test_func1*0*PASS*", - "*test_func1*1*FAIL*", - "*test_func2*PASS*", - "*1 failed, 3 passed*" - ]) + result.stdout.fnmatch_lines( + [ + "*test_func1*0*PASS*", + "*test_func1*1*FAIL*", + "*test_func2*PASS*", + "*1 failed, 3 passed*", + ] + ) def test_noself_in_method(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ def pytest_generate_tests(metafunc): assert 'xyz' not in metafunc.fixturenames class TestHello(object): def test_hello(xyz): pass - """) + """ + ) result = testdir.runpytest(p) result.assert_outcomes(passed=1) def test_generate_plugin_and_module(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_generate_tests(metafunc): assert "arg1" in metafunc.fixturenames metafunc.addcall(id="world", param=(2,100)) - """) - p = testdir.makepyfile(""" + """ + ) + p = testdir.makepyfile( + """ def pytest_generate_tests(metafunc): metafunc.addcall(param=(1,1), id="hello") @@ -889,31 +985,34 @@ class TestMetafuncFunctional(object): class TestClass(object): def test_myfunc(self, arg1, arg2): assert arg1 == arg2 - """) + """ + ) result = testdir.runpytest("-v", p) - result.stdout.fnmatch_lines([ - "*test_myfunc*hello*PASS*", - "*test_myfunc*world*FAIL*", - "*1 failed, 1 passed*" - ]) + result.stdout.fnmatch_lines( + [ + "*test_myfunc*hello*PASS*", + "*test_myfunc*world*FAIL*", + "*1 failed, 1 passed*", + ] + ) def test_generate_tests_in_class(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ class TestClass(object): def pytest_generate_tests(self, metafunc): metafunc.addcall(funcargs={'hello': 'world'}, id="hello") def test_myfunc(self, hello): assert hello == "world" - """) + """ + ) result = testdir.runpytest("-v", p) - result.stdout.fnmatch_lines([ - "*test_myfunc*hello*PASS*", - "*1 passed*" - ]) + result.stdout.fnmatch_lines(["*test_myfunc*hello*PASS*", "*1 passed*"]) def test_two_functions_not_same_instance(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ def pytest_generate_tests(metafunc): metafunc.addcall({'arg1': 10}) metafunc.addcall({'arg1': 20}) @@ -922,16 +1021,16 @@ class TestMetafuncFunctional(object): def test_func(self, arg1): assert not hasattr(self, 'x') self.x = 1 - 
""") + """ + ) result = testdir.runpytest("-v", p) - result.stdout.fnmatch_lines([ - "*test_func*0*PASS*", - "*test_func*1*PASS*", - "*2 pass*", - ]) + result.stdout.fnmatch_lines( + ["*test_func*0*PASS*", "*test_func*1*PASS*", "*2 pass*"] + ) def test_issue28_setup_method_in_generate_tests(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ def pytest_generate_tests(metafunc): metafunc.addcall({'arg1': 1}) @@ -940,29 +1039,29 @@ class TestMetafuncFunctional(object): assert arg1 == self.val def setup_method(self, func): self.val = 1 - """) + """ + ) result = testdir.runpytest(p) result.assert_outcomes(passed=1) def test_parametrize_functional2(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def pytest_generate_tests(metafunc): metafunc.parametrize("arg1", [1,2]) metafunc.parametrize("arg2", [4,5]) def test_hello(arg1, arg2): assert 0, (arg1, arg2) - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*(1, 4)*", - "*(1, 5)*", - "*(2, 4)*", - "*(2, 5)*", - "*4 failed*", - ]) + result.stdout.fnmatch_lines( + ["*(1, 4)*", "*(1, 5)*", "*(2, 4)*", "*(2, 5)*", "*4 failed*"] + ) def test_parametrize_and_inner_getfixturevalue(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ def pytest_generate_tests(metafunc): metafunc.parametrize("arg1", [1], indirect=True) metafunc.parametrize("arg2", [10], indirect=True) @@ -979,15 +1078,14 @@ class TestMetafuncFunctional(object): def test_func1(arg1, arg2): assert arg1 == 11 - """) + """ + ) result = testdir.runpytest("-v", p) - result.stdout.fnmatch_lines([ - "*test_func1*1*PASS*", - "*1 passed*" - ]) + result.stdout.fnmatch_lines(["*test_func1*1*PASS*", "*1 passed*"]) def test_parametrize_on_setup_arg(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ def pytest_generate_tests(metafunc): assert "arg1" in metafunc.fixturenames metafunc.parametrize("arg1", [1], indirect=True) @@ -1003,19 +1101,20 @@ class TestMetafuncFunctional(object): def test_func(arg2): assert arg2 == 10 - """) + """ + ) result = testdir.runpytest("-v", p) - result.stdout.fnmatch_lines([ - "*test_func*1*PASS*", - "*1 passed*" - ]) + result.stdout.fnmatch_lines(["*test_func*1*PASS*", "*1 passed*"]) def test_parametrize_with_ids(self, testdir): - testdir.makeini(""" + testdir.makeini( + """ [pytest] console_output_style=classic - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ import pytest def pytest_generate_tests(metafunc): metafunc.parametrize(("a", "b"), [(1,1), (1,2)], @@ -1023,16 +1122,17 @@ class TestMetafuncFunctional(object): def test_function(a, b): assert a == b - """) + """ + ) result = testdir.runpytest("-v") assert result.ret == 1 - result.stdout.fnmatch_lines_random([ - "*test_function*basic*PASSED", - "*test_function*advanced*FAILED", - ]) + result.stdout.fnmatch_lines_random( + ["*test_function*basic*PASSED", "*test_function*advanced*FAILED"] + ) def test_parametrize_without_ids(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest def pytest_generate_tests(metafunc): metafunc.parametrize(("a", "b"), @@ -1040,15 +1140,19 @@ class TestMetafuncFunctional(object): def test_function(a, b): assert 1 - """) + """ + ) result = testdir.runpytest("-v") - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ *test_function*1-b0* *test_function*1.3-b1* - """) + """ + ) def test_parametrize_with_None_in_ids(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest def 
pytest_generate_tests(metafunc): metafunc.parametrize(("a", "b"), [(1,1), (1,1), (1,2)], @@ -1056,18 +1160,22 @@ class TestMetafuncFunctional(object): def test_function(a, b): assert a == b - """) + """ + ) result = testdir.runpytest("-v") assert result.ret == 1 - result.stdout.fnmatch_lines_random([ - "*test_function*basic*PASSED*", - "*test_function*1-1*PASSED*", - "*test_function*advanced*FAILED*", - ]) + result.stdout.fnmatch_lines_random( + [ + "*test_function*basic*PASSED*", + "*test_function*1-1*PASSED*", + "*test_function*advanced*FAILED*", + ] + ) def test_fixture_parametrized_empty_ids(self, testdir): """Fixtures parametrized with empty ids cause an internal error (#1849).""" - testdir.makepyfile(''' + testdir.makepyfile( + """ import pytest @pytest.fixture(scope="module", ids=[], params=[]) @@ -1076,36 +1184,44 @@ class TestMetafuncFunctional(object): def test_temp(temp): pass - ''') + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines(['* 1 skipped *']) + result.stdout.fnmatch_lines(["* 1 skipped *"]) def test_parametrized_empty_ids(self, testdir): """Tests parametrized with empty ids cause an internal error (#1849).""" - testdir.makepyfile(''' + testdir.makepyfile( + """ import pytest @pytest.mark.parametrize('temp', [], ids=list()) def test_temp(temp): pass - ''') + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines(['* 1 skipped *']) + result.stdout.fnmatch_lines(["* 1 skipped *"]) def test_parametrized_ids_invalid_type(self, testdir): """Tests parametrized with ids as non-strings (#1857).""" - testdir.makepyfile(''' + testdir.makepyfile( + """ import pytest @pytest.mark.parametrize("x, expected", [(10, 20), (40, 80)], ids=(None, 2)) def test_ids_numbers(x,expected): assert x * 2 == expected - ''') + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines(['*ids must be list of strings, found: 2 (type: int)*']) + result.stdout.fnmatch_lines( + ["*ids must be list of strings, found: 2 (type: int)*"] + ) def test_parametrize_with_identical_ids_get_unique_names(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest def pytest_generate_tests(metafunc): metafunc.parametrize(("a", "b"), [(1,1), (1,2)], @@ -1113,18 +1229,18 @@ class TestMetafuncFunctional(object): def test_function(a, b): assert a == b - """) + """ + ) result = testdir.runpytest("-v") assert result.ret == 1 - result.stdout.fnmatch_lines_random([ - "*test_function*a0*PASSED*", - "*test_function*a1*FAILED*" - ]) + result.stdout.fnmatch_lines_random( + ["*test_function*a0*PASSED*", "*test_function*a1*FAILED*"] + ) - @pytest.mark.parametrize(("scope", "length"), - [("module", 2), ("function", 4)]) + @pytest.mark.parametrize(("scope", "length"), [("module", 2), ("function", 4)]) def test_parametrize_scope_overrides(self, testdir, scope, length): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest values = [] def pytest_generate_tests(metafunc): @@ -1141,12 +1257,15 @@ class TestMetafuncFunctional(object): assert arg in (1,2) def test_checklength(): assert len(values) == %d - """ % (scope, length)) + """ + % (scope, length) + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=5) def test_parametrize_issue323(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture(scope='module', params=range(966)) @@ -1157,12 +1276,14 @@ class TestMetafuncFunctional(object): pass def test_it2(foo): pass - """) + """ + ) reprec = testdir.inline_run("--collect-only") assert not 
reprec.getcalls("pytest_internalerror") def test_usefixtures_seen_in_generate_tests(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest def pytest_generate_tests(metafunc): assert "abc" in metafunc.fixturenames @@ -1171,28 +1292,38 @@ class TestMetafuncFunctional(object): @pytest.mark.usefixtures("abc") def test_function(): pass - """) + """ + ) reprec = testdir.runpytest() reprec.assert_outcomes(passed=1) def test_generate_tests_only_done_in_subdir(self, testdir): sub1 = testdir.mkpydir("sub1") sub2 = testdir.mkpydir("sub2") - sub1.join("conftest.py").write(_pytest._code.Source(""" + sub1.join("conftest.py").write( + _pytest._code.Source( + """ def pytest_generate_tests(metafunc): assert metafunc.function.__name__ == "test_1" - """)) - sub2.join("conftest.py").write(_pytest._code.Source(""" + """ + ) + ) + sub2.join("conftest.py").write( + _pytest._code.Source( + """ def pytest_generate_tests(metafunc): assert metafunc.function.__name__ == "test_2" - """)) + """ + ) + ) sub1.join("test_in_sub1.py").write("def test_1(): pass") sub2.join("test_in_sub2.py").write("def test_2(): pass") result = testdir.runpytest("--keep-duplicates", "-v", "-s", sub1, sub2, sub1) result.assert_outcomes(passed=3) def test_generate_same_function_names_issue403(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest def make_tests(): @@ -1203,25 +1334,31 @@ class TestMetafuncFunctional(object): test_x = make_tests() test_y = make_tests() - """) + """ + ) reprec = testdir.runpytest() reprec.assert_outcomes(passed=4) @pytest.mark.issue463 - @pytest.mark.parametrize('attr', ['parametrise', 'parameterize', - 'parameterise']) + @pytest.mark.parametrize("attr", ["parametrise", "parameterize", "parameterise"]) def test_parametrize_misspelling(self, testdir, attr): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest - @pytest.mark.{0}("x", range(2)) + @pytest.mark.{}("x", range(2)) def test_foo(x): pass - """.format(attr)) - reprec = testdir.inline_run('--collectonly') + """.format( + attr + ) + ) + reprec = testdir.inline_run("--collectonly") failures = reprec.getfailures() assert len(failures) == 1 - expectederror = "MarkerError: test_foo has '{0}', spelling should be 'parametrize'".format(attr) + expectederror = "MarkerError: test_foo has '{}', spelling should be 'parametrize'".format( + attr + ) assert expectederror in failures[0].longrepr.reprcrash.message @@ -1231,7 +1368,8 @@ class TestMetafuncFunctionalAuto(object): """ def test_parametrize_auto_scope(self, testdir): - testdir.makepyfile(''' + testdir.makepyfile( + """ import pytest @pytest.fixture(scope='session', autouse=True) @@ -1246,12 +1384,14 @@ class TestMetafuncFunctionalAuto(object): def test_2(animal): assert animal == 'fish' - ''') + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines(['* 3 passed *']) + result.stdout.fnmatch_lines(["* 3 passed *"]) def test_parametrize_auto_scope_indirect(self, testdir): - testdir.makepyfile(''' + testdir.makepyfile( + """ import pytest @pytest.fixture(scope='session') @@ -1267,12 +1407,14 @@ class TestMetafuncFunctionalAuto(object): def test_2(animal, echo): assert animal == 'fish' assert echo in (1, 2, 3) - ''') + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines(['* 3 passed *']) + result.stdout.fnmatch_lines(["* 3 passed *"]) def test_parametrize_auto_scope_override_fixture(self, testdir): - testdir.makepyfile(''' + testdir.makepyfile( + """ import pytest @pytest.fixture(scope='session', autouse=True) @@ 
-1282,12 +1424,14 @@ class TestMetafuncFunctionalAuto(object): @pytest.mark.parametrize('animal', ["dog", "cat"]) def test_1(animal): assert animal in ('dog', 'cat') - ''') + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines(['* 2 passed *']) + result.stdout.fnmatch_lines(["* 2 passed *"]) def test_parametrize_all_indirects(self, testdir): - testdir.makepyfile(''' + testdir.makepyfile( + """ import pytest @pytest.fixture() @@ -1307,12 +1451,14 @@ class TestMetafuncFunctionalAuto(object): def test_2(animal, echo): assert animal == 'fish' assert echo in (1, 2, 3) - ''') + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines(['* 3 passed *']) + result.stdout.fnmatch_lines(["* 3 passed *"]) def test_parametrize_issue634(self, testdir): - testdir.makepyfile(''' + testdir.makepyfile( + """ import pytest @pytest.fixture(scope='module') @@ -1337,14 +1483,15 @@ class TestMetafuncFunctionalAuto(object): if test_with: params = test_with metafunc.parametrize('foo', params, indirect=True) - ''') + """ + ) result = testdir.runpytest("-s") output = result.stdout.str() - assert output.count('preparing foo-2') == 1 - assert output.count('preparing foo-3') == 1 + assert output.count("preparing foo-2") == 1 + assert output.count("preparing foo-3") == 1 -@pytest.mark.filterwarnings('ignore:Applying marks directly to parameters') +@pytest.mark.filterwarnings("ignore:Applying marks directly to parameters") @pytest.mark.issue308 class TestMarkersWithParametrization(object): @@ -1364,10 +1511,10 @@ class TestMarkersWithParametrization(object): items = testdir.getitems(s) assert len(items) == 3 for item in items: - assert 'foo' in item.keywords - assert 'bar' not in items[0].keywords - assert 'bar' in items[1].keywords - assert 'bar' not in items[2].keywords + assert "foo" in item.keywords + assert "bar" not in items[0].keywords + assert "bar" in items[1].keywords + assert "bar" not in items[2].keywords def test_select_based_on_mark(self, testdir): s = """ @@ -1382,7 +1529,7 @@ class TestMarkersWithParametrization(object): assert n + 1 == expected """ testdir.makepyfile(s) - rec = testdir.inline_run("-m", 'foo') + rec = testdir.inline_run("-m", "foo") passed, skipped, fail = rec.listoutcomes() assert len(passed) == 1 assert len(skipped) == 0 @@ -1404,7 +1551,7 @@ class TestMarkersWithParametrization(object): """ items = testdir.getitems(s) assert len(items) == 3 - for mark in ['foo', 'bar']: + for mark in ["foo", "bar"]: assert mark not in items[0].keywords assert mark in items[1].keywords assert mark not in items[2].keywords @@ -1490,7 +1637,7 @@ class TestMarkersWithParametrization(object): reprec = testdir.inline_run() reprec.assertoutcome(passed=2, skipped=1) - @pytest.mark.parametrize('strict', [True, False]) + @pytest.mark.parametrize("strict", [True, False]) def test_xfail_passing_is_xpass(self, testdir, strict): s = """ import pytest @@ -1502,7 +1649,9 @@ class TestMarkersWithParametrization(object): ]) def test_increment(n, expected): assert n + 1 == expected - """.format(strict=strict) + """.format( + strict=strict + ) testdir.makepyfile(s) reprec = testdir.inline_run() passed, failed = (2, 1) if strict else (3, 0) @@ -1533,7 +1682,8 @@ class TestMarkersWithParametrization(object): @pytest.mark.issue290 def test_parametrize_ID_generation_string_int_works(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture @@ -1543,11 +1693,12 @@ class TestMarkersWithParametrization(object): 'limit', (0, '0')) def test_limit(limit, myfixture): 
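# Sketch of the id rules asserted by the parametrize-ids tests above
# (illustrative, not part of this changeset): an explicit id labels its
# case, None falls back to the auto-generated id, and duplicate ids are
# made unique with numeric suffixes (a0, a1, ...).
import pytest

@pytest.mark.parametrize(
    ("a", "b"), [(1, 1), (1, 1), (1, 2)], ids=["basic", None, "advanced"]
)
def test_ids_sketch(a, b):
    assert a <= b  # collects as [basic], [1-1], [advanced]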
return - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) - @pytest.mark.parametrize('strict', [True, False]) + @pytest.mark.parametrize("strict", [True, False]) def test_parametrize_marked_value(self, testdir, strict): s = """ import pytest @@ -1564,36 +1715,42 @@ class TestMarkersWithParametrization(object): ]) def test_increment(n, expected): assert n + 1 == expected - """.format(strict=strict) + """.format( + strict=strict + ) testdir.makepyfile(s) reprec = testdir.inline_run() passed, failed = (0, 2) if strict else (2, 0) reprec.assertoutcome(passed=passed, failed=failed) def test_pytest_make_parametrize_id(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_make_parametrize_id(config, val): return str(val * 2) - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ import pytest @pytest.mark.parametrize("x", range(2)) def test_func(x): pass - """) + """ + ) result = testdir.runpytest("-v") - result.stdout.fnmatch_lines([ - "*test_func*0*PASS*", - "*test_func*2*PASS*", - ]) + result.stdout.fnmatch_lines(["*test_func*0*PASS*", "*test_func*2*PASS*"]) def test_pytest_make_parametrize_id_with_argname(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_make_parametrize_id(config, val, argname): return str(val * 2 if argname == 'x' else val * 10) - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ import pytest @pytest.mark.parametrize("x", range(2)) @@ -1603,10 +1760,9 @@ class TestMarkersWithParametrization(object): @pytest.mark.parametrize("y", [1]) def test_func_b(y): pass - """) + """ + ) result = testdir.runpytest("-v") - result.stdout.fnmatch_lines([ - "*test_func_a*0*PASS*", - "*test_func_a*2*PASS*", - "*test_func_b*10*PASS*", - ]) + result.stdout.fnmatch_lines( + ["*test_func_a*0*PASS*", "*test_func_a*2*PASS*", "*test_func_b*10*PASS*"] + ) diff --git a/testing/python/raises.py b/testing/python/raises.py index 053426395..64199c3b6 100644 --- a/testing/python/raises.py +++ b/testing/python/raises.py @@ -4,6 +4,7 @@ import sys class TestRaises(object): + def test_raises(self): source = "int('qwe')" excinfo = pytest.raises(ValueError, source) @@ -18,19 +19,23 @@ class TestRaises(object): pytest.raises(SyntaxError, "qwe qwe qwe") def test_raises_function(self): - pytest.raises(ValueError, int, 'hello') + pytest.raises(ValueError, int, "hello") def test_raises_callable_no_exception(self): + class A(object): + def __call__(self): pass + try: pytest.raises(ValueError, A()) except pytest.raises.Exception: pass def test_raises_as_contextmanager(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ from __future__ import with_statement import py, pytest import _pytest._code @@ -52,30 +57,29 @@ class TestRaises(object): with pytest.raises(ZeroDivisionError): with pytest.raises(ValueError): 1/0 - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - '*3 passed*', - ]) + result.stdout.fnmatch_lines(["*3 passed*"]) def test_noclass(self): with pytest.raises(TypeError): - pytest.raises('wrong', lambda: None) + pytest.raises("wrong", lambda: None) def test_invalid_arguments_to_raises(self): - with pytest.raises(TypeError, match='unknown'): - with pytest.raises(TypeError, unknown='bogus'): + with pytest.raises(TypeError, match="unknown"): + with pytest.raises(TypeError, unknown="bogus"): raise ValueError() def test_tuple(self): with pytest.raises((KeyError, ValueError)): - raise KeyError('oops') + raise KeyError("oops") def 
test_no_raise_message(self): try: - pytest.raises(ValueError, int, '0') + pytest.raises(ValueError, int, "0") except pytest.raises.Exception as e: - assert e.msg == "DID NOT RAISE {0}".format(repr(ValueError)) + assert e.msg == "DID NOT RAISE {}".format(repr(ValueError)) else: assert False, "Expected pytest.raises.Exception" @@ -83,7 +87,7 @@ class TestRaises(object): with pytest.raises(ValueError): pass except pytest.raises.Exception as e: - assert e.msg == "DID NOT RAISE {0}".format(repr(ValueError)) + assert e.msg == "DID NOT RAISE {}".format(repr(ValueError)) else: assert False, "Expected pytest.raises.Exception" @@ -97,7 +101,7 @@ class TestRaises(object): else: assert False, "Expected pytest.raises.Exception" - @pytest.mark.parametrize('method', ['function', 'with']) + @pytest.mark.parametrize("method", ["function", "with"]) def test_raises_cyclic_reference(self, method): """ Ensure pytest.raises does not leave a reference cycle (#1965). @@ -105,11 +109,12 @@ class TestRaises(object): import gc class T(object): + def __call__(self): raise ValueError t = T() - if method == 'function': + if method == "function": pytest.raises(ValueError, t) else: with pytest.raises(ValueError): @@ -127,17 +132,19 @@ class TestRaises(object): def test_raises_match(self): msg = r"with base \d+" with pytest.raises(ValueError, match=msg): - int('asdf') + int("asdf") msg = "with base 10" with pytest.raises(ValueError, match=msg): - int('asdf') + int("asdf") msg = "with base 16" - expr = r"Pattern '{0}' not found in 'invalid literal for int\(\) with base 10: 'asdf''".format(msg) + expr = r"Pattern '{}' not found in 'invalid literal for int\(\) with base 10: 'asdf''".format( + msg + ) with pytest.raises(AssertionError, match=expr): with pytest.raises(ValueError, match=msg): - int('asdf', base=10) + int("asdf", base=10) def test_raises_match_wrong_type(self): """Raising an exception with the wrong type and match= given. @@ -146,15 +153,16 @@ class TestRaises(object): really relevant if we got a different exception. 
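# Sketch of the match= semantics these TestRaises cases exercise
# (illustrative, not part of this changeset): the pattern is re.search'd
# against str(excinfo.value), and a non-matching pattern fails with a
# "Pattern ... not found in ..." error.
import pytest

def test_match_sketch():
    with pytest.raises(ValueError, match=r"with base \d+"):
        int("asdf")  # raises "invalid literal for int() with base 10: 'asdf'"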
""" with pytest.raises(ValueError): - with pytest.raises(IndexError, match='nomatch'): - int('asdf') + with pytest.raises(IndexError, match="nomatch"): + int("asdf") def test_raises_exception_looks_iterable(self): from six import add_metaclass class Meta(type(object)): + def __getitem__(self, item): - return 1/0 + return 1 / 0 def __len__(self): return 1 @@ -163,5 +171,7 @@ class TestRaises(object): class ClassLooksIterableException(Exception): pass - with pytest.raises(Failed, match="DID NOT RAISE "): + with pytest.raises( + Failed, match="DID NOT RAISE " + ): pytest.raises(ClassLooksIterableException, lambda: None) diff --git a/testing/python/setup_only.py b/testing/python/setup_only.py index ab34312fc..4ae24b15a 100644 --- a/testing/python/setup_only.py +++ b/testing/python/setup_only.py @@ -1,14 +1,14 @@ import pytest -@pytest.fixture(params=['--setup-only', '--setup-plan', '--setup-show'], - scope='module') +@pytest.fixture(params=["--setup-only", "--setup-plan", "--setup-show"], scope="module") def mode(request): return request.param def test_show_only_active_fixtures(testdir, mode): - p = testdir.makepyfile(''' + p = testdir.makepyfile( + ''' import pytest @pytest.fixture def _arg0(): @@ -18,21 +18,21 @@ def test_show_only_active_fixtures(testdir, mode): """arg1 docstring""" def test_arg1(arg1): pass - ''') + ''' + ) result = testdir.runpytest(mode, p) assert result.ret == 0 - result.stdout.fnmatch_lines([ - '*SETUP F arg1*', - '*test_arg1 (fixtures used: arg1)*', - '*TEARDOWN F arg1*', - ]) + result.stdout.fnmatch_lines( + ["*SETUP F arg1*", "*test_arg1 (fixtures used: arg1)*", "*TEARDOWN F arg1*"] + ) assert "_arg0" not in result.stdout.str() def test_show_different_scopes(testdir, mode): - p = testdir.makepyfile(''' + p = testdir.makepyfile( + ''' import pytest @pytest.fixture def arg_function(): @@ -42,50 +42,60 @@ def test_show_different_scopes(testdir, mode): """session scoped fixture""" def test_arg1(arg_session, arg_function): pass - ''') + ''' + ) result = testdir.runpytest(mode, p) assert result.ret == 0 - result.stdout.fnmatch_lines([ - 'SETUP S arg_session*', - '*SETUP F arg_function*', - '*test_arg1 (fixtures used: arg_function, arg_session)*', - '*TEARDOWN F arg_function*', - 'TEARDOWN S arg_session*', - ]) + result.stdout.fnmatch_lines( + [ + "SETUP S arg_session*", + "*SETUP F arg_function*", + "*test_arg1 (fixtures used: arg_function, arg_session)*", + "*TEARDOWN F arg_function*", + "TEARDOWN S arg_session*", + ] + ) def test_show_nested_fixtures(testdir, mode): - testdir.makeconftest(''' + testdir.makeconftest( + ''' import pytest @pytest.fixture(scope='session') def arg_same(): """session scoped fixture""" - ''') - p = testdir.makepyfile(''' + ''' + ) + p = testdir.makepyfile( + ''' import pytest @pytest.fixture(scope='function') def arg_same(arg_same): """function scoped fixture""" def test_arg1(arg_same): pass - ''') + ''' + ) result = testdir.runpytest(mode, p) assert result.ret == 0 - result.stdout.fnmatch_lines([ - 'SETUP S arg_same*', - '*SETUP F arg_same (fixtures used: arg_same)*', - '*test_arg1 (fixtures used: arg_same)*', - '*TEARDOWN F arg_same*', - 'TEARDOWN S arg_same*', - ]) + result.stdout.fnmatch_lines( + [ + "SETUP S arg_same*", + "*SETUP F arg_same (fixtures used: arg_same)*", + "*test_arg1 (fixtures used: arg_same)*", + "*TEARDOWN F arg_same*", + "TEARDOWN S arg_same*", + ] + ) def test_show_fixtures_with_autouse(testdir, mode): - p = testdir.makepyfile(''' + p = testdir.makepyfile( + ''' import pytest @pytest.fixture def arg_function(): @@ 
-95,92 +105,104 @@ def test_show_fixtures_with_autouse(testdir, mode): """session scoped fixture""" def test_arg1(arg_function): pass - ''') + ''' + ) result = testdir.runpytest(mode, p) assert result.ret == 0 - result.stdout.fnmatch_lines([ - 'SETUP S arg_session*', - '*SETUP F arg_function*', - '*test_arg1 (fixtures used: arg_function, arg_session)*', - ]) + result.stdout.fnmatch_lines( + [ + "SETUP S arg_session*", + "*SETUP F arg_function*", + "*test_arg1 (fixtures used: arg_function, arg_session)*", + ] + ) def test_show_fixtures_with_parameters(testdir, mode): - testdir.makeconftest(''' + testdir.makeconftest( + ''' import pytest @pytest.fixture(scope='session', params=['foo', 'bar']) def arg_same(): """session scoped fixture""" - ''') - p = testdir.makepyfile(''' + ''' + ) + p = testdir.makepyfile( + ''' import pytest @pytest.fixture(scope='function') def arg_other(arg_same): """function scoped fixture""" def test_arg1(arg_other): pass - ''') + ''' + ) result = testdir.runpytest(mode, p) assert result.ret == 0 - result.stdout.fnmatch_lines([ - 'SETUP S arg_same?foo?', - 'TEARDOWN S arg_same?foo?', - 'SETUP S arg_same?bar?', - 'TEARDOWN S arg_same?bar?', - ]) + result.stdout.fnmatch_lines( + [ + "SETUP S arg_same?foo?", + "TEARDOWN S arg_same?foo?", + "SETUP S arg_same?bar?", + "TEARDOWN S arg_same?bar?", + ] + ) def test_show_fixtures_with_parameter_ids(testdir, mode): - testdir.makeconftest(''' + testdir.makeconftest( + ''' import pytest @pytest.fixture( scope='session', params=['foo', 'bar'], ids=['spam', 'ham']) def arg_same(): """session scoped fixture""" - ''') - p = testdir.makepyfile(''' + ''' + ) + p = testdir.makepyfile( + ''' import pytest @pytest.fixture(scope='function') def arg_other(arg_same): """function scoped fixture""" def test_arg1(arg_other): pass - ''') + ''' + ) result = testdir.runpytest(mode, p) assert result.ret == 0 - result.stdout.fnmatch_lines([ - 'SETUP S arg_same?spam?', - 'SETUP S arg_same?ham?', - ]) + result.stdout.fnmatch_lines( + ["SETUP S arg_same?spam?", "SETUP S arg_same?ham?"] + ) def test_show_fixtures_with_parameter_ids_function(testdir, mode): - p = testdir.makepyfile(''' + p = testdir.makepyfile( + """ import pytest @pytest.fixture(params=['foo', 'bar'], ids=lambda p: p.upper()) def foobar(): pass def test_foobar(foobar): pass - ''') + """ + ) result = testdir.runpytest(mode, p) assert result.ret == 0 - result.stdout.fnmatch_lines([ - '*SETUP F foobar?FOO?', - '*SETUP F foobar?BAR?', - ]) + result.stdout.fnmatch_lines(["*SETUP F foobar?FOO?", "*SETUP F foobar?BAR?"]) def test_dynamic_fixture_request(testdir): - p = testdir.makepyfile(''' + p = testdir.makepyfile( + """ import pytest @pytest.fixture() def dynamically_requested_fixture(): @@ -190,19 +212,23 @@ def test_dynamic_fixture_request(testdir): request.getfixturevalue('dynamically_requested_fixture') def test_dyn(dependent_fixture): pass - ''') + """ + ) - result = testdir.runpytest('--setup-only', p) + result = testdir.runpytest("--setup-only", p) assert result.ret == 0 - result.stdout.fnmatch_lines([ - '*SETUP F dynamically_requested_fixture', - '*TEARDOWN F dynamically_requested_fixture' - ]) + result.stdout.fnmatch_lines( + [ + "*SETUP F dynamically_requested_fixture", + "*TEARDOWN F dynamically_requested_fixture", + ] + ) def test_capturing(testdir): - p = testdir.makepyfile(''' + p = testdir.makepyfile( + """ import pytest, sys @pytest.fixture() def one(): @@ -213,31 +239,31 @@ def test_capturing(testdir): assert 0 def test_capturing(two): pass - ''') + """ + ) - result = 
testdir.runpytest('--setup-only', p) - result.stdout.fnmatch_lines([ - 'this should be captured', - 'this should also be captured' - ]) + result = testdir.runpytest("--setup-only", p) + result.stdout.fnmatch_lines( + ["this should be captured", "this should also be captured"] + ) def test_show_fixtures_and_execute_test(testdir): """ Verifies that setups are shown and tests are executed. """ - p = testdir.makepyfile(''' + p = testdir.makepyfile( + """ import pytest @pytest.fixture def arg(): assert True def test_arg(arg): assert False - ''') + """ + ) result = testdir.runpytest("--setup-show", p) assert result.ret == 1 - result.stdout.fnmatch_lines([ - '*SETUP F arg*', - '*test_arg (fixtures used: arg)F*', - '*TEARDOWN F arg*', - ]) + result.stdout.fnmatch_lines( + ["*SETUP F arg*", "*test_arg (fixtures used: arg)F*", "*TEARDOWN F arg*"] + ) diff --git a/testing/python/setup_plan.py b/testing/python/setup_plan.py index 8c9822469..0321939a8 100644 --- a/testing/python/setup_plan.py +++ b/testing/python/setup_plan.py @@ -1,19 +1,19 @@ def test_show_fixtures_and_test(testdir): """ Verifies that fixtures are not executed. """ - p = testdir.makepyfile(''' + p = testdir.makepyfile( + """ import pytest @pytest.fixture def arg(): assert False def test_arg(arg): assert False - ''') + """ + ) result = testdir.runpytest("--setup-plan", p) assert result.ret == 0 - result.stdout.fnmatch_lines([ - '*SETUP F arg*', - '*test_arg (fixtures used: arg)', - '*TEARDOWN F arg*', - ]) + result.stdout.fnmatch_lines( + ["*SETUP F arg*", "*test_arg (fixtures used: arg)", "*TEARDOWN F arg*"] + ) diff --git a/testing/python/show_fixtures_per_test.py b/testing/python/show_fixtures_per_test.py index 741f33946..e14344d4e 100644 --- a/testing/python/show_fixtures_per_test.py +++ b/testing/python/show_fixtures_per_test.py @@ -2,13 +2,14 @@ def test_no_items_should_not_show_output(testdir): - result = testdir.runpytest('--fixtures-per-test') - assert 'fixtures used by' not in result.stdout.str() + result = testdir.runpytest("--fixtures-per-test") + assert "fixtures used by" not in result.stdout.str() assert result.ret == 0 def test_fixtures_in_module(testdir): - p = testdir.makepyfile(''' + p = testdir.makepyfile( + ''' import pytest @pytest.fixture def _arg0(): @@ -18,22 +19,26 @@ def test_fixtures_in_module(testdir): """arg1 docstring""" def test_arg1(arg1): pass - ''') + ''' + ) result = testdir.runpytest("--fixtures-per-test", p) assert result.ret == 0 - result.stdout.fnmatch_lines([ - '*fixtures used by test_arg1*', - '*(test_fixtures_in_module.py:9)*', - 'arg1', - ' arg1 docstring', - ]) + result.stdout.fnmatch_lines( + [ + "*fixtures used by test_arg1*", + "*(test_fixtures_in_module.py:9)*", + "arg1", + " arg1 docstring", + ] + ) assert "_arg0" not in result.stdout.str() def test_fixtures_in_conftest(testdir): - testdir.makeconftest(''' + testdir.makeconftest( + ''' import pytest @pytest.fixture def arg1(): @@ -46,35 +51,41 @@ def test_fixtures_in_conftest(testdir): """arg3 docstring """ - ''') - p = testdir.makepyfile(''' + ''' + ) + p = testdir.makepyfile( + """ def test_arg2(arg2): pass def test_arg3(arg3): pass - ''') + """ + ) result = testdir.runpytest("--fixtures-per-test", p) assert result.ret == 0 - result.stdout.fnmatch_lines([ - '*fixtures used by test_arg2*', - '*(test_fixtures_in_conftest.py:2)*', - 'arg2', - ' arg2 docstring', - '*fixtures used by test_arg3*', - '*(test_fixtures_in_conftest.py:4)*', - 'arg1', - ' arg1 docstring', - 'arg2', - ' arg2 docstring', - 'arg3', - ' arg3', - ' docstring', - ]) + 
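# A tiny module (illustrative, not part of this changeset) one could run
# with the flags these tests drive (--setup-only / --setup-plan /
# --setup-show) to reproduce the report format they assert, roughly:
#   SETUP    S arg_session
#     SETUP  F arg_function
#     test_arg1 (fixtures used: arg_function, arg_session)
#     TEARDOWN F arg_function
#   TEARDOWN S arg_session
# where S marks session-scoped and F function-scoped fixtures.
import pytest

@pytest.fixture(scope="session")
def arg_session():
    """session scoped fixture"""

@pytest.fixture
def arg_function():
    """function scoped fixture"""

def test_arg1(arg_session, arg_function):
    pass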
result.stdout.fnmatch_lines( + [ + "*fixtures used by test_arg2*", + "*(test_fixtures_in_conftest.py:2)*", + "arg2", + " arg2 docstring", + "*fixtures used by test_arg3*", + "*(test_fixtures_in_conftest.py:4)*", + "arg1", + " arg1 docstring", + "arg2", + " arg2 docstring", + "arg3", + " arg3", + " docstring", + ] + ) def test_should_show_fixtures_used_by_test(testdir): - testdir.makeconftest(''' + testdir.makeconftest( + ''' import pytest @pytest.fixture def arg1(): @@ -82,30 +93,36 @@ def test_should_show_fixtures_used_by_test(testdir): @pytest.fixture def arg2(): """arg2 from conftest""" - ''') - p = testdir.makepyfile(''' + ''' + ) + p = testdir.makepyfile( + ''' import pytest @pytest.fixture def arg1(): """arg1 from testmodule""" def test_args(arg1, arg2): pass - ''') + ''' + ) result = testdir.runpytest("--fixtures-per-test", p) assert result.ret == 0 - result.stdout.fnmatch_lines([ - '*fixtures used by test_args*', - '*(test_should_show_fixtures_used_by_test.py:6)*', - 'arg1', - ' arg1 from testmodule', - 'arg2', - ' arg2 from conftest', - ]) + result.stdout.fnmatch_lines( + [ + "*fixtures used by test_args*", + "*(test_should_show_fixtures_used_by_test.py:6)*", + "arg1", + " arg1 from testmodule", + "arg2", + " arg2 from conftest", + ] + ) def test_verbose_include_private_fixtures_and_loc(testdir): - testdir.makeconftest(''' + testdir.makeconftest( + ''' import pytest @pytest.fixture def _arg1(): @@ -113,46 +130,54 @@ def test_verbose_include_private_fixtures_and_loc(testdir): @pytest.fixture def arg2(_arg1): """arg2 from conftest""" - ''') - p = testdir.makepyfile(''' + ''' + ) + p = testdir.makepyfile( + ''' import pytest @pytest.fixture def arg3(): """arg3 from testmodule""" def test_args(arg2, arg3): pass - ''') + ''' + ) result = testdir.runpytest("--fixtures-per-test", "-v", p) assert result.ret == 0 - result.stdout.fnmatch_lines([ - '*fixtures used by test_args*', - '*(test_verbose_include_private_fixtures_and_loc.py:6)*', - '_arg1 -- conftest.py:3', - ' _arg1 from conftest', - 'arg2 -- conftest.py:6', - ' arg2 from conftest', - 'arg3 -- test_verbose_include_private_fixtures_and_loc.py:3', - ' arg3 from testmodule', - ]) + result.stdout.fnmatch_lines( + [ + "*fixtures used by test_args*", + "*(test_verbose_include_private_fixtures_and_loc.py:6)*", + "_arg1 -- conftest.py:3", + " _arg1 from conftest", + "arg2 -- conftest.py:6", + " arg2 from conftest", + "arg3 -- test_verbose_include_private_fixtures_and_loc.py:3", + " arg3 from testmodule", + ] + ) def test_doctest_items(testdir): - testdir.makepyfile(''' + testdir.makepyfile( + ''' def foo(): """ >>> 1 + 1 2 """ - ''') - testdir.maketxtfile(''' + ''' + ) + testdir.maketxtfile( + """ >>> 1 + 1 2 - ''') - result = testdir.runpytest("--fixtures-per-test", "--doctest-modules", - "--doctest-glob=*.txt", "-v") + """ + ) + result = testdir.runpytest( + "--fixtures-per-test", "--doctest-modules", "--doctest-glob=*.txt", "-v" + ) assert result.ret == 0 - result.stdout.fnmatch_lines([ - '*collected 2 items*', - ]) + result.stdout.fnmatch_lines(["*collected 2 items*"]) diff --git a/testing/python/test_deprecations.py b/testing/python/test_deprecations.py index 5001f765f..b0c11f0b0 100644 --- a/testing/python/test_deprecations.py +++ b/testing/python/test_deprecations.py @@ -18,5 +18,5 @@ def test_pycollector_makeitem_is_deprecated(): collector = PyCollectorMock() with pytest.deprecated_call(): - collector.makeitem('foo', 'bar') + collector.makeitem("foo", "bar") assert collector.called diff --git a/testing/test_argcomplete.py 
b/testing/test_argcomplete.py index 7a5e25d69..b042de5ce 100644 --- a/testing/test_argcomplete.py +++ b/testing/test_argcomplete.py @@ -11,12 +11,13 @@ def equal_with_bash(prefix, ffc, fc, out=None): res_bash = set(fc(prefix)) retval = set(res) == res_bash if out: - out.write('equal_with_bash %s %s\n' % (retval, res)) + out.write("equal_with_bash %s %s\n" % (retval, res)) if not retval: - out.write(' python - bash: %s\n' % (set(res) - res_bash)) - out.write(' bash - python: %s\n' % (res_bash - set(res))) + out.write(" python - bash: %s\n" % (set(res) - res_bash)) + out.write(" bash - python: %s\n" % (res_bash - set(res))) return retval + # copied from argcomplete.completers as import from there # also pulls in argcomplete.__init__ which opens filedescriptor 9 # this gives an IOError at the end of testrun @@ -26,10 +27,9 @@ def _wrapcall(*args, **kargs): try: if sys.version_info > (2, 7): return subprocess.check_output(*args, **kargs).decode().splitlines() - if 'stdout' in kargs: - raise ValueError('stdout argument not allowed, it will be overridden.') - process = subprocess.Popen( - stdout=subprocess.PIPE, *args, **kargs) + if "stdout" in kargs: + raise ValueError("stdout argument not allowed, it will be overridden.") + process = subprocess.Popen(stdout=subprocess.PIPE, *args, **kargs) output, unused_err = process.communicate() retcode = process.poll() if retcode: @@ -43,47 +43,57 @@ def _wrapcall(*args, **kargs): class FilesCompleter(object): - 'File completer class, optionally takes a list of allowed extensions' + "File completer class, optionally takes a list of allowed extensions" def __init__(self, allowednames=(), directories=True): # Fix if someone passes in a string instead of a list if type(allowednames) is str: allowednames = [allowednames] - self.allowednames = [x.lstrip('*').lstrip('.') for x in allowednames] + self.allowednames = [x.lstrip("*").lstrip(".") for x in allowednames] self.directories = directories def __call__(self, prefix, **kwargs): completion = [] if self.allowednames: if self.directories: - files = _wrapcall(['bash', '-c', - "compgen -A directory -- '{p}'".format(p=prefix)]) - completion += [f + '/' for f in files] + files = _wrapcall( + ["bash", "-c", "compgen -A directory -- '{p}'".format(p=prefix)] + ) + completion += [f + "/" for f in files] for x in self.allowednames: - completion += _wrapcall(['bash', '-c', - "compgen -A file -X '!*.{0}' -- '{p}'".format(x, p=prefix)]) + completion += _wrapcall( + [ + "bash", + "-c", + "compgen -A file -X '!*.{0}' -- '{p}'".format(x, p=prefix), + ] + ) else: - completion += _wrapcall(['bash', '-c', - "compgen -A file -- '{p}'".format(p=prefix)]) + completion += _wrapcall( + ["bash", "-c", "compgen -A file -- '{p}'".format(p=prefix)] + ) - anticomp = _wrapcall(['bash', '-c', - "compgen -A directory -- '{p}'".format(p=prefix)]) + anticomp = _wrapcall( + ["bash", "-c", "compgen -A directory -- '{p}'".format(p=prefix)] + ) completion = list(set(completion) - set(anticomp)) if self.directories: - completion += [f + '/' for f in anticomp] + completion += [f + "/" for f in anticomp] return completion class TestArgComplete(object): + @pytest.mark.skipif("sys.platform in ('win32', 'darwin')") def test_compare_with_compgen(self): from _pytest._argcomplete import FastFilesCompleter + ffc = FastFilesCompleter() fc = FilesCompleter() - for x in ['/', '/d', '/data', 'qqq', '']: + for x in ["/", "/d", "/data", "qqq", ""]: assert equal_with_bash(x, ffc, fc, out=sys.stdout) @pytest.mark.skipif("sys.platform in ('win32', 'darwin')") @@ 
-92,7 +102,8 @@ class TestArgComplete(object): ls /usr/ """ from _pytest._argcomplete import FastFilesCompleter + ffc = FastFilesCompleter() fc = FilesCompleter() - for x in '/usr/'.split(): + for x in "/usr/".split(): assert not equal_with_bash(x, ffc, fc, out=sys.stdout) diff --git a/testing/test_assertion.py b/testing/test_assertion.py index 51229d3e1..6cf5b5313 100644 --- a/testing/test_assertion.py +++ b/testing/test_assertion.py @@ -19,23 +19,23 @@ def mock_config(): verbose = False def getoption(self, name): - if name == 'verbose': + if name == "verbose": return self.verbose - raise KeyError('Not mocked out: %s' % name) + raise KeyError("Not mocked out: %s" % name) return Config() class TestImportHookInstallation(object): - @pytest.mark.parametrize('initial_conftest', [True, False]) - @pytest.mark.parametrize('mode', ['plain', 'rewrite']) + @pytest.mark.parametrize("initial_conftest", [True, False]) + @pytest.mark.parametrize("mode", ["plain", "rewrite"]) def test_conftest_assertion_rewrite(self, testdir, initial_conftest, mode): """Test that conftest files are using assertion rewrite on import. (#1619) """ - testdir.tmpdir.join('foo/tests').ensure(dir=1) - conftest_path = 'conftest.py' if initial_conftest else 'foo/conftest.py' + testdir.tmpdir.join("foo/tests").ensure(dir=1) + conftest_path = "conftest.py" if initial_conftest else "foo/conftest.py" contents = { conftest_path: """ import pytest @@ -45,17 +45,17 @@ class TestImportHookInstallation(object): assert values.pop(0) == value return check """, - 'foo/tests/test_foo.py': """ + "foo/tests/test_foo.py": """ def test(check_first): check_first([10, 30], 30) - """ + """, } testdir.makepyfile(**contents) - result = testdir.runpytest_subprocess('--assert=%s' % mode) - if mode == 'plain': - expected = 'E AssertionError' - elif mode == 'rewrite': - expected = '*assert 10 == 30*' + result = testdir.runpytest_subprocess("--assert=%s" % mode) + if mode == "plain": + expected = "E AssertionError" + elif mode == "rewrite": + expected = "*assert 10 == 30*" else: assert 0 result.stdout.fnmatch_lines([expected]) @@ -65,25 +65,25 @@ class TestImportHookInstallation(object): Assertions in the pytester plugin must also benefit from assertion rewriting (#1920). """ - testdir.makepyfile(""" + testdir.makepyfile( + """ pytest_plugins = ['pytester'] def test_dummy_failure(testdir): # how meta! 
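# What the --assert=mode matrix in these TestImportHookInstallation cases
# checks (sketch, not part of this changeset): under the default "rewrite"
# mode pytest rewrites assert statements at import time so failures show
# intermediate values, while "plain" leaves only a bare AssertionError.
#   pytest --assert=rewrite  ->  E  assert 10 == 30
#   pytest --assert=plain    ->  E  AssertionError
def test_rewrite_demo():
    values = [10, 30]
    assert values.pop(0) == 30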
testdir.makepyfile('def test(): assert 0') r = testdir.inline_run() r.assertoutcome(passed=1) - """) + """ + ) result = testdir.runpytest_subprocess() - result.stdout.fnmatch_lines([ - '*assert 1 == 0*', - ]) + result.stdout.fnmatch_lines(["*assert 1 == 0*"]) - @pytest.mark.parametrize('mode', ['plain', 'rewrite']) + @pytest.mark.parametrize("mode", ["plain", "rewrite"]) def test_pytest_plugins_rewrite(self, testdir, mode): contents = { - 'conftest.py': """ + "conftest.py": """ pytest_plugins = ['ham'] """, - 'ham.py': """ + "ham.py": """ import pytest @pytest.fixture def check_first(): @@ -91,70 +91,72 @@ class TestImportHookInstallation(object): assert values.pop(0) == value return check """, - 'test_foo.py': """ + "test_foo.py": """ def test_foo(check_first): check_first([10, 30], 30) """, } testdir.makepyfile(**contents) - result = testdir.runpytest_subprocess('--assert=%s' % mode) - if mode == 'plain': - expected = 'E AssertionError' - elif mode == 'rewrite': - expected = '*assert 10 == 30*' + result = testdir.runpytest_subprocess("--assert=%s" % mode) + if mode == "plain": + expected = "E AssertionError" + elif mode == "rewrite": + expected = "*assert 10 == 30*" else: assert 0 result.stdout.fnmatch_lines([expected]) - @pytest.mark.parametrize('mode', ['str', 'list']) + @pytest.mark.parametrize("mode", ["str", "list"]) def test_pytest_plugins_rewrite_module_names(self, testdir, mode): """Test that pluginmanager correct marks pytest_plugins variables for assertion rewriting if they are defined as plain strings or list of strings (#1888). """ - plugins = '"ham"' if mode == 'str' else '["ham"]' + plugins = '"ham"' if mode == "str" else '["ham"]' contents = { - 'conftest.py': """ + "conftest.py": """ pytest_plugins = {plugins} - """.format(plugins=plugins), - 'ham.py': """ + """.format( + plugins=plugins + ), + "ham.py": """ import pytest """, - 'test_foo.py': """ + "test_foo.py": """ def test_foo(pytestconfig): assert 'ham' in pytestconfig.pluginmanager.rewrite_hook._must_rewrite """, } testdir.makepyfile(**contents) - result = testdir.runpytest_subprocess('--assert=rewrite') + result = testdir.runpytest_subprocess("--assert=rewrite") assert result.ret == 0 def test_pytest_plugins_rewrite_module_names_correctly(self, testdir): """Test that we match files correctly when they are marked for rewriting (#2939).""" contents = { - 'conftest.py': """ + "conftest.py": """ pytest_plugins = "ham" """, - 'ham.py': "", - 'hamster.py': "", - 'test_foo.py': """ + "ham.py": "", + "hamster.py": "", + "test_foo.py": """ def test_foo(pytestconfig): assert pytestconfig.pluginmanager.rewrite_hook.find_module('ham') is not None assert pytestconfig.pluginmanager.rewrite_hook.find_module('hamster') is None """, } testdir.makepyfile(**contents) - result = testdir.runpytest_subprocess('--assert=rewrite') + result = testdir.runpytest_subprocess("--assert=rewrite") assert result.ret == 0 - @pytest.mark.parametrize('mode', ['plain', 'rewrite']) - @pytest.mark.parametrize('plugin_state', ['development', 'installed']) + @pytest.mark.parametrize("mode", ["plain", "rewrite"]) + @pytest.mark.parametrize("plugin_state", ["development", "installed"]) def test_installed_plugin_rewrite(self, testdir, mode, plugin_state): # Make sure the hook is installed early enough so that plugins # installed via setuptools are rewritten. 
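# The installed-plugin cases below depend on modules being marked for
# rewriting before they are imported; a minimal sketch of the explicit API,
# as also exercised by test_rewrite_ast further down:
import pytest

pytest.register_assert_rewrite("pkg.helper")
# ...then import pkg.helper; asserts inside it will be rewritten.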
- testdir.tmpdir.join('hampkg').ensure(dir=1) + testdir.tmpdir.join("hampkg").ensure(dir=1) contents = { - 'hampkg/__init__.py': """ + "hampkg/__init__.py": """ import pytest @pytest.fixture @@ -163,7 +165,7 @@ class TestImportHookInstallation(object): assert values.pop(0) == value return check """, - 'spamplugin.py': """ + "spamplugin.py": """ import pytest from hampkg import check_first2 @@ -173,7 +175,7 @@ class TestImportHookInstallation(object): assert values.pop(0) == value return check """, - 'mainwrapper.py': """ + "mainwrapper.py": """ import pytest, pkg_resources plugin_state = "{plugin_state}" @@ -209,8 +211,10 @@ class TestImportHookInstallation(object): pkg_resources.iter_entry_points = iter_entry_points pytest.main() - """.format(plugin_state=plugin_state), - 'test_foo.py': """ + """.format( + plugin_state=plugin_state + ), + "test_foo.py": """ def test(check_first): check_first([10, 30], 30) @@ -219,42 +223,44 @@ class TestImportHookInstallation(object): """, } testdir.makepyfile(**contents) - result = testdir.run(sys.executable, 'mainwrapper.py', '-s', '--assert=%s' % mode) - if mode == 'plain': - expected = 'E AssertionError' - elif mode == 'rewrite': - expected = '*assert 10 == 30*' + result = testdir.run( + sys.executable, "mainwrapper.py", "-s", "--assert=%s" % mode + ) + if mode == "plain": + expected = "E AssertionError" + elif mode == "rewrite": + expected = "*assert 10 == 30*" else: assert 0 result.stdout.fnmatch_lines([expected]) def test_rewrite_ast(self, testdir): - testdir.tmpdir.join('pkg').ensure(dir=1) + testdir.tmpdir.join("pkg").ensure(dir=1) contents = { - 'pkg/__init__.py': """ + "pkg/__init__.py": """ import pytest pytest.register_assert_rewrite('pkg.helper') """, - 'pkg/helper.py': """ + "pkg/helper.py": """ def tool(): a, b = 2, 3 assert a == b """, - 'pkg/plugin.py': """ + "pkg/plugin.py": """ import pytest, pkg.helper @pytest.fixture def tool(): return pkg.helper.tool """, - 'pkg/other.py': """ + "pkg/other.py": """ values = [3, 2] def tool(): assert values.pop() == 3 """, - 'conftest.py': """ + "conftest.py": """ pytest_plugins = ['pkg.plugin'] """, - 'test_pkg.py': """ + "test_pkg.py": """ import pkg.other def test_tool(tool): tool() @@ -263,23 +269,29 @@ class TestImportHookInstallation(object): """, } testdir.makepyfile(**contents) - result = testdir.runpytest_subprocess('--assert=rewrite') - result.stdout.fnmatch_lines(['>*assert a == b*', - 'E*assert 2 == 3*', - '>*assert values.pop() == 3*', - 'E*AssertionError']) + result = testdir.runpytest_subprocess("--assert=rewrite") + result.stdout.fnmatch_lines( + [ + ">*assert a == b*", + "E*assert 2 == 3*", + ">*assert values.pop() == 3*", + "E*AssertionError", + ] + ) def test_register_assert_rewrite_checks_types(self): with pytest.raises(TypeError): - pytest.register_assert_rewrite(['pytest_tests_internal_non_existing']) - pytest.register_assert_rewrite('pytest_tests_internal_non_existing', - 'pytest_tests_internal_non_existing2') + pytest.register_assert_rewrite(["pytest_tests_internal_non_existing"]) + pytest.register_assert_rewrite( + "pytest_tests_internal_non_existing", "pytest_tests_internal_non_existing2" + ) class TestBinReprIntegration(object): def test_pytest_assertrepr_compare_called(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest values = [] def pytest_assertrepr_compare(op, left, right): @@ -288,91 +300,107 @@ class TestBinReprIntegration(object): @pytest.fixture def list(request): return values - """) - testdir.makepyfile(""" + """ + ) + 
testdir.makepyfile( + """ def test_hello(): assert 0 == 1 def test_check(list): assert list == [("==", 0, 1)] - """) + """ + ) result = testdir.runpytest("-v") - result.stdout.fnmatch_lines([ - "*test_hello*FAIL*", - "*test_check*PASS*", - ]) + result.stdout.fnmatch_lines(["*test_hello*FAIL*", "*test_check*PASS*"]) def callequal(left, right, verbose=False): config = mock_config() config.verbose = verbose - return plugin.pytest_assertrepr_compare(config, '==', left, right) + return plugin.pytest_assertrepr_compare(config, "==", left, right) class TestAssert_reprcompare(object): + def test_different_types(self): - assert callequal([0, 1], 'foo') is None + assert callequal([0, 1], "foo") is None def test_summary(self): summary = callequal([0, 1], [0, 2])[0] assert len(summary) < 65 def test_text_diff(self): - diff = callequal('spam', 'eggs')[1:] - assert '- spam' in diff - assert '+ eggs' in diff + diff = callequal("spam", "eggs")[1:] + assert "- spam" in diff + assert "+ eggs" in diff def test_text_skipping(self): - lines = callequal('a' * 50 + 'spam', 'a' * 50 + 'eggs') - assert 'Skipping' in lines[1] + lines = callequal("a" * 50 + "spam", "a" * 50 + "eggs") + assert "Skipping" in lines[1] for line in lines: - assert 'a' * 50 not in line + assert "a" * 50 not in line def test_text_skipping_verbose(self): - lines = callequal('a' * 50 + 'spam', 'a' * 50 + 'eggs', verbose=True) - assert '- ' + 'a' * 50 + 'spam' in lines - assert '+ ' + 'a' * 50 + 'eggs' in lines + lines = callequal("a" * 50 + "spam", "a" * 50 + "eggs", verbose=True) + assert "- " + "a" * 50 + "spam" in lines + assert "+ " + "a" * 50 + "eggs" in lines def test_multiline_text_diff(self): - left = 'foo\nspam\nbar' - right = 'foo\neggs\nbar' + left = "foo\nspam\nbar" + right = "foo\neggs\nbar" diff = callequal(left, right) - assert '- spam' in diff - assert '+ eggs' in diff + assert "- spam" in diff + assert "+ eggs" in diff def test_list(self): expl = callequal([0, 1], [0, 2]) assert len(expl) > 1 @pytest.mark.parametrize( - ['left', 'right', 'expected'], [ - ([0, 1], [0, 2], """ + ["left", "right", "expected"], + [ + ( + [0, 1], + [0, 2], + """ Full diff: - [0, 1] ? ^ + [0, 2] ? ^ - """), - ({0: 1}, {0: 2}, """ + """, + ), + ( + {0: 1}, + {0: 2}, + """ Full diff: - {0: 1} ? ^ + {0: 2} ? ^ - """), - (set([0, 1]), set([0, 2]), """ + """, + ), + ( + {0, 1}, + {0, 2}, + """ Full diff: - set([0, 1]) ? ^ + set([0, 2]) ? ^ - """ if not PY3 else """ + """ + if not PY3 + else """ Full diff: - {0, 1} ? ^ + {0, 2} ? ^ - """) - ] + """, + ), + ], ) def test_iterable_full_diff(self, left, right, expected): """Test the full diff assertion failure explanation. @@ -381,8 +409,8 @@ class TestAssert_reprcompare(object): when verbose is True, then ndiff of the pprint is returned. 
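# The TestAssert_reprcompare cases invoke pytest's builtin "==" explanation
# directly (via callequal above); the same hook is open to plugins and
# conftest files. A minimal sketch (illustrative output, not part of this
# changeset):
def pytest_assertrepr_compare(config, op, left, right):
    if op == "==" and isinstance(left, str) and isinstance(right, str):
        return ["string comparison:", "  left:  %r" % left, "  right: %r" % right]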
""" expl = callequal(left, right, verbose=False) - assert expl[-1] == 'Use -v to get the full diff' - expl = '\n'.join(callequal(left, right, verbose=True)) + assert expl[-1] == "Use -v to get the full diff" + expl = "\n".join(callequal(left, right, verbose=True)) assert expl.endswith(textwrap.dedent(expected).strip()) def test_list_different_lengths(self): @@ -392,48 +420,46 @@ class TestAssert_reprcompare(object): assert len(expl) > 1 def test_dict(self): - expl = callequal({'a': 0}, {'a': 1}) + expl = callequal({"a": 0}, {"a": 1}) assert len(expl) > 1 def test_dict_omitting(self): - lines = callequal({'a': 0, 'b': 1}, {'a': 1, 'b': 1}) - assert lines[1].startswith('Omitting 1 identical item') - assert 'Common items' not in lines + lines = callequal({"a": 0, "b": 1}, {"a": 1, "b": 1}) + assert lines[1].startswith("Omitting 1 identical item") + assert "Common items" not in lines for line in lines[1:]: - assert 'b' not in line + assert "b" not in line def test_dict_omitting_with_verbosity_1(self): """ Ensure differing items are visible for verbosity=1 (#1512) """ - lines = callequal({'a': 0, 'b': 1}, {'a': 1, 'b': 1}, verbose=1) - assert lines[1].startswith('Omitting 1 identical item') - assert lines[2].startswith('Differing items') + lines = callequal({"a": 0, "b": 1}, {"a": 1, "b": 1}, verbose=1) + assert lines[1].startswith("Omitting 1 identical item") + assert lines[2].startswith("Differing items") assert lines[3] == "{'a': 0} != {'a': 1}" - assert 'Common items' not in lines + assert "Common items" not in lines def test_dict_omitting_with_verbosity_2(self): - lines = callequal({'a': 0, 'b': 1}, {'a': 1, 'b': 1}, verbose=2) - assert lines[1].startswith('Common items:') - assert 'Omitting' not in lines[1] + lines = callequal({"a": 0, "b": 1}, {"a": 1, "b": 1}, verbose=2) + assert lines[1].startswith("Common items:") + assert "Omitting" not in lines[1] assert lines[2] == "{'b': 1}" def test_set(self): - expl = callequal(set([0, 1]), set([0, 2])) + expl = callequal({0, 1}, {0, 2}) assert len(expl) > 1 def test_frozenzet(self): - expl = callequal(frozenset([0, 1]), set([0, 2])) + expl = callequal(frozenset([0, 1]), {0, 2}) assert len(expl) > 1 def test_Sequence(self): - col = py.builtin._tryimport( - "collections.abc", - "collections", - "sys") + col = py.builtin._tryimport("collections.abc", "collections", "sys") if not hasattr(col, "MutableSequence"): pytest.skip("cannot import MutableSequence") MutableSequence = col.MutableSequence class TestSequence(MutableSequence): # works with a Sequence subclass + def __init__(self, iterable): self.elements = list(iterable) @@ -462,62 +488,71 @@ class TestAssert_reprcompare(object): assert len(expl) > 1 def test_list_bad_repr(self): + class A(object): + def __repr__(self): raise ValueError(42) + expl = callequal([], [A()]) - assert 'ValueError' in "".join(expl) - expl = callequal({}, {'1': A()}) - assert 'faulty' in "".join(expl) + assert "ValueError" in "".join(expl) + expl = callequal({}, {"1": A()}) + assert "faulty" in "".join(expl) def test_one_repr_empty(self): """ the faulty empty string repr did trigger an unbound local error in _diff_text """ + class A(str): + def __repr__(self): - return '' - expl = callequal(A(), '') + return "" + + expl = callequal(A(), "") assert not expl def test_repr_no_exc(self): - expl = ' '.join(callequal('foo', 'bar')) - assert 'raised in repr()' not in expl + expl = " ".join(callequal("foo", "bar")) + assert "raised in repr()" not in expl def test_unicode(self): - left = py.builtin._totext('£€', 'utf-8') - right 
= py.builtin._totext('£', 'utf-8') + left = py.builtin._totext("£€", "utf-8") + right = py.builtin._totext("£", "utf-8") expl = callequal(left, right) - assert expl[0] == py.builtin._totext("'£€' == '£'", 'utf-8') - assert expl[1] == py.builtin._totext('- £€', 'utf-8') - assert expl[2] == py.builtin._totext('+ £', 'utf-8') + assert expl[0] == py.builtin._totext("'£€' == '£'", "utf-8") + assert expl[1] == py.builtin._totext("- £€", "utf-8") + assert expl[2] == py.builtin._totext("+ £", "utf-8") def test_nonascii_text(self): """ :issue: 877 non ascii python2 str caused a UnicodeDecodeError """ + class A(str): + def __repr__(self): - return '\xff' - expl = callequal(A(), '1') + return "\xff" + + expl = callequal(A(), "1") assert expl def test_format_nonascii_explanation(self): - assert util.format_explanation('λ') + assert util.format_explanation("λ") def test_mojibake(self): # issue 429 - left = 'e' - right = '\xc3\xa9' + left = "e" + right = "\xc3\xa9" if not isinstance(left, py.builtin.bytes): - left = py.builtin.bytes(left, 'utf-8') - right = py.builtin.bytes(right, 'utf-8') + left = py.builtin.bytes(left, "utf-8") + right = py.builtin.bytes(right, "utf-8") expl = callequal(left, right) for line in expl: assert isinstance(line, py.builtin.text) - msg = py.builtin._totext('\n').join(expl) + msg = py.builtin._totext("\n").join(expl) assert msg @@ -525,91 +560,87 @@ class TestFormatExplanation(object): def test_special_chars_full(self, testdir): # Issue 453, for the bug this would raise IndexError - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_foo(): assert '\\n}' == '' - """) + """ + ) result = testdir.runpytest() assert result.ret == 1 - result.stdout.fnmatch_lines([ - "*AssertionError*", - ]) + result.stdout.fnmatch_lines(["*AssertionError*"]) def test_fmt_simple(self): - expl = 'assert foo' - assert util.format_explanation(expl) == 'assert foo' + expl = "assert foo" + assert util.format_explanation(expl) == "assert foo" def test_fmt_where(self): - expl = '\n'.join(['assert 1', - '{1 = foo', - '} == 2']) - res = '\n'.join(['assert 1 == 2', - ' + where 1 = foo']) + expl = "\n".join(["assert 1", "{1 = foo", "} == 2"]) + res = "\n".join(["assert 1 == 2", " + where 1 = foo"]) assert util.format_explanation(expl) == res def test_fmt_and(self): - expl = '\n'.join(['assert 1', - '{1 = foo', - '} == 2', - '{2 = bar', - '}']) - res = '\n'.join(['assert 1 == 2', - ' + where 1 = foo', - ' + and 2 = bar']) + expl = "\n".join(["assert 1", "{1 = foo", "} == 2", "{2 = bar", "}"]) + res = "\n".join(["assert 1 == 2", " + where 1 = foo", " + and 2 = bar"]) assert util.format_explanation(expl) == res def test_fmt_where_nested(self): - expl = '\n'.join(['assert 1', - '{1 = foo', - '{foo = bar', - '}', - '} == 2']) - res = '\n'.join(['assert 1 == 2', - ' + where 1 = foo', - ' + where foo = bar']) + expl = "\n".join(["assert 1", "{1 = foo", "{foo = bar", "}", "} == 2"]) + res = "\n".join(["assert 1 == 2", " + where 1 = foo", " + where foo = bar"]) assert util.format_explanation(expl) == res def test_fmt_newline(self): - expl = '\n'.join(['assert "foo" == "bar"', - '~- foo', - '~+ bar']) - res = '\n'.join(['assert "foo" == "bar"', - ' - foo', - ' + bar']) + expl = "\n".join(['assert "foo" == "bar"', "~- foo", "~+ bar"]) + res = "\n".join(['assert "foo" == "bar"', " - foo", " + bar"]) assert util.format_explanation(expl) == res def test_fmt_newline_escaped(self): - expl = '\n'.join(['assert foo == bar', - 'baz']) - res = 'assert foo == bar\\nbaz' + expl = "\n".join(["assert foo == bar", "baz"]) + 
res = "assert foo == bar\\nbaz" assert util.format_explanation(expl) == res def test_fmt_newline_before_where(self): - expl = '\n'.join(['the assertion message here', - '>assert 1', - '{1 = foo', - '} == 2', - '{2 = bar', - '}']) - res = '\n'.join(['the assertion message here', - 'assert 1 == 2', - ' + where 1 = foo', - ' + and 2 = bar']) + expl = "\n".join( + [ + "the assertion message here", + ">assert 1", + "{1 = foo", + "} == 2", + "{2 = bar", + "}", + ] + ) + res = "\n".join( + [ + "the assertion message here", + "assert 1 == 2", + " + where 1 = foo", + " + and 2 = bar", + ] + ) assert util.format_explanation(expl) == res def test_fmt_multi_newline_before_where(self): - expl = '\n'.join(['the assertion', - '~message here', - '>assert 1', - '{1 = foo', - '} == 2', - '{2 = bar', - '}']) - res = '\n'.join(['the assertion', - ' message here', - 'assert 1 == 2', - ' + where 1 = foo', - ' + and 2 = bar']) + expl = "\n".join( + [ + "the assertion", + "~message here", + ">assert 1", + "{1 = foo", + "} == 2", + "{2 = bar", + "}", + ] + ) + res = "\n".join( + [ + "the assertion", + " message here", + "assert 1 == 2", + " + where 1 = foo", + " + and 2 = bar", + ] + ) assert util.format_explanation(expl) == res @@ -627,58 +658,58 @@ class TestTruncateExplanation(object): assert result == expl def test_doesnt_truncate_at_when_input_is_5_lines_and_LT_max_chars(self): - expl = ['a' * 100 for x in range(5)] + expl = ["a" * 100 for x in range(5)] result = truncate._truncate_explanation(expl, max_lines=8, max_chars=8 * 80) assert result == expl def test_truncates_at_8_lines_when_given_list_of_empty_strings(self): - expl = ['' for x in range(50)] + expl = ["" for x in range(50)] result = truncate._truncate_explanation(expl, max_lines=8, max_chars=100) assert result != expl assert len(result) == 8 + self.LINES_IN_TRUNCATION_MSG assert "Full output truncated" in result[-1] assert "43 lines hidden" in result[-1] - last_line_before_trunc_msg = result[- self.LINES_IN_TRUNCATION_MSG - 1] + last_line_before_trunc_msg = result[-self.LINES_IN_TRUNCATION_MSG - 1] assert last_line_before_trunc_msg.endswith("...") def test_truncates_at_8_lines_when_first_8_lines_are_LT_max_chars(self): - expl = ['a' for x in range(100)] + expl = ["a" for x in range(100)] result = truncate._truncate_explanation(expl, max_lines=8, max_chars=8 * 80) assert result != expl assert len(result) == 8 + self.LINES_IN_TRUNCATION_MSG assert "Full output truncated" in result[-1] assert "93 lines hidden" in result[-1] - last_line_before_trunc_msg = result[- self.LINES_IN_TRUNCATION_MSG - 1] + last_line_before_trunc_msg = result[-self.LINES_IN_TRUNCATION_MSG - 1] assert last_line_before_trunc_msg.endswith("...") def test_truncates_at_8_lines_when_first_8_lines_are_EQ_max_chars(self): - expl = ['a' * 80 for x in range(16)] + expl = ["a" * 80 for x in range(16)] result = truncate._truncate_explanation(expl, max_lines=8, max_chars=8 * 80) assert result != expl assert len(result) == 8 + self.LINES_IN_TRUNCATION_MSG assert "Full output truncated" in result[-1] assert "9 lines hidden" in result[-1] - last_line_before_trunc_msg = result[- self.LINES_IN_TRUNCATION_MSG - 1] + last_line_before_trunc_msg = result[-self.LINES_IN_TRUNCATION_MSG - 1] assert last_line_before_trunc_msg.endswith("...") def test_truncates_at_4_lines_when_first_4_lines_are_GT_max_chars(self): - expl = ['a' * 250 for x in range(10)] + expl = ["a" * 250 for x in range(10)] result = truncate._truncate_explanation(expl, max_lines=8, max_chars=999) assert result != expl assert 
len(result) == 4 + self.LINES_IN_TRUNCATION_MSG assert "Full output truncated" in result[-1] assert "7 lines hidden" in result[-1] - last_line_before_trunc_msg = result[- self.LINES_IN_TRUNCATION_MSG - 1] + last_line_before_trunc_msg = result[-self.LINES_IN_TRUNCATION_MSG - 1] assert last_line_before_trunc_msg.endswith("...") def test_truncates_at_1_line_when_first_line_is_GT_max_chars(self): - expl = ['a' * 250 for x in range(1000)] + expl = ["a" * 250 for x in range(1000)] result = truncate._truncate_explanation(expl, max_lines=8, max_chars=100) assert result != expl assert len(result) == 1 + self.LINES_IN_TRUNCATION_MSG assert "Full output truncated" in result[-1] assert "1000 lines hidden" in result[-1] - last_line_before_trunc_msg = result[- self.LINES_IN_TRUNCATION_MSG - 1] + last_line_before_trunc_msg = result[-self.LINES_IN_TRUNCATION_MSG - 1] assert last_line_before_trunc_msg.endswith("...") def test_full_output_truncated(self, monkeypatch, testdir): @@ -687,140 +718,160 @@ class TestTruncateExplanation(object): line_count = 7 line_len = 100 expected_truncated_lines = 2 - testdir.makepyfile(r""" + testdir.makepyfile( + r""" def test_many_lines(): a = list([str(i)[0] * %d for i in range(%d)]) b = a[::2] a = '\n'.join(map(str, a)) b = '\n'.join(map(str, b)) assert a == b - """ % (line_len, line_count)) - monkeypatch.delenv('CI', raising=False) + """ + % (line_len, line_count) + ) + monkeypatch.delenv("CI", raising=False) result = testdir.runpytest() # without -vv, truncate the message showing a few diff lines only - result.stdout.fnmatch_lines([ - "*- 1*", - "*- 3*", - "*- 5*", - "*truncated (%d lines hidden)*use*-vv*" % expected_truncated_lines, - ]) + result.stdout.fnmatch_lines( + [ + "*- 1*", + "*- 3*", + "*- 5*", + "*truncated (%d lines hidden)*use*-vv*" % expected_truncated_lines, + ] + ) - result = testdir.runpytest('-vv') - result.stdout.fnmatch_lines([ - "* 6*", - ]) + result = testdir.runpytest("-vv") + result.stdout.fnmatch_lines(["* 6*"]) - monkeypatch.setenv('CI', '1') + monkeypatch.setenv("CI", "1") result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "* 6*", - ]) + result.stdout.fnmatch_lines(["* 6*"]) def test_python25_compile_issue257(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_rewritten(): assert 1 == 2 # some comment - """) + """ + ) result = testdir.runpytest() assert result.ret == 1 - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ *E*assert 1 == 2* *1 failed* - """) + """ + ) def test_rewritten(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_rewritten(): assert "@py_builtins" in globals() - """) + """ + ) assert testdir.runpytest().ret == 0 def test_reprcompare_notin(mock_config): detail = plugin.pytest_assertrepr_compare( - mock_config, 'not in', 'foo', 'aaafoobbb')[1:] - assert detail == ["'foo' is contained here:", ' aaafoobbb', '? +++'] + mock_config, "not in", "foo", "aaafoobbb" + )[ + 1: + ] + assert detail == ["'foo' is contained here:", " aaafoobbb", "? +++"] def test_reprcompare_whitespaces(mock_config): - detail = plugin.pytest_assertrepr_compare( - mock_config, '==', '\r\n', '\n') - assert detail == [ - r"'\r\n' == '\n'", - r"Strings contain only whitespace, escaping them using repr()", - r"- '\r\n'", - r"? --", - r"+ '\n'", - ] + detail = plugin.pytest_assertrepr_compare(mock_config, "==", "\r\n", "\n") + assert ( + detail + == [ + r"'\r\n' == '\n'", + r"Strings contain only whitespace, escaping them using repr()", + r"- '\r\n'", + r"? 
--", + r"+ '\n'", + ] + ) def test_pytest_assertrepr_compare_integration(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_hello(): x = set(range(100)) y = x.copy() y.remove(50) assert x == y - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*def test_hello():*", - "*assert x == y*", - "*E*Extra items*left*", - "*E*50*", - ]) + result.stdout.fnmatch_lines( + ["*def test_hello():*", "*assert x == y*", "*E*Extra items*left*", "*E*50*"] + ) def test_sequence_comparison_uses_repr(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_hello(): x = set("hello x") y = set("hello y") assert x == y - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*def test_hello():*", - "*assert x == y*", - "*E*Extra items*left*", - "*E*'x'*", - "*E*Extra items*right*", - "*E*'y'*", - ]) + result.stdout.fnmatch_lines( + [ + "*def test_hello():*", + "*assert x == y*", + "*E*Extra items*left*", + "*E*'x'*", + "*E*Extra items*right*", + "*E*'y'*", + ] + ) def test_assertrepr_loaded_per_dir(testdir): - testdir.makepyfile(test_base=['def test_base(): assert 1 == 2']) - a = testdir.mkdir('a') - a_test = a.join('test_a.py') - a_test.write('def test_a(): assert 1 == 2') - a_conftest = a.join('conftest.py') + testdir.makepyfile(test_base=["def test_base(): assert 1 == 2"]) + a = testdir.mkdir("a") + a_test = a.join("test_a.py") + a_test.write("def test_a(): assert 1 == 2") + a_conftest = a.join("conftest.py") a_conftest.write('def pytest_assertrepr_compare(): return ["summary a"]') - b = testdir.mkdir('b') - b_test = b.join('test_b.py') - b_test.write('def test_b(): assert 1 == 2') - b_conftest = b.join('conftest.py') + b = testdir.mkdir("b") + b_test = b.join("test_b.py") + b_test.write("def test_b(): assert 1 == 2") + b_conftest = b.join("conftest.py") b_conftest.write('def pytest_assertrepr_compare(): return ["summary b"]') result = testdir.runpytest() - result.stdout.fnmatch_lines([ - '*def test_base():*', - '*E*assert 1 == 2*', - '*def test_a():*', - '*E*assert summary a*', - '*def test_b():*', - '*E*assert summary b*']) + result.stdout.fnmatch_lines( + [ + "*def test_base():*", + "*E*assert 1 == 2*", + "*def test_a():*", + "*E*assert summary a*", + "*def test_b():*", + "*E*assert summary b*", + ] + ) def test_assertion_options(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_hello(): x = 3 assert x == 4 - """) + """ + ) result = testdir.runpytest() assert "3 == 4" in result.stdout.str() result = testdir.runpytest_subprocess("--assert=plain") @@ -828,72 +879,82 @@ def test_assertion_options(testdir): def test_triple_quoted_string_issue113(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_hello(): assert "" == ''' - '''""") + '''""" + ) result = testdir.runpytest("--fulltrace") - result.stdout.fnmatch_lines([ - "*1 failed*", - ]) - assert 'SyntaxError' not in result.stdout.str() + result.stdout.fnmatch_lines(["*1 failed*"]) + assert "SyntaxError" not in result.stdout.str() def test_traceback_failure(testdir): - p1 = testdir.makepyfile(""" + p1 = testdir.makepyfile( + """ def g(): return 2 def f(x): assert x == g() def test_onefails(): f(3) - """) + """ + ) result = testdir.runpytest(p1, "--tb=long") - result.stdout.fnmatch_lines([ - "*test_traceback_failure.py F*", - "====* FAILURES *====", - "____*____", - "", - " def test_onefails():", - "> f(3)", - "", - "*test_*.py:6: ", - "_ _ _ *", - # "", - " def f(x):", - "> assert x == g()", - "E assert 3 == 2", - "E + where 2 = 
g()", - "", - "*test_traceback_failure.py:4: AssertionError" - ]) + result.stdout.fnmatch_lines( + [ + "*test_traceback_failure.py F*", + "====* FAILURES *====", + "____*____", + "", + " def test_onefails():", + "> f(3)", + "", + "*test_*.py:6: ", + "_ _ _ *", + # "", + " def f(x):", + "> assert x == g()", + "E assert 3 == 2", + "E + where 2 = g()", + "", + "*test_traceback_failure.py:4: AssertionError", + ] + ) result = testdir.runpytest(p1) # "auto" - result.stdout.fnmatch_lines([ - "*test_traceback_failure.py F*", - "====* FAILURES *====", - "____*____", - "", - " def test_onefails():", - "> f(3)", - "", - "*test_*.py:6: ", - "", - " def f(x):", - "> assert x == g()", - "E assert 3 == 2", - "E + where 2 = g()", - "", - "*test_traceback_failure.py:4: AssertionError" - ]) + result.stdout.fnmatch_lines( + [ + "*test_traceback_failure.py F*", + "====* FAILURES *====", + "____*____", + "", + " def test_onefails():", + "> f(3)", + "", + "*test_*.py:6: ", + "", + " def f(x):", + "> assert x == g()", + "E assert 3 == 2", + "E + where 2 = g()", + "", + "*test_traceback_failure.py:4: AssertionError", + ] + ) -@pytest.mark.skipif(sys.version_info[:2] <= (3, 3), reason='Python 3.4+ shows chained exceptions on multiprocess') +@pytest.mark.skipif( + sys.version_info[:2] <= (3, 3), + reason="Python 3.4+ shows chained exceptions on multiprocess", +) def test_exception_handling_no_traceback(testdir): """ Handle chain exceptions in tasks submitted by the multiprocess module (#1984). """ - p1 = testdir.makepyfile(""" + p1 = testdir.makepyfile( + """ from multiprocessing import Pool def process_task(n): @@ -906,64 +967,76 @@ def test_exception_handling_no_traceback(testdir): def test_multitask_job(): multitask_job() - """) + """ + ) result = testdir.runpytest(p1, "--tb=long") - result.stdout.fnmatch_lines([ - "====* FAILURES *====", - "*multiprocessing.pool.RemoteTraceback:*", - "Traceback (most recent call last):", - "*assert n == 10", - "The above exception was the direct cause of the following exception:", - "> * multitask_job()", - ]) + result.stdout.fnmatch_lines( + [ + "====* FAILURES *====", + "*multiprocessing.pool.RemoteTraceback:*", + "Traceback (most recent call last):", + "*assert n == 10", + "The above exception was the direct cause of the following exception:", + "> * multitask_job()", + ] + ) -@pytest.mark.skipif("'__pypy__' in sys.builtin_module_names or sys.platform.startswith('java')") +@pytest.mark.skipif( + "'__pypy__' in sys.builtin_module_names or sys.platform.startswith('java')" +) def test_warn_missing(testdir): testdir.makepyfile("") result = testdir.run(sys.executable, "-OO", "-m", "pytest", "-h") - result.stderr.fnmatch_lines([ - "*WARNING*assert statements are not executed*", - ]) + result.stderr.fnmatch_lines(["*WARNING*assert statements are not executed*"]) result = testdir.run(sys.executable, "-OO", "-m", "pytest") - result.stderr.fnmatch_lines([ - "*WARNING*assert statements are not executed*", - ]) + result.stderr.fnmatch_lines(["*WARNING*assert statements are not executed*"]) def test_recursion_source_decode(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_something(): pass - """) - testdir.makeini(""" + """ + ) + testdir.makeini( + """ [pytest] python_files = *.py - """) + """ + ) result = testdir.runpytest("--collect-only") - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ - """) + """ + ) def test_AssertionError_message(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_hello(): x,y = 1,2 assert 0, (x,y) - 
""") + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ *def test_hello* *assert 0, (x,y)* *AssertionError: (1, 2)* - """) + """ + ) -@pytest.mark.skipif(PY3, reason='This bug does not exist on PY3') +@pytest.mark.skipif(PY3, reason="This bug does not exist on PY3") def test_set_with_unsortable_elements(): # issue #718 class UnsortableKey(object): + def __init__(self, name): self.name = name @@ -971,7 +1044,7 @@ def test_set_with_unsortable_elements(): raise RuntimeError() def __repr__(self): - return 'repr({0})'.format(self.name) + return "repr({})".format(self.name) def __eq__(self, other): return self.name == other.name @@ -979,12 +1052,13 @@ def test_set_with_unsortable_elements(): def __hash__(self): return hash(self.name) - left_set = set(UnsortableKey(str(i)) for i in range(1, 3)) - right_set = set(UnsortableKey(str(i)) for i in range(2, 4)) + left_set = {UnsortableKey(str(i)) for i in range(1, 3)} + right_set = {UnsortableKey(str(i)) for i in range(2, 4)} expl = callequal(left_set, right_set, verbose=True) # skip first line because it contains the "construction" of the set, which does not have a guaranteed order expl = expl[1:] - dedent = textwrap.dedent(""" + dedent = textwrap.dedent( + """ Extra items in the left set: repr(1) Extra items in the right set: @@ -993,86 +1067,106 @@ def test_set_with_unsortable_elements(): - repr(1) repr(2) + repr(3) - """).strip() - assert '\n'.join(expl) == dedent + """ + ).strip() + assert "\n".join(expl) == dedent def test_diff_newline_at_end(monkeypatch, testdir): - testdir.makepyfile(r""" + testdir.makepyfile( + r""" def test_diff(): assert 'asdf' == 'asdf\n' - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines(r""" + result.stdout.fnmatch_lines( + r""" *assert 'asdf' == 'asdf\n' * - asdf * + asdf * ? 
+ - """) + """ + ) def test_assert_tuple_warning(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_tuple(): assert(False, 'you shall not pass') - """) - result = testdir.runpytest('-rw') - result.stdout.fnmatch_lines([ - '*test_assert_tuple_warning.py:2', - '*assertion is always true*', - ]) + """ + ) + result = testdir.runpytest("-rw") + result.stdout.fnmatch_lines( + ["*test_assert_tuple_warning.py:2", "*assertion is always true*"] + ) def test_assert_indirect_tuple_no_warning(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_tuple(): tpl = ('foo', 'bar') assert tpl - """) - result = testdir.runpytest('-rw') - output = '\n'.join(result.stdout.lines) - assert 'WR1' not in output + """ + ) + result = testdir.runpytest("-rw") + output = "\n".join(result.stdout.lines) + assert "WR1" not in output def test_assert_with_unicode(monkeypatch, testdir): - testdir.makepyfile(u""" + testdir.makepyfile( + u""" # -*- coding: utf-8 -*- def test_unicode(): assert u'유니코드' == u'Unicode' - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines(['*AssertionError*']) + result.stdout.fnmatch_lines(["*AssertionError*"]) def test_raise_unprintable_assertion_error(testdir): - testdir.makepyfile(r""" + testdir.makepyfile( + r""" def test_raise_assertion_error(): raise AssertionError('\xff') - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([r"> raise AssertionError('\xff')", 'E AssertionError: *']) + result.stdout.fnmatch_lines( + [r"> raise AssertionError('\xff')", "E AssertionError: *"] + ) def test_raise_assertion_error_raisin_repr(testdir): - testdir.makepyfile(u""" + testdir.makepyfile( + u""" class RaisingRepr(object): def __repr__(self): raise Exception() def test_raising_repr(): raise AssertionError(RaisingRepr()) - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines(['E AssertionError: ']) + result.stdout.fnmatch_lines( + ["E AssertionError: "] + ) def test_issue_1944(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def f(): return assert f() == 10 - """) + """ + ) result = testdir.runpytest() result.stdout.fnmatch_lines(["*1 error*"]) assert "AttributeError: 'Module' object has no attribute '_obj'" not in result.stdout.str() diff --git a/testing/test_assertrewrite.py b/testing/test_assertrewrite.py index 4f7c95600..aa752dabe 100644 --- a/testing/test_assertrewrite.py +++ b/testing/test_assertrewrite.py @@ -12,7 +12,11 @@ import pytest import _pytest._code from _pytest.assertion import util -from _pytest.assertion.rewrite import rewrite_asserts, PYTEST_TAG, AssertionRewritingHook +from _pytest.assertion.rewrite import ( + rewrite_asserts, + PYTEST_TAG, + AssertionRewritingHook, +) from _pytest.main import EXIT_NOTESTSCOLLECTED ast = pytest.importorskip("ast") @@ -39,7 +43,7 @@ def rewrite(src): def getmsg(f, extra_ns=None, must_pass=False): """Rewrite the assertions in f, run it, and get the failure message.""" - src = '\n'.join(_pytest._code.Code(f).source().lines) + src = "\n".join(_pytest._code.Code(f).source().lines) mod = rewrite(src) code = compile(mod, "", "exec") ns = {} @@ -140,8 +144,10 @@ class TestAssertionRewrite(object): assert "warnings" not in "".join(result.outlines) def test_name(self): + def f(): assert False + assert getmsg(f) == "assert False" def f(): @@ -169,72 +175,77 @@ class TestAssertionRewrite(object): assert getmsg(f, {"cls": X}) == "assert cls == 42" def test_assert_already_has_message(self): + def f(): assert False, "something bad!" 
+ assert getmsg(f) == "AssertionError: something bad!\nassert False" def test_assertion_message(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_foo(): assert 1 == 2, "The failure message" - """) + """ + ) result = testdir.runpytest() assert result.ret == 1 - result.stdout.fnmatch_lines([ - "*AssertionError*The failure message*", - "*assert 1 == 2*", - ]) + result.stdout.fnmatch_lines( + ["*AssertionError*The failure message*", "*assert 1 == 2*"] + ) def test_assertion_message_multiline(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_foo(): assert 1 == 2, "A multiline\\nfailure message" - """) + """ + ) result = testdir.runpytest() assert result.ret == 1 - result.stdout.fnmatch_lines([ - "*AssertionError*A multiline*", - "*failure message*", - "*assert 1 == 2*", - ]) + result.stdout.fnmatch_lines( + ["*AssertionError*A multiline*", "*failure message*", "*assert 1 == 2*"] + ) def test_assertion_message_tuple(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_foo(): assert 1 == 2, (1, 2) - """) + """ + ) result = testdir.runpytest() assert result.ret == 1 - result.stdout.fnmatch_lines([ - "*AssertionError*%s*" % repr((1, 2)), - "*assert 1 == 2*", - ]) + result.stdout.fnmatch_lines( + ["*AssertionError*%s*" % repr((1, 2)), "*assert 1 == 2*"] + ) def test_assertion_message_expr(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_foo(): assert 1 == 2, 1 + 2 - """) + """ + ) result = testdir.runpytest() assert result.ret == 1 - result.stdout.fnmatch_lines([ - "*AssertionError*3*", - "*assert 1 == 2*", - ]) + result.stdout.fnmatch_lines(["*AssertionError*3*", "*assert 1 == 2*"]) def test_assertion_message_escape(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_foo(): assert 1 == 2, 'To be escaped: %' - """) + """ + ) result = testdir.runpytest() assert result.ret == 1 - result.stdout.fnmatch_lines([ - "*AssertionError: To be escaped: %", - "*assert 1 == 2", - ]) + result.stdout.fnmatch_lines( + ["*AssertionError: To be escaped: %", "*assert 1 == 2"] + ) def test_boolop(self): + def f(): f = g = False assert f and g @@ -273,14 +284,20 @@ class TestAssertionRewrite(object): def f(): assert x() and x() - assert getmsg(f, {"x": x}) == """assert (False) + assert ( + getmsg(f, {"x": x}) + == """assert (False) + where False = x()""" + ) def f(): assert False or x() - assert getmsg(f, {"x": x}) == """assert (False or False) + assert ( + getmsg(f, {"x": x}) + == """assert (False or False) + where False = x()""" + ) def f(): assert 1 in {} and 2 in {} @@ -308,6 +325,7 @@ class TestAssertionRewrite(object): getmsg(f, must_pass=True) def test_short_circuit_evaluation(self): + def f(): assert True or explode # noqa @@ -320,6 +338,7 @@ class TestAssertionRewrite(object): getmsg(f, must_pass=True) def test_unary_op(self): + def f(): x = True assert not x @@ -345,6 +364,7 @@ class TestAssertionRewrite(object): assert getmsg(f) == "assert (+0 + 0)" def test_binary_op(self): + def f(): x = 1 y = -1 @@ -354,9 +374,11 @@ class TestAssertionRewrite(object): def f(): assert not 5 % 4 + assert getmsg(f) == "assert not (5 % 4)" def test_boolop_percent(self): + def f(): assert 3 % 2 and False @@ -364,11 +386,13 @@ class TestAssertionRewrite(object): def f(): assert False or 4 % 2 + assert getmsg(f) == "assert (False or (4 % 2))" @pytest.mark.skipif("sys.version_info < (3,5)") def test_at_operator_issue1290(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ class 
Matrix(object): def __init__(self, num): self.num = num @@ -376,10 +400,12 @@ class TestAssertionRewrite(object): return self.num * other.num def test_multmat_operator(): - assert Matrix(2) @ Matrix(3) == 6""") + assert Matrix(2) @ Matrix(3) == 6""" + ) testdir.runpytest().assert_outcomes(passed=1) def test_call(self): + def g(a=42, *args, **kwargs): return False @@ -388,48 +414,70 @@ class TestAssertionRewrite(object): def f(): assert g() - assert getmsg(f, ns) == """assert False + assert ( + getmsg(f, ns) + == """assert False + where False = g()""" + ) def f(): assert g(1) - assert getmsg(f, ns) == """assert False + assert ( + getmsg(f, ns) + == """assert False + where False = g(1)""" + ) def f(): assert g(1, 2) - assert getmsg(f, ns) == """assert False + assert ( + getmsg(f, ns) + == """assert False + where False = g(1, 2)""" + ) def f(): assert g(1, g=42) - assert getmsg(f, ns) == """assert False + assert ( + getmsg(f, ns) + == """assert False + where False = g(1, g=42)""" + ) def f(): assert g(1, 3, g=23) - assert getmsg(f, ns) == """assert False + assert ( + getmsg(f, ns) + == """assert False + where False = g(1, 3, g=23)""" + ) def f(): seq = [1, 2, 3] assert g(*seq) - assert getmsg(f, ns) == """assert False + assert ( + getmsg(f, ns) + == """assert False + where False = g(*[1, 2, 3])""" + ) def f(): x = "a" assert g(**{x: 2}) - assert getmsg(f, ns) == """assert False + assert ( + getmsg(f, ns) + == """assert False + where False = g(**{'a': 2})""" + ) def test_attribute(self): + class X(object): g = 3 @@ -438,15 +486,21 @@ class TestAssertionRewrite(object): def f(): assert not x.g # noqa - assert getmsg(f, ns) == """assert not 3 + assert ( + getmsg(f, ns) + == """assert not 3 + where 3 = x.g""" + ) def f(): x.a = False # noqa - assert x.a # noqa + assert x.a # noqa - assert getmsg(f, ns) == """assert False + assert ( + getmsg(f, ns) + == """assert False + where False = x.a""" + ) def test_comparisons(self): @@ -487,10 +541,13 @@ class TestAssertionRewrite(object): values = list(range(10)) assert len(values) == 11 - assert getmsg(f).startswith("""assert 10 == 11 - + where 10 = len([""") + assert getmsg(f).startswith( + """assert 10 == 11 + + where 10 = len([""" + ) def test_custom_reprcompare(self, monkeypatch): + def my_reprcompare(op, left, right): return "42" @@ -512,7 +569,9 @@ class TestAssertionRewrite(object): assert getmsg(f) == "assert 5 <= 4" def test_assert_raising_nonzero_in_comparison(self): + def f(): + class A(object): def __nonzero__(self): @@ -532,13 +591,16 @@ class TestAssertionRewrite(object): assert " < 0" in getmsg(f) def test_formatchar(self): + def f(): assert "%test" == "test" assert getmsg(f).startswith("assert '%test' == 'test'") def test_custom_repr(self): + def f(): + class Foo(object): a = 1 @@ -555,18 +617,22 @@ class TestRewriteOnImport(object): def test_pycache_is_a_file(self, testdir): testdir.tmpdir.join("__pycache__").write("Hello") - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_rewritten(): - assert "@py_builtins" in globals()""") + assert "@py_builtins" in globals()""" + ) assert testdir.runpytest().ret == 0 def test_pycache_is_readonly(self, testdir): cache = testdir.tmpdir.mkdir("__pycache__") old_mode = cache.stat().mode cache.chmod(old_mode ^ stat.S_IWRITE) - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_rewritten(): - assert "@py_builtins" in globals()""") + assert "@py_builtins" in globals()""" + ) try: assert testdir.runpytest().ret == 0 finally: @@ -582,19 +648,28 @@ class TestRewriteOnImport(object): 
finally: f.close() z.chmod(256) - testdir.makepyfile(""" + testdir.makepyfile( + """ import sys sys.path.append(%r) - import test_gum.test_lizard""" % (z_fn,)) + import test_gum.test_lizard""" + % (z_fn,) + ) assert testdir.runpytest().ret == EXIT_NOTESTSCOLLECTED def test_readonly(self, testdir): sub = testdir.mkdir("testing") sub.join("test_readonly.py").write( - py.builtin._totext(""" + py.builtin._totext( + """ def test_rewritten(): assert "@py_builtins" in globals() - """).encode("utf-8"), "wb") + """ + ).encode( + "utf-8" + ), + "wb", + ) old_mode = sub.stat().mode sub.chmod(320) try: @@ -603,27 +678,33 @@ def test_rewritten(): sub.chmod(old_mode) def test_dont_write_bytecode(self, testdir, monkeypatch): - testdir.makepyfile(""" + testdir.makepyfile( + """ import os def test_no_bytecode(): assert "__pycache__" in __cached__ assert not os.path.exists(__cached__) - assert not os.path.exists(os.path.dirname(__cached__))""") + assert not os.path.exists(os.path.dirname(__cached__))""" + ) monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", "1") assert testdir.runpytest_subprocess().ret == 0 def test_orphaned_pyc_file(self, testdir): - if sys.version_info < (3, 0) and hasattr(sys, 'pypy_version_info'): + if sys.version_info < (3, 0) and hasattr(sys, "pypy_version_info"): pytest.skip("pypy2 doesn't run orphaned pyc files") - testdir.makepyfile(""" + testdir.makepyfile( + """ import orphan def test_it(): assert orphan.value == 17 - """) - testdir.makepyfile(orphan=""" + """ + ) + testdir.makepyfile( + orphan=""" value = 17 - """) + """ + ) py_compile.compile("orphan.py") os.remove("orphan.py") @@ -639,14 +720,16 @@ def test_rewritten(): @pytest.mark.skipif('"__pypy__" in sys.modules') def test_pyc_vs_pyo(self, testdir, monkeypatch): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest def test_optimized(): "hello" assert test_optimized.__doc__ is None""" - ) - p = py.path.local.make_numbered_dir(prefix="runpytest-", keep=None, - rootdir=testdir.tmpdir) + ) + p = py.path.local.make_numbered_dir( + prefix="runpytest-", keep=None, rootdir=testdir.tmpdir + ) tmp = "--basetemp=%s" % p monkeypatch.setenv("PYTHONOPTIMIZE", "2") monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False) @@ -662,9 +745,11 @@ def test_rewritten(): pkg = testdir.tmpdir.join("pkg") pkg.mkdir() pkg.join("__init__.py").ensure() - pkg.join("test_blah.py").write(""" + pkg.join("test_blah.py").write( + """ def test_rewritten(): - assert "@py_builtins" in globals()""") + assert "@py_builtins" in globals()""" + ) assert testdir.runpytest().ret == 0 def test_translate_newlines(self, testdir): @@ -673,11 +758,13 @@ def test_rewritten(): testdir.tmpdir.join("test_newlines.py").write(b, "wb") assert testdir.runpytest().ret == 0 - @pytest.mark.skipif(sys.version_info < (3, 4), - reason='packages without __init__.py not supported on python 2') + @pytest.mark.skipif( + sys.version_info < (3, 4), + reason="packages without __init__.py not supported on python 2", + ) def test_package_without__init__py(self, testdir): - pkg = testdir.mkdir('a_package_without_init_py') - pkg.join('module.py').ensure() + pkg = testdir.mkdir("a_package_without_init_py") + pkg.join("module.py").ensure() testdir.makepyfile("import a_package_without_init_py.module") assert testdir.runpytest().ret == EXIT_NOTESTSCOLLECTED @@ -688,18 +775,22 @@ def test_rewritten(): def mywarn(code, msg): warnings.append((code, msg)) - monkeypatch.setattr(hook.config, 'warn', mywarn) - hook.mark_rewrite('_pytest') - assert '_pytest' in warnings[0][1] + 
monkeypatch.setattr(hook.config, "warn", mywarn) + hook.mark_rewrite("_pytest") + assert "_pytest" in warnings[0][1] def test_rewrite_module_imported_from_conftest(self, testdir): - testdir.makeconftest(''' + testdir.makeconftest( + """ import test_rewrite_module_imported - ''') - testdir.makepyfile(test_rewrite_module_imported=''' + """ + ) + testdir.makepyfile( + test_rewrite_module_imported=""" def test_rewritten(): assert "@py_builtins" in globals() - ''') + """ + ) assert testdir.runpytest_subprocess().ret == 0 def test_remember_rewritten_modules(self, pytestconfig, testdir, monkeypatch): @@ -708,46 +799,50 @@ def test_rewritten(): doesn't give false positives (#2005). """ monkeypatch.syspath_prepend(testdir.tmpdir) - testdir.makepyfile(test_remember_rewritten_modules='') + testdir.makepyfile(test_remember_rewritten_modules="") warnings = [] hook = AssertionRewritingHook(pytestconfig) - monkeypatch.setattr(hook.config, 'warn', lambda code, msg: warnings.append(msg)) - hook.find_module('test_remember_rewritten_modules') - hook.load_module('test_remember_rewritten_modules') - hook.mark_rewrite('test_remember_rewritten_modules') - hook.mark_rewrite('test_remember_rewritten_modules') + monkeypatch.setattr(hook.config, "warn", lambda code, msg: warnings.append(msg)) + hook.find_module("test_remember_rewritten_modules") + hook.load_module("test_remember_rewritten_modules") + hook.mark_rewrite("test_remember_rewritten_modules") + hook.mark_rewrite("test_remember_rewritten_modules") assert warnings == [] def test_rewrite_warning_using_pytest_plugins(self, testdir): - testdir.makepyfile(**{ - 'conftest.py': "pytest_plugins = ['core', 'gui', 'sci']", - 'core.py': "", - 'gui.py': "pytest_plugins = ['core', 'sci']", - 'sci.py': "pytest_plugins = ['core']", - 'test_rewrite_warning_pytest_plugins.py': "def test(): pass", - }) + testdir.makepyfile( + **{ + "conftest.py": "pytest_plugins = ['core', 'gui', 'sci']", + "core.py": "", + "gui.py": "pytest_plugins = ['core', 'sci']", + "sci.py": "pytest_plugins = ['core']", + "test_rewrite_warning_pytest_plugins.py": "def test(): pass", + } + ) testdir.chdir() result = testdir.runpytest_subprocess() - result.stdout.fnmatch_lines(['*= 1 passed in *=*']) - assert 'pytest-warning summary' not in result.stdout.str() + result.stdout.fnmatch_lines(["*= 1 passed in *=*"]) + assert "pytest-warning summary" not in result.stdout.str() def test_rewrite_warning_using_pytest_plugins_env_var(self, testdir, monkeypatch): - monkeypatch.setenv('PYTEST_PLUGINS', 'plugin') - testdir.makepyfile(**{ - 'plugin.py': "", - 'test_rewrite_warning_using_pytest_plugins_env_var.py': """ + monkeypatch.setenv("PYTEST_PLUGINS", "plugin") + testdir.makepyfile( + **{ + "plugin.py": "", + "test_rewrite_warning_using_pytest_plugins_env_var.py": """ import plugin pytest_plugins = ['plugin'] def test(): pass """, - }) + } + ) testdir.chdir() result = testdir.runpytest_subprocess() - result.stdout.fnmatch_lines(['*= 1 passed in *=*']) - assert 'pytest-warning summary' not in result.stdout.str() + result.stdout.fnmatch_lines(["*= 1 passed in *=*"]) + assert "pytest-warning summary" not in result.stdout.str() - @pytest.mark.skipif(sys.version_info[0] > 2, reason='python 2 only') + @pytest.mark.skipif(sys.version_info[0] > 2, reason="python 2 only") def test_rewrite_future_imports(self, testdir): """Test that rewritten modules don't inherit the __future__ flags from the assertrewrite module. 
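
The mark_rewrite, conftest-import, and pytest_plugins tests above all exercise the behaviour that plugin and conftest authors reach through the public pytest.register_assert_rewrite API. As a minimal sketch of that usage (the module name "mypkg.helpers" is hypothetical, not part of this patch):

    # conftest.py -- minimal sketch; "mypkg.helpers" is a made-up module name.
    import pytest

    # Register *before* the module is imported anywhere: importing it first
    # leaves an un-rewritten module in sys.modules, which is exactly the
    # "module already imported" warning case the tests above check for.
    pytest.register_assert_rewrite("mypkg.helpers")

    from mypkg import helpers  # noqa: E402

Under that assumption, plain asserts inside mypkg.helpers get the same rewritten, detailed failure explanations as asserts in test files themselves.
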
@@ -757,28 +852,32 @@ def test_rewritten(): The test below will fail if __future__.division is enabled """ - testdir.makepyfile(''' + testdir.makepyfile( + """ def test(): x = 1 / 2 assert type(x) is int - ''') + """ + ) result = testdir.runpytest() assert result.ret == 0 class TestAssertionRewriteHookDetails(object): + def test_loader_is_package_false_for_module(self, testdir): - testdir.makepyfile(test_fun=""" + testdir.makepyfile( + test_fun=""" def test_loader(): assert not __loader__.is_package(__name__) - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "* 1 passed*", - ]) + result.stdout.fnmatch_lines(["* 1 passed*"]) def test_loader_is_package_true_for_package(self, testdir): - testdir.makepyfile(test_fun=""" + testdir.makepyfile( + test_fun=""" def test_loader(): assert not __loader__.is_package(__name__) @@ -787,12 +886,11 @@ class TestAssertionRewriteHookDetails(object): def test_missing(): assert not __loader__.is_package('pytest_not_there') - """) - testdir.mkpydir('fun') + """ + ) + testdir.mkpydir("fun") result = testdir.runpytest() - result.stdout.fnmatch_lines([ - '* 3 passed*', - ]) + result.stdout.fnmatch_lines(["* 3 passed*"]) @pytest.mark.skipif("sys.version_info[0] >= 3") @pytest.mark.xfail("hasattr(sys, 'pypy_translation_info')") @@ -805,35 +903,43 @@ class TestAssertionRewriteHookDetails(object): @pytest.mark.skipif("sys.version_info[0] >= 3") def test_detect_coding_cookie(self, testdir): - testdir.makepyfile(test_cookie=""" + testdir.makepyfile( + test_cookie=""" # -*- coding: utf-8 -*- u"St\xc3\xa4d" def test_rewritten(): - assert "@py_builtins" in globals()""") + assert "@py_builtins" in globals()""" + ) assert testdir.runpytest().ret == 0 @pytest.mark.skipif("sys.version_info[0] >= 3") def test_detect_coding_cookie_second_line(self, testdir): - testdir.makepyfile(test_cookie=""" + testdir.makepyfile( + test_cookie=""" # -*- coding: utf-8 -*- u"St\xc3\xa4d" def test_rewritten(): - assert "@py_builtins" in globals()""") + assert "@py_builtins" in globals()""" + ) assert testdir.runpytest().ret == 0 @pytest.mark.skipif("sys.version_info[0] >= 3") def test_detect_coding_cookie_crlf(self, testdir): - testdir.makepyfile(test_cookie=""" + testdir.makepyfile( + test_cookie=""" # -*- coding: utf-8 -*- u"St\xc3\xa4d" def test_rewritten(): - assert "@py_builtins" in globals()""") + assert "@py_builtins" in globals()""" + ) assert testdir.runpytest().ret == 0 def test_sys_meta_path_munged(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_meta_path(): - import sys; sys.meta_path = []""") + import sys; sys.meta_path = []""" + ) assert testdir.runpytest().ret == 0 def test_write_pyc(self, testdir, tmpdir, monkeypatch): @@ -841,6 +947,7 @@ class TestAssertionRewriteHookDetails(object): from _pytest.assertion import AssertionState import atomicwrites from contextlib import contextmanager + config = testdir.parseconfig([]) state = AssertionState(config, "rewrite") source_path = tmpdir.ensure("source.py") @@ -848,7 +955,7 @@ class TestAssertionRewriteHookDetails(object): assert _write_pyc(state, [1], source_path.stat(), pycpath) @contextmanager - def atomic_write_failed(fn, mode='r', overwrite=False): + def atomic_write_failed(fn, mode="r", overwrite=False): e = IOError() e.errno = 10 raise e @@ -866,9 +973,9 @@ class TestAssertionRewriteHookDetails(object): """ pytest.importorskip("pkg_resources") - testdir.mkpydir('testpkg') + testdir.mkpydir("testpkg") contents = { - 'testpkg/test_pkg': """ + "testpkg/test_pkg": """ 
import pkg_resources import pytest @@ -879,10 +986,10 @@ class TestAssertionRewriteHookDetails(object): res = pkg_resources.resource_string(__name__, 'resource.txt') res = res.decode('ascii') assert res == 'Load me please.' - """, + """ } testdir.makepyfile(**contents) - testdir.maketxtfile(**{'testpkg/resource': "Load me please."}) + testdir.maketxtfile(**{"testpkg/resource": "Load me please."}) result = testdir.runpytest_subprocess() result.assert_outcomes(passed=1) @@ -896,28 +1003,33 @@ class TestAssertionRewriteHookDetails(object): import py_compile from _pytest.assertion.rewrite import _read_pyc - source = tmpdir.join('source.py') - pyc = source + 'c' + source = tmpdir.join("source.py") + pyc = source + "c" - source.write('def test(): pass') + source.write("def test(): pass") py_compile.compile(str(source), str(pyc)) - contents = pyc.read(mode='rb') + contents = pyc.read(mode="rb") strip_bytes = 20 # header is around 8 bytes, strip a little more assert len(contents) > strip_bytes - pyc.write(contents[:strip_bytes], mode='wb') + pyc.write(contents[:strip_bytes], mode="wb") assert _read_pyc(source, str(pyc)) is None # no error def test_reload_is_same(self, testdir): # A file that will be picked up during collecting. testdir.tmpdir.join("file.py").ensure() - testdir.tmpdir.join("pytest.ini").write(textwrap.dedent(""" + testdir.tmpdir.join("pytest.ini").write( + textwrap.dedent( + """ [pytest] python_files = *.py - """)) + """ + ) + ) - testdir.makepyfile(test_fun=""" + testdir.makepyfile( + test_fun=""" import sys try: from imp import reload @@ -927,30 +1039,34 @@ class TestAssertionRewriteHookDetails(object): def test_loader(): import file assert sys.modules["file"] is reload(file) - """) - result = testdir.runpytest('-s') - result.stdout.fnmatch_lines([ - "* 1 passed*", - ]) + """ + ) + result = testdir.runpytest("-s") + result.stdout.fnmatch_lines(["* 1 passed*"]) def test_get_data_support(self, testdir): """Implement optional PEP302 api (#808). 
""" path = testdir.mkpydir("foo") - path.join("test_foo.py").write(_pytest._code.Source(""" + path.join("test_foo.py").write( + _pytest._code.Source( + """ class Test(object): def test_foo(self): import pkgutil data = pkgutil.get_data('foo.test_foo', 'data.txt') assert data == b'Hey' - """)) - path.join('data.txt').write('Hey') + """ + ) + ) + path.join("data.txt").write("Hey") result = testdir.runpytest() - result.stdout.fnmatch_lines('*1 passed*') + result.stdout.fnmatch_lines("*1 passed*") def test_issue731(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ class LongReprWithBraces(object): def __repr__(self): return 'LongReprWithBraces({' + ('a' * 80) + '}' + ('a' * 120) + ')' @@ -961,47 +1077,62 @@ def test_issue731(testdir): def test_long_repr(): obj = LongReprWithBraces() assert obj.some_method() - """) + """ + ) result = testdir.runpytest() - assert 'unbalanced braces' not in result.stdout.str() + assert "unbalanced braces" not in result.stdout.str() class TestIssue925(object): + def test_simple_case(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_ternary_display(): assert (False == False) == False - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines('*E*assert (False == False) == False') + result.stdout.fnmatch_lines("*E*assert (False == False) == False") def test_long_case(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_ternary_display(): assert False == (False == True) == True - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines('*E*assert (False == True) == True') + result.stdout.fnmatch_lines("*E*assert (False == True) == True") def test_many_brackets(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_ternary_display(): assert True == ((False == True) == True) - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines('*E*assert True == ((False == True) == True)') + result.stdout.fnmatch_lines("*E*assert True == ((False == True) == True)") class TestIssue2121(): + def test_simple(self, testdir): - testdir.tmpdir.join("tests/file.py").ensure().write(""" + testdir.tmpdir.join("tests/file.py").ensure().write( + """ def test_simple_failure(): assert 1 + 1 == 3 -""") - testdir.tmpdir.join("pytest.ini").write(textwrap.dedent(""" +""" + ) + testdir.tmpdir.join("pytest.ini").write( + textwrap.dedent( + """ [pytest] python_files = tests/**.py - """)) + """ + ) + ) result = testdir.runpytest() - result.stdout.fnmatch_lines('*E*assert (1 + 1) == 3') + result.stdout.fnmatch_lines("*E*assert (1 + 1) == 3") diff --git a/testing/test_cacheprovider.py b/testing/test_cacheprovider.py index 51e45dd48..33d1dd844 100644 --- a/testing/test_cacheprovider.py +++ b/testing/test_cacheprovider.py @@ -10,6 +10,7 @@ pytest_plugins = "pytester", class TestNewAPI(object): + def test_config_cache_makedir(self, testdir): testdir.makeini("[pytest]") config = testdir.parseconfigure() @@ -31,50 +32,54 @@ class TestNewAPI(object): def test_cache_writefail_cachfile_silent(self, testdir): testdir.makeini("[pytest]") - testdir.tmpdir.join('.pytest_cache').write('gone wrong') + testdir.tmpdir.join(".pytest_cache").write("gone wrong") config = testdir.parseconfigure() cache = config.cache - cache.set('test/broken', []) + cache.set("test/broken", []) - @pytest.mark.skipif(sys.platform.startswith('win'), reason='no chmod on windows') + @pytest.mark.skipif(sys.platform.startswith("win"), reason="no chmod on windows") def test_cache_writefail_permissions(self, 
testdir): testdir.makeini("[pytest]") - testdir.tmpdir.ensure_dir('.pytest_cache').chmod(0) + testdir.tmpdir.ensure_dir(".pytest_cache").chmod(0) config = testdir.parseconfigure() cache = config.cache - cache.set('test/broken', []) + cache.set("test/broken", []) - @pytest.mark.skipif(sys.platform.startswith('win'), reason='no chmod on windows') + @pytest.mark.skipif(sys.platform.startswith("win"), reason="no chmod on windows") def test_cache_failure_warns(self, testdir): - testdir.tmpdir.ensure_dir('.pytest_cache').chmod(0) - testdir.makepyfile(""" + testdir.tmpdir.ensure_dir(".pytest_cache").chmod(0) + testdir.makepyfile( + """ def test_error(): raise Exception - """) - result = testdir.runpytest('-rw') + """ + ) + result = testdir.runpytest("-rw") assert result.ret == 1 - result.stdout.fnmatch_lines([ - "*could not create cache path*", - "*2 warnings*", - ]) + result.stdout.fnmatch_lines(["*could not create cache path*", "*2 warnings*"]) def test_config_cache(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_configure(config): # see that we get cache information early on assert hasattr(config, "cache") - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ def test_session(pytestconfig): assert hasattr(pytestconfig, "cache") - """) + """ + ) result = testdir.runpytest() assert result.ret == 0 result.stdout.fnmatch_lines(["*1 passed*"]) def test_cachefuncarg(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest def test_cachefuncarg(cache): val = cache.get("some/thing", None) @@ -83,101 +88,118 @@ class TestNewAPI(object): pytest.raises(TypeError, lambda: cache.get("some/thing")) val = cache.get("some/thing", []) assert val == [1] - """) + """ + ) result = testdir.runpytest() assert result.ret == 0 result.stdout.fnmatch_lines(["*1 passed*"]) def test_custom_rel_cache_dir(self, testdir): - rel_cache_dir = os.path.join('custom_cache_dir', 'subdir') - testdir.makeini(""" + rel_cache_dir = os.path.join("custom_cache_dir", "subdir") + testdir.makeini( + """ [pytest] cache_dir = {cache_dir} - """.format(cache_dir=rel_cache_dir)) - testdir.makepyfile(test_errored='def test_error():\n assert False') + """.format( + cache_dir=rel_cache_dir + ) + ) + testdir.makepyfile(test_errored="def test_error():\n assert False") testdir.runpytest() assert testdir.tmpdir.join(rel_cache_dir).isdir() def test_custom_abs_cache_dir(self, testdir, tmpdir_factory): - tmp = str(tmpdir_factory.mktemp('tmp')) - abs_cache_dir = os.path.join(tmp, 'custom_cache_dir') - testdir.makeini(""" + tmp = str(tmpdir_factory.mktemp("tmp")) + abs_cache_dir = os.path.join(tmp, "custom_cache_dir") + testdir.makeini( + """ [pytest] cache_dir = {cache_dir} - """.format(cache_dir=abs_cache_dir)) - testdir.makepyfile(test_errored='def test_error():\n assert False') + """.format( + cache_dir=abs_cache_dir + ) + ) + testdir.makepyfile(test_errored="def test_error():\n assert False") testdir.runpytest() assert py.path.local(abs_cache_dir).isdir() def test_custom_cache_dir_with_env_var(self, testdir, monkeypatch): - monkeypatch.setenv('env_var', 'custom_cache_dir') - testdir.makeini(""" + monkeypatch.setenv("env_var", "custom_cache_dir") + testdir.makeini( + """ [pytest] cache_dir = {cache_dir} - """.format(cache_dir='$env_var')) - testdir.makepyfile(test_errored='def test_error():\n assert False') + """.format( + cache_dir="$env_var" + ) + ) + testdir.makepyfile(test_errored="def test_error():\n assert False") testdir.runpytest() - assert 
testdir.tmpdir.join('custom_cache_dir').isdir() + assert testdir.tmpdir.join("custom_cache_dir").isdir() def test_cache_reportheader(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_hello(): pass - """) + """ + ) result = testdir.runpytest("-v") - result.stdout.fnmatch_lines([ - "cachedir: .pytest_cache" - ]) + result.stdout.fnmatch_lines(["cachedir: .pytest_cache"]) def test_cache_show(testdir): result = testdir.runpytest("--cache-show") assert result.ret == 0 - result.stdout.fnmatch_lines([ - "*cache is empty*" - ]) - testdir.makeconftest(""" + result.stdout.fnmatch_lines(["*cache is empty*"]) + testdir.makeconftest( + """ def pytest_configure(config): config.cache.set("my/name", [1,2,3]) config.cache.set("other/some", {1:2}) dp = config.cache.makedir("mydb") dp.ensure("hello") dp.ensure("world") - """) + """ + ) result = testdir.runpytest() assert result.ret == 5 # no tests executed result = testdir.runpytest("--cache-show") - result.stdout.fnmatch_lines_random([ - "*cachedir:*", - "-*cache values*-", - "*my/name contains:", - " [1, 2, 3]", - "*other/some contains*", - " {*1*: 2}", - "-*cache directories*-", - "*mydb/hello*length 0*", - "*mydb/world*length 0*", - ]) + result.stdout.fnmatch_lines_random( + [ + "*cachedir:*", + "-*cache values*-", + "*my/name contains:", + " [1, 2, 3]", + "*other/some contains*", + " {*1*: 2}", + "-*cache directories*-", + "*mydb/hello*length 0*", + "*mydb/world*length 0*", + ] + ) class TestLastFailed(object): def test_lastfailed_usecase(self, testdir, monkeypatch): monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1) - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ def test_1(): assert 0 def test_2(): assert 0 def test_3(): assert 1 - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*2 failed*", - ]) - p.write(_pytest._code.Source(""" + result.stdout.fnmatch_lines(["*2 failed*"]) + p.write( + _pytest._code.Source( + """ def test_1(): assert 1 @@ -186,140 +208,134 @@ class TestLastFailed(object): def test_3(): assert 0 - """)) + """ + ) + ) result = testdir.runpytest("--lf") - result.stdout.fnmatch_lines([ - "*2 passed*1 desel*", - ]) + result.stdout.fnmatch_lines(["*2 passed*1 desel*"]) result = testdir.runpytest("--lf") - result.stdout.fnmatch_lines([ - "*1 failed*2 passed*", - ]) + result.stdout.fnmatch_lines(["*1 failed*2 passed*"]) result = testdir.runpytest("--lf", "--cache-clear") - result.stdout.fnmatch_lines([ - "*1 failed*2 passed*", - ]) + result.stdout.fnmatch_lines(["*1 failed*2 passed*"]) # Run this again to make sure clear-cache is robust - if os.path.isdir('.pytest_cache'): - shutil.rmtree('.pytest_cache') + if os.path.isdir(".pytest_cache"): + shutil.rmtree(".pytest_cache") result = testdir.runpytest("--lf", "--cache-clear") - result.stdout.fnmatch_lines([ - "*1 failed*2 passed*", - ]) + result.stdout.fnmatch_lines(["*1 failed*2 passed*"]) def test_failedfirst_order(self, testdir): - testdir.tmpdir.join('test_a.py').write(_pytest._code.Source(""" + testdir.tmpdir.join("test_a.py").write( + _pytest._code.Source( + """ def test_always_passes(): assert 1 - """)) - testdir.tmpdir.join('test_b.py').write(_pytest._code.Source(""" + """ + ) + ) + testdir.tmpdir.join("test_b.py").write( + _pytest._code.Source( + """ def test_always_fails(): assert 0 - """)) + """ + ) + ) result = testdir.runpytest() # Test order will be collection order; alphabetical - result.stdout.fnmatch_lines([ - "test_a.py*", - "test_b.py*", - ]) + result.stdout.fnmatch_lines(["test_a.py*", "test_b.py*"]) result = 
testdir.runpytest("--ff") # Test order will be failing tests firs - result.stdout.fnmatch_lines([ - "test_b.py*", - "test_a.py*", - ]) + result.stdout.fnmatch_lines(["test_b.py*", "test_a.py*"]) def test_lastfailed_failedfirst_order(self, testdir): - testdir.makepyfile(**{ - 'test_a.py': """ + testdir.makepyfile( + **{ + "test_a.py": """ def test_always_passes(): assert 1 """, - 'test_b.py': """ + "test_b.py": """ def test_always_fails(): assert 0 """, - }) + } + ) result = testdir.runpytest() # Test order will be collection order; alphabetical - result.stdout.fnmatch_lines([ - "test_a.py*", - "test_b.py*", - ]) + result.stdout.fnmatch_lines(["test_a.py*", "test_b.py*"]) result = testdir.runpytest("--lf", "--ff") # Test order will be failing tests firs - result.stdout.fnmatch_lines([ - "test_b.py*", - ]) - assert 'test_a.py' not in result.stdout.str() + result.stdout.fnmatch_lines(["test_b.py*"]) + assert "test_a.py" not in result.stdout.str() def test_lastfailed_difference_invocations(self, testdir, monkeypatch): monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1) - testdir.makepyfile(test_a=""" + testdir.makepyfile( + test_a=""" def test_a1(): assert 0 def test_a2(): assert 1 - """, test_b=""" + """, + test_b=""" def test_b1(): assert 0 - """) + """, + ) p = testdir.tmpdir.join("test_a.py") p2 = testdir.tmpdir.join("test_b.py") result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*2 failed*", - ]) + result.stdout.fnmatch_lines(["*2 failed*"]) result = testdir.runpytest("--lf", p2) - result.stdout.fnmatch_lines([ - "*1 failed*", - ]) - p2.write(_pytest._code.Source(""" + result.stdout.fnmatch_lines(["*1 failed*"]) + p2.write( + _pytest._code.Source( + """ def test_b1(): assert 1 - """)) + """ + ) + ) result = testdir.runpytest("--lf", p2) - result.stdout.fnmatch_lines([ - "*1 passed*", - ]) + result.stdout.fnmatch_lines(["*1 passed*"]) result = testdir.runpytest("--lf", p) - result.stdout.fnmatch_lines([ - "*1 failed*1 desel*", - ]) + result.stdout.fnmatch_lines(["*1 failed*1 desel*"]) def test_lastfailed_usecase_splice(self, testdir, monkeypatch): monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1) - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_1(): assert 0 - """) + """ + ) p2 = testdir.tmpdir.join("test_something.py") - p2.write(_pytest._code.Source(""" + p2.write( + _pytest._code.Source( + """ def test_2(): assert 0 - """)) + """ + ) + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*2 failed*", - ]) + result.stdout.fnmatch_lines(["*2 failed*"]) result = testdir.runpytest("--lf", p2) - result.stdout.fnmatch_lines([ - "*1 failed*", - ]) + result.stdout.fnmatch_lines(["*1 failed*"]) result = testdir.runpytest("--lf") - result.stdout.fnmatch_lines([ - "*2 failed*", - ]) + result.stdout.fnmatch_lines(["*2 failed*"]) def test_lastfailed_xpass(self, testdir): - testdir.inline_runsource(""" + testdir.inline_runsource( + """ import pytest @pytest.mark.xfail def test_hello(): assert 1 - """) + """ + ) config = testdir.parseconfigure() lastfailed = config.cache.get("cache/lastfailed", -1) assert lastfailed == -1 @@ -328,7 +344,8 @@ class TestLastFailed(object): """Test that failed parametrized tests with unmarshable parameters don't break pytest-cache. 
""" - testdir.makepyfile(r""" + testdir.makepyfile( + r""" import pytest @pytest.mark.parametrize('val', [ @@ -336,93 +353,106 @@ class TestLastFailed(object): ]) def test_fail(val): assert False - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines('*1 failed in*') + result.stdout.fnmatch_lines("*1 failed in*") def test_terminal_report_lastfailed(self, testdir): - test_a = testdir.makepyfile(test_a=""" + test_a = testdir.makepyfile( + test_a=""" def test_a1(): pass def test_a2(): pass - """) - test_b = testdir.makepyfile(test_b=""" + """ + ) + test_b = testdir.makepyfile( + test_b=""" def test_b1(): assert 0 def test_b2(): assert 0 - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - 'collected 4 items', - '*2 failed, 2 passed in*', - ]) + result.stdout.fnmatch_lines(["collected 4 items", "*2 failed, 2 passed in*"]) - result = testdir.runpytest('--lf') - result.stdout.fnmatch_lines([ - 'collected 4 items / 2 deselected', - 'run-last-failure: rerun previous 2 failures', - '*2 failed, 2 deselected in*', - ]) + result = testdir.runpytest("--lf") + result.stdout.fnmatch_lines( + [ + "collected 4 items / 2 deselected", + "run-last-failure: rerun previous 2 failures", + "*2 failed, 2 deselected in*", + ] + ) - result = testdir.runpytest(test_a, '--lf') - result.stdout.fnmatch_lines([ - 'collected 2 items', - 'run-last-failure: run all (no recorded failures)', - '*2 passed in*', - ]) + result = testdir.runpytest(test_a, "--lf") + result.stdout.fnmatch_lines( + [ + "collected 2 items", + "run-last-failure: run all (no recorded failures)", + "*2 passed in*", + ] + ) - result = testdir.runpytest(test_b, '--lf') - result.stdout.fnmatch_lines([ - 'collected 2 items', - 'run-last-failure: rerun previous 2 failures', - '*2 failed in*', - ]) + result = testdir.runpytest(test_b, "--lf") + result.stdout.fnmatch_lines( + [ + "collected 2 items", + "run-last-failure: rerun previous 2 failures", + "*2 failed in*", + ] + ) - result = testdir.runpytest('test_b.py::test_b1', '--lf') - result.stdout.fnmatch_lines([ - 'collected 1 item', - 'run-last-failure: rerun previous 1 failure', - '*1 failed in*', - ]) + result = testdir.runpytest("test_b.py::test_b1", "--lf") + result.stdout.fnmatch_lines( + [ + "collected 1 item", + "run-last-failure: rerun previous 1 failure", + "*1 failed in*", + ] + ) def test_terminal_report_failedfirst(self, testdir): - testdir.makepyfile(test_a=""" + testdir.makepyfile( + test_a=""" def test_a1(): assert 0 def test_a2(): pass - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - 'collected 2 items', - '*1 failed, 1 passed in*', - ]) + result.stdout.fnmatch_lines(["collected 2 items", "*1 failed, 1 passed in*"]) - result = testdir.runpytest('--ff') - result.stdout.fnmatch_lines([ - 'collected 2 items', - 'run-last-failure: rerun previous 1 failure first', - '*1 failed, 1 passed in*', - ]) + result = testdir.runpytest("--ff") + result.stdout.fnmatch_lines( + [ + "collected 2 items", + "run-last-failure: rerun previous 1 failure first", + "*1 failed, 1 passed in*", + ] + ) def test_lastfailed_collectfailure(self, testdir, monkeypatch): - testdir.makepyfile(test_maybe=""" + testdir.makepyfile( + test_maybe=""" import os env = os.environ if '1' == env['FAILIMPORT']: raise ImportError('fail') def test_hello(): assert '0' == env['FAILTEST'] - """) + """ + ) def rlf(fail_import, fail_run): - monkeypatch.setenv('FAILIMPORT', fail_import) - monkeypatch.setenv('FAILTEST', fail_run) + monkeypatch.setenv("FAILIMPORT", 
fail_import) + monkeypatch.setenv("FAILTEST", fail_run) - testdir.runpytest('-q') + testdir.runpytest("-q") config = testdir.parseconfigure() lastfailed = config.cache.get("cache/lastfailed", -1) return lastfailed @@ -431,23 +461,26 @@ class TestLastFailed(object): assert lastfailed == -1 lastfailed = rlf(fail_import=1, fail_run=0) - assert list(lastfailed) == ['test_maybe.py'] + assert list(lastfailed) == ["test_maybe.py"] lastfailed = rlf(fail_import=0, fail_run=1) - assert list(lastfailed) == ['test_maybe.py::test_hello'] + assert list(lastfailed) == ["test_maybe.py::test_hello"] def test_lastfailed_failure_subset(self, testdir, monkeypatch): - testdir.makepyfile(test_maybe=""" + testdir.makepyfile( + test_maybe=""" import os env = os.environ if '1' == env['FAILIMPORT']: raise ImportError('fail') def test_hello(): assert '0' == env['FAILTEST'] - """) + """ + ) - testdir.makepyfile(test_maybe2=""" + testdir.makepyfile( + test_maybe2=""" import os env = os.environ if '1' == env['FAILIMPORT']: @@ -457,93 +490,104 @@ class TestLastFailed(object): def test_pass(): pass - """) + """ + ) def rlf(fail_import, fail_run, args=()): - monkeypatch.setenv('FAILIMPORT', fail_import) - monkeypatch.setenv('FAILTEST', fail_run) + monkeypatch.setenv("FAILIMPORT", fail_import) + monkeypatch.setenv("FAILTEST", fail_run) - result = testdir.runpytest('-q', '--lf', *args) + result = testdir.runpytest("-q", "--lf", *args) config = testdir.parseconfigure() lastfailed = config.cache.get("cache/lastfailed", -1) return result, lastfailed result, lastfailed = rlf(fail_import=0, fail_run=0) assert lastfailed == -1 - result.stdout.fnmatch_lines([ - '*3 passed*', - ]) + result.stdout.fnmatch_lines(["*3 passed*"]) result, lastfailed = rlf(fail_import=1, fail_run=0) - assert sorted(list(lastfailed)) == ['test_maybe.py', 'test_maybe2.py'] + assert sorted(list(lastfailed)) == ["test_maybe.py", "test_maybe2.py"] - result, lastfailed = rlf(fail_import=0, fail_run=0, - args=('test_maybe2.py',)) - assert list(lastfailed) == ['test_maybe.py'] + result, lastfailed = rlf(fail_import=0, fail_run=0, args=("test_maybe2.py",)) + assert list(lastfailed) == ["test_maybe.py"] # edge case of test selection - even if we remember failures # from other tests we still need to run all tests if no test # matches the failures - result, lastfailed = rlf(fail_import=0, fail_run=0, - args=('test_maybe2.py',)) - assert list(lastfailed) == ['test_maybe.py'] - result.stdout.fnmatch_lines([ - '*2 passed*', - ]) + result, lastfailed = rlf(fail_import=0, fail_run=0, args=("test_maybe2.py",)) + assert list(lastfailed) == ["test_maybe.py"] + result.stdout.fnmatch_lines(["*2 passed*"]) def test_lastfailed_creates_cache_when_needed(self, testdir): # Issue #1342 - testdir.makepyfile(test_empty='') - testdir.runpytest('-q', '--lf') - assert not os.path.exists('.pytest_cache/v/cache/lastfailed') + testdir.makepyfile(test_empty="") + testdir.runpytest("-q", "--lf") + assert not os.path.exists(".pytest_cache/v/cache/lastfailed") - testdir.makepyfile(test_successful='def test_success():\n assert True') - testdir.runpytest('-q', '--lf') - assert not os.path.exists('.pytest_cache/v/cache/lastfailed') + testdir.makepyfile(test_successful="def test_success():\n assert True") + testdir.runpytest("-q", "--lf") + assert not os.path.exists(".pytest_cache/v/cache/lastfailed") - testdir.makepyfile(test_errored='def test_error():\n assert False') - testdir.runpytest('-q', '--lf') - assert os.path.exists('.pytest_cache/v/cache/lastfailed') + 
testdir.makepyfile(test_errored="def test_error():\n assert False") + testdir.runpytest("-q", "--lf") + assert os.path.exists(".pytest_cache/v/cache/lastfailed") def test_xfail_not_considered_failure(self, testdir): - testdir.makepyfile(''' + testdir.makepyfile( + """ import pytest @pytest.mark.xfail def test(): assert 0 - ''') + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines('*1 xfailed*') + result.stdout.fnmatch_lines("*1 xfailed*") assert self.get_cached_last_failed(testdir) == [] def test_xfail_strict_considered_failure(self, testdir): - testdir.makepyfile(''' + testdir.makepyfile( + """ import pytest @pytest.mark.xfail(strict=True) def test(): pass - ''') + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines('*1 failed*') - assert self.get_cached_last_failed(testdir) == ['test_xfail_strict_considered_failure.py::test'] + result.stdout.fnmatch_lines("*1 failed*") + assert ( + self.get_cached_last_failed(testdir) + == ["test_xfail_strict_considered_failure.py::test"] + ) - @pytest.mark.parametrize('mark', ['mark.xfail', 'mark.skip']) + @pytest.mark.parametrize("mark", ["mark.xfail", "mark.skip"]) def test_failed_changed_to_xfail_or_skip(self, testdir, mark): - testdir.makepyfile(''' + testdir.makepyfile( + """ import pytest def test(): assert 0 - ''') + """ + ) result = testdir.runpytest() - assert self.get_cached_last_failed(testdir) == ['test_failed_changed_to_xfail_or_skip.py::test'] + assert ( + self.get_cached_last_failed(testdir) + == ["test_failed_changed_to_xfail_or_skip.py::test"] + ) assert result.ret == 1 - testdir.makepyfile(''' + testdir.makepyfile( + """ import pytest @pytest.{mark} def test(): assert 0 - '''.format(mark=mark)) + """.format( + mark=mark + ) + ) result = testdir.runpytest() assert result.ret == 0 assert self.get_cached_last_failed(testdir) == [] @@ -558,59 +602,72 @@ class TestLastFailed(object): Test workflow where user fixes errors gradually file by file using --lf. """ # 1. initial run - test_bar = testdir.makepyfile(test_bar=""" + test_bar = testdir.makepyfile( + test_bar=""" def test_bar_1(): pass def test_bar_2(): assert 0 - """) - test_foo = testdir.makepyfile(test_foo=""" + """ + ) + test_foo = testdir.makepyfile( + test_foo=""" def test_foo_3(): pass def test_foo_4(): assert 0 - """) + """ + ) testdir.runpytest() - assert self.get_cached_last_failed(testdir) == ['test_bar.py::test_bar_2', 'test_foo.py::test_foo_4'] + assert ( + self.get_cached_last_failed(testdir) + == ["test_bar.py::test_bar_2", "test_foo.py::test_foo_4"] + ) # 2. fix test_bar_2, run only test_bar.py - testdir.makepyfile(test_bar=""" + testdir.makepyfile( + test_bar=""" def test_bar_1(): pass def test_bar_2(): pass - """) + """ + ) result = testdir.runpytest(test_bar) - result.stdout.fnmatch_lines('*2 passed*') + result.stdout.fnmatch_lines("*2 passed*") # ensure cache does not forget that test_foo_4 failed once before - assert self.get_cached_last_failed(testdir) == ['test_foo.py::test_foo_4'] + assert self.get_cached_last_failed(testdir) == ["test_foo.py::test_foo_4"] - result = testdir.runpytest('--last-failed') - result.stdout.fnmatch_lines('*1 failed, 3 deselected*') - assert self.get_cached_last_failed(testdir) == ['test_foo.py::test_foo_4'] + result = testdir.runpytest("--last-failed") + result.stdout.fnmatch_lines("*1 failed, 3 deselected*") + assert self.get_cached_last_failed(testdir) == ["test_foo.py::test_foo_4"] # 3. 
fix test_foo_4, run only test_foo.py - test_foo = testdir.makepyfile(test_foo=""" + test_foo = testdir.makepyfile( + test_foo=""" def test_foo_3(): pass def test_foo_4(): pass - """) - result = testdir.runpytest(test_foo, '--last-failed') - result.stdout.fnmatch_lines('*1 passed, 1 deselected*') + """ + ) + result = testdir.runpytest(test_foo, "--last-failed") + result.stdout.fnmatch_lines("*1 passed, 1 deselected*") assert self.get_cached_last_failed(testdir) == [] - result = testdir.runpytest('--last-failed') - result.stdout.fnmatch_lines('*4 passed*') + result = testdir.runpytest("--last-failed") + result.stdout.fnmatch_lines("*4 passed*") assert self.get_cached_last_failed(testdir) == [] def test_lastfailed_no_failures_behavior_all_passed(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_1(): assert True def test_2(): assert True - """) + """ + ) result = testdir.runpytest() result.stdout.fnmatch_lines(["*2 passed*"]) result = testdir.runpytest("--lf") @@ -621,12 +678,14 @@ class TestLastFailed(object): result.stdout.fnmatch_lines(["*2 desel*"]) def test_lastfailed_no_failures_behavior_empty_cache(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_1(): assert True def test_2(): assert False - """) + """ + ) result = testdir.runpytest("--lf", "--cache-clear") result.stdout.fnmatch_lines(["*1 failed*1 passed*"]) result = testdir.runpytest("--lf", "--cache-clear", "--lfnf", "all") @@ -636,42 +695,49 @@ class TestLastFailed(object): class TestNewFirst(object): - def test_newfirst_usecase(self, testdir): - testdir.makepyfile(**{ - 'test_1/test_1.py': ''' - def test_1(): assert 1 - def test_2(): assert 1 - def test_3(): assert 1 - ''', - 'test_2/test_2.py': ''' - def test_1(): assert 1 - def test_2(): assert 1 - def test_3(): assert 1 - ''' - }) - testdir.tmpdir.join('test_1/test_1.py').setmtime(1) + def test_newfirst_usecase(self, testdir): + testdir.makepyfile( + **{ + "test_1/test_1.py": """ + def test_1(): assert 1 + def test_2(): assert 1 + def test_3(): assert 1 + """, + "test_2/test_2.py": """ + def test_1(): assert 1 + def test_2(): assert 1 + def test_3(): assert 1 + """, + } + ) + + testdir.tmpdir.join("test_1/test_1.py").setmtime(1) result = testdir.runpytest("-v") - result.stdout.fnmatch_lines([ - "*test_1/test_1.py::test_1 PASSED*", - "*test_1/test_1.py::test_2 PASSED*", - "*test_1/test_1.py::test_3 PASSED*", - "*test_2/test_2.py::test_1 PASSED*", - "*test_2/test_2.py::test_2 PASSED*", - "*test_2/test_2.py::test_3 PASSED*", - ]) + result.stdout.fnmatch_lines( + [ + "*test_1/test_1.py::test_1 PASSED*", + "*test_1/test_1.py::test_2 PASSED*", + "*test_1/test_1.py::test_3 PASSED*", + "*test_2/test_2.py::test_1 PASSED*", + "*test_2/test_2.py::test_2 PASSED*", + "*test_2/test_2.py::test_3 PASSED*", + ] + ) result = testdir.runpytest("-v", "--nf") - result.stdout.fnmatch_lines([ - "*test_2/test_2.py::test_1 PASSED*", - "*test_2/test_2.py::test_2 PASSED*", - "*test_2/test_2.py::test_3 PASSED*", - "*test_1/test_1.py::test_1 PASSED*", - "*test_1/test_1.py::test_2 PASSED*", - "*test_1/test_1.py::test_3 PASSED*", - ]) + result.stdout.fnmatch_lines( + [ + "*test_2/test_2.py::test_1 PASSED*", + "*test_2/test_2.py::test_2 PASSED*", + "*test_2/test_2.py::test_3 PASSED*", + "*test_1/test_1.py::test_1 PASSED*", + "*test_1/test_1.py::test_2 PASSED*", + "*test_1/test_1.py::test_3 PASSED*", + ] + ) testdir.tmpdir.join("test_1/test_1.py").write( "def test_1(): assert 1\n" @@ -679,66 +745,76 @@ class TestNewFirst(object): "def test_3(): assert 
1\n"
             "def test_4(): assert 1\n"
         )
-        testdir.tmpdir.join('test_1/test_1.py').setmtime(1)
+        testdir.tmpdir.join("test_1/test_1.py").setmtime(1)
 
         result = testdir.runpytest("-v", "--nf")
-        result.stdout.fnmatch_lines([
-            "*test_1/test_1.py::test_4 PASSED*",
-            "*test_2/test_2.py::test_1 PASSED*",
-            "*test_2/test_2.py::test_2 PASSED*",
-            "*test_2/test_2.py::test_3 PASSED*",
-            "*test_1/test_1.py::test_1 PASSED*",
-            "*test_1/test_1.py::test_2 PASSED*",
-            "*test_1/test_1.py::test_3 PASSED*",
-        ])
+        result.stdout.fnmatch_lines(
+            [
+                "*test_1/test_1.py::test_4 PASSED*",
+                "*test_2/test_2.py::test_1 PASSED*",
+                "*test_2/test_2.py::test_2 PASSED*",
+                "*test_2/test_2.py::test_3 PASSED*",
+                "*test_1/test_1.py::test_1 PASSED*",
+                "*test_1/test_1.py::test_2 PASSED*",
+                "*test_1/test_1.py::test_3 PASSED*",
+            ]
+        )
 
     def test_newfirst_parametrize(self, testdir):
-        testdir.makepyfile(**{
-            'test_1/test_1.py': '''
+        testdir.makepyfile(
+            **{
+                "test_1/test_1.py": """
                 import pytest
                 @pytest.mark.parametrize('num', [1, 2])
                 def test_1(num): assert num
-            ''',
-            'test_2/test_2.py': '''
+            """,
+                "test_2/test_2.py": """
                 import pytest
                 @pytest.mark.parametrize('num', [1, 2])
                 def test_1(num): assert num
-            '''
-        })
+            """,
+            }
+        )
 
-        testdir.tmpdir.join('test_1/test_1.py').setmtime(1)
+        testdir.tmpdir.join("test_1/test_1.py").setmtime(1)
 
         result = testdir.runpytest("-v")
-        result.stdout.fnmatch_lines([
-            "*test_1/test_1.py::test_1[1*",
-            "*test_1/test_1.py::test_1[2*",
-            "*test_2/test_2.py::test_1[1*",
-            "*test_2/test_2.py::test_1[2*"
-        ])
+        result.stdout.fnmatch_lines(
+            [
+                "*test_1/test_1.py::test_1[1*",
+                "*test_1/test_1.py::test_1[2*",
+                "*test_2/test_2.py::test_1[1*",
+                "*test_2/test_2.py::test_1[2*",
+            ]
+        )
 
         result = testdir.runpytest("-v", "--nf")
-        result.stdout.fnmatch_lines([
-            "*test_2/test_2.py::test_1[1*",
-            "*test_2/test_2.py::test_1[2*",
-            "*test_1/test_1.py::test_1[1*",
-            "*test_1/test_1.py::test_1[2*",
-        ])
+        result.stdout.fnmatch_lines(
+            [
+                "*test_2/test_2.py::test_1[1*",
+                "*test_2/test_2.py::test_1[2*",
+                "*test_1/test_1.py::test_1[1*",
+                "*test_1/test_1.py::test_1[2*",
+            ]
+        )
 
         testdir.tmpdir.join("test_1/test_1.py").write(
             "import pytest\n"
             "@pytest.mark.parametrize('num', [1, 2, 3])\n"
             "def test_1(num): assert num\n"
         )
-        testdir.tmpdir.join('test_1/test_1.py').setmtime(1)
+        testdir.tmpdir.join("test_1/test_1.py").setmtime(1)
 
         result = testdir.runpytest("-v", "--nf")
-        result.stdout.fnmatch_lines([
-            "*test_1/test_1.py::test_1[3*",
-            "*test_2/test_2.py::test_1[1*",
-            "*test_2/test_2.py::test_1[2*",
-            "*test_1/test_1.py::test_1[1*",
-            "*test_1/test_1.py::test_1[2*",
-        ])
+        result.stdout.fnmatch_lines(
+            [
+                "*test_1/test_1.py::test_1[3*",
+                "*test_2/test_2.py::test_1[1*",
+                "*test_2/test_2.py::test_1[2*",
+                "*test_1/test_1.py::test_1[1*",
+                "*test_1/test_1.py::test_1[2*",
+            ]
+        )
diff --git a/testing/test_capture.py b/testing/test_capture.py
index bc8ae6534..5cedc99bb 100644
--- a/testing/test_capture.py
+++ b/testing/test_capture.py
@@ -1,4 +1,5 @@
 from __future__ import absolute_import, division, print_function
+
 # note: py.io capture tests were copied from
 # pylib 1.4.20.dev2 (rev 13d9af95547e)
 from __future__ import with_statement
@@ -11,7 +12,7 @@ import _pytest._code
 import py
 import pytest
 import contextlib
-
+from six import binary_type, text_type
 from _pytest import capture
 from _pytest.capture import CaptureManager
 from _pytest.main import EXIT_NOTESTSCOLLECTED
@@ -19,30 +20,19 @@ from _pytest.main import EXIT_NOTESTSCOLLECTED
 
 needsosdup = pytest.mark.xfail("not hasattr(os, 'dup')")
 
-if 
sys.version_info >= (3, 0): - def tobytes(obj): - if isinstance(obj, str): - obj = obj.encode('UTF-8') - assert isinstance(obj, bytes) - return obj - def totext(obj): - if isinstance(obj, bytes): - obj = str(obj, 'UTF-8') - assert isinstance(obj, str) - return obj -else: - def tobytes(obj): - if isinstance(obj, unicode): - obj = obj.encode('UTF-8') - assert isinstance(obj, str) - return obj +def tobytes(obj): + if isinstance(obj, text_type): + obj = obj.encode("UTF-8") + assert isinstance(obj, binary_type) + return obj - def totext(obj): - if isinstance(obj, str): - obj = unicode(obj, 'UTF-8') - assert isinstance(obj, unicode) - return obj + +def totext(obj): + if isinstance(obj, binary_type): + obj = text_type(obj, "UTF-8") + assert isinstance(obj, text_type) + return obj def oswritebytes(fd, obj): @@ -58,21 +48,24 @@ def StdCapture(out=True, err=True, in_=True): class TestCaptureManager(object): + def test_getmethod_default_no_fd(self, monkeypatch): from _pytest.capture import pytest_addoption from _pytest.config import Parser + parser = Parser() pytest_addoption(parser) default = parser._groups[0].options[0].default assert default == "fd" if hasattr(os, "dup") else "sys" parser = Parser() - monkeypatch.delattr(os, 'dup', raising=False) + monkeypatch.delattr(os, "dup", raising=False) pytest_addoption(parser) assert parser._groups[0].options[0].default == "sys" @needsosdup - @pytest.mark.parametrize("method", - ['no', 'sys', pytest.mark.skipif('not hasattr(os, "dup")', 'fd')]) + @pytest.mark.parametrize( + "method", ["no", "sys", pytest.mark.skipif('not hasattr(os, "dup")', "fd")] + ) def test_capturing_basic_api(self, method): capouter = StdCaptureFD() old = sys.stdout, sys.stderr, sys.stdin @@ -110,7 +103,7 @@ class TestCaptureManager(object): capouter.stop_capturing() -@pytest.mark.parametrize("method", ['fd', 'sys']) +@pytest.mark.parametrize("method", ["fd", "sys"]) def test_capturing_unicode(testdir, method): if hasattr(sys, "pypy_version_info") and sys.pypy_version_info < (2, 2): pytest.xfail("does not work on pypy < 2.2") @@ -118,47 +111,49 @@ def test_capturing_unicode(testdir, method): obj = "'b\u00f6y'" else: obj = "u'\u00f6y'" - testdir.makepyfile(""" + testdir.makepyfile( + """ # coding=utf8 # taken from issue 227 from nosetests def test_unicode(): import sys print (sys.stdout) print (%s) - """ % obj) + """ + % obj + ) result = testdir.runpytest("--capture=%s" % method) - result.stdout.fnmatch_lines([ - "*1 passed*" - ]) + result.stdout.fnmatch_lines(["*1 passed*"]) -@pytest.mark.parametrize("method", ['fd', 'sys']) +@pytest.mark.parametrize("method", ["fd", "sys"]) def test_capturing_bytes_in_utf8_encoding(testdir, method): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_unicode(): print ('b\\u00f6y') - """) + """ + ) result = testdir.runpytest("--capture=%s" % method) - result.stdout.fnmatch_lines([ - "*1 passed*" - ]) + result.stdout.fnmatch_lines(["*1 passed*"]) def test_collect_capturing(testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ print ("collect %s failure" % 13) import xyz42123 - """) + """ + ) result = testdir.runpytest(p) - result.stdout.fnmatch_lines([ - "*Captured stdout*", - "*collect 13 failure*", - ]) + result.stdout.fnmatch_lines(["*Captured stdout*", "*collect 13 failure*"]) class TestPerTestCapturing(object): + def test_capture_and_fixtures(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ def setup_module(mod): print ("setup module") def setup_function(function): @@ -169,19 +164,23 @@ class 
TestPerTestCapturing(object): def test_func2(): print ("in func2") assert 0 - """) + """ + ) result = testdir.runpytest(p) - result.stdout.fnmatch_lines([ - "setup module*", - "setup test_func1*", - "in func1*", - "setup test_func2*", - "in func2*", - ]) + result.stdout.fnmatch_lines( + [ + "setup module*", + "setup test_func1*", + "in func1*", + "setup test_func2*", + "in func2*", + ] + ) @pytest.mark.xfail(reason="unimplemented feature") def test_capture_scope_cache(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import sys def setup_module(func): print ("module-setup") @@ -192,32 +191,38 @@ class TestPerTestCapturing(object): assert 0 def teardown_function(func): print ("in teardown") - """) + """ + ) result = testdir.runpytest(p) - result.stdout.fnmatch_lines([ - "*test_func():*", - "*Captured stdout during setup*", - "module-setup*", - "function-setup*", - "*Captured stdout*", - "in teardown*", - ]) + result.stdout.fnmatch_lines( + [ + "*test_func():*", + "*Captured stdout during setup*", + "module-setup*", + "function-setup*", + "*Captured stdout*", + "in teardown*", + ] + ) def test_no_carry_over(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ def test_func1(): print ("in func1") def test_func2(): print ("in func2") assert 0 - """) + """ + ) result = testdir.runpytest(p) s = result.stdout.str() assert "in func1" not in s assert "in func2" in s def test_teardown_capturing(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ def setup_function(function): print ("setup func1") def teardown_function(function): @@ -226,35 +231,43 @@ class TestPerTestCapturing(object): def test_func1(): print ("in func1") pass - """) + """ + ) result = testdir.runpytest(p) - result.stdout.fnmatch_lines([ - '*teardown_function*', - '*Captured stdout*', - "setup func1*", - "in func1*", - "teardown func1*", - # "*1 fixture failure*" - ]) + result.stdout.fnmatch_lines( + [ + "*teardown_function*", + "*Captured stdout*", + "setup func1*", + "in func1*", + "teardown func1*", + # "*1 fixture failure*" + ] + ) def test_teardown_capturing_final(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ def teardown_module(mod): print ("teardown module") assert 0 def test_func(): pass - """) + """ + ) result = testdir.runpytest(p) - result.stdout.fnmatch_lines([ - "*def teardown_module(mod):*", - "*Captured stdout*", - "*teardown module*", - "*1 error*", - ]) + result.stdout.fnmatch_lines( + [ + "*def teardown_module(mod):*", + "*Captured stdout*", + "*teardown module*", + "*1 error*", + ] + ) def test_capturing_outerr(self, testdir): - p1 = testdir.makepyfile(""" + p1 = testdir.makepyfile( + """ import sys def test_capturing(): print (42) @@ -263,35 +276,42 @@ class TestPerTestCapturing(object): print (1) sys.stderr.write(str(2)) raise ValueError - """) + """ + ) result = testdir.runpytest(p1) - result.stdout.fnmatch_lines([ - "*test_capturing_outerr.py .F*", - "====* FAILURES *====", - "____*____", - "*test_capturing_outerr.py:8: ValueError", - "*--- Captured stdout *call*", - "1", - "*--- Captured stderr *call*", - "2", - ]) + result.stdout.fnmatch_lines( + [ + "*test_capturing_outerr.py .F*", + "====* FAILURES *====", + "____*____", + "*test_capturing_outerr.py:8: ValueError", + "*--- Captured stdout *call*", + "1", + "*--- Captured stderr *call*", + "2", + ] + ) class TestLoggingInteraction(object): + def test_logging_stream_ownership(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ 
def test_logging(): import logging import pytest stream = capture.CaptureIO() logging.basicConfig(stream=stream) stream.close() # to free memory/release resources - """) + """ + ) result = testdir.runpytest_subprocess(p) assert result.stderr.str().find("atexit") == -1 def test_logging_and_immediate_setupteardown(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import logging def setup_function(function): logging.warn("hello1") @@ -303,21 +323,21 @@ class TestLoggingInteraction(object): def teardown_function(function): logging.warn("hello3") assert 0 - """) - for optargs in (('--capture=sys',), ('--capture=fd',)): + """ + ) + for optargs in (("--capture=sys",), ("--capture=fd",)): print(optargs) result = testdir.runpytest_subprocess(p, *optargs) s = result.stdout.str() - result.stdout.fnmatch_lines([ - "*WARN*hello3", # errors show first! - "*WARN*hello1", - "*WARN*hello2", - ]) + result.stdout.fnmatch_lines( + ["*WARN*hello3", "*WARN*hello1", "*WARN*hello2"] # errors show first! + ) # verify proper termination assert "closed" not in s def test_logging_and_crossscope_fixtures(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import logging def setup_module(function): logging.warn("hello1") @@ -329,137 +349,156 @@ class TestLoggingInteraction(object): def teardown_module(function): logging.warn("hello3") assert 0 - """) - for optargs in (('--capture=sys',), ('--capture=fd',)): + """ + ) + for optargs in (("--capture=sys",), ("--capture=fd",)): print(optargs) result = testdir.runpytest_subprocess(p, *optargs) s = result.stdout.str() - result.stdout.fnmatch_lines([ - "*WARN*hello3", # errors come first - "*WARN*hello1", - "*WARN*hello2", - ]) + result.stdout.fnmatch_lines( + ["*WARN*hello3", "*WARN*hello1", "*WARN*hello2"] # errors come first + ) # verify proper termination assert "closed" not in s def test_conftestlogging_is_shown(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ import logging logging.basicConfig() logging.warn("hello435") - """) + """ + ) # make sure that logging is still captured in tests result = testdir.runpytest_subprocess("-s", "-p", "no:capturelog") assert result.ret == EXIT_NOTESTSCOLLECTED - result.stderr.fnmatch_lines([ - "WARNING*hello435*", - ]) - assert 'operation on closed file' not in result.stderr.str() + result.stderr.fnmatch_lines(["WARNING*hello435*"]) + assert "operation on closed file" not in result.stderr.str() def test_conftestlogging_and_test_logging(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ import logging logging.basicConfig() - """) + """ + ) # make sure that logging is still captured in tests - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ def test_hello(): import logging logging.warn("hello433") assert 0 - """) + """ + ) result = testdir.runpytest_subprocess(p, "-p", "no:capturelog") assert result.ret != 0 - result.stdout.fnmatch_lines([ - "WARNING*hello433*", - ]) - assert 'something' not in result.stderr.str() - assert 'operation on closed file' not in result.stderr.str() + result.stdout.fnmatch_lines(["WARNING*hello433*"]) + assert "something" not in result.stderr.str() + assert "operation on closed file" not in result.stderr.str() class TestCaptureFixture(object): + @pytest.mark.parametrize("opt", [[], ["-s"]]) def test_std_functional(self, testdir, opt): - reprec = testdir.inline_runsource(""" + reprec = testdir.inline_runsource( + """ def test_hello(capsys): print (42) out, err = capsys.readouterr() assert 
out.startswith("42") - """, *opt) + """, + *opt + ) reprec.assertoutcome(passed=1) def test_capsyscapfd(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ def test_one(capsys, capfd): pass def test_two(capfd, capsys): pass - """) + """ + ) result = testdir.runpytest(p) - result.stdout.fnmatch_lines([ - "*ERROR*setup*test_one*", - "E*capfd*capsys*same*time*", - "*ERROR*setup*test_two*", - "E*capsys*capfd*same*time*", - "*2 error*"]) + result.stdout.fnmatch_lines( + [ + "*ERROR*setup*test_one*", + "E*capfd*capsys*same*time*", + "*ERROR*setup*test_two*", + "E*capsys*capfd*same*time*", + "*2 error*", + ] + ) def test_capturing_getfixturevalue(self, testdir): """Test that asking for "capfd" and "capsys" using request.getfixturevalue in the same test is an error. """ - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_one(capsys, request): request.getfixturevalue("capfd") def test_two(capfd, request): request.getfixturevalue("capsys") - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*test_one*", - "*capsys*capfd*same*time*", - "*test_two*", - "*capfd*capsys*same*time*", - "*2 failed in*", - ]) + result.stdout.fnmatch_lines( + [ + "*test_one*", + "*capsys*capfd*same*time*", + "*test_two*", + "*capfd*capsys*same*time*", + "*2 failed in*", + ] + ) def test_capsyscapfdbinary(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ def test_one(capsys, capfdbinary): pass - """) + """ + ) result = testdir.runpytest(p) - result.stdout.fnmatch_lines([ - "*ERROR*setup*test_one*", - "E*capfdbinary*capsys*same*time*", - "*1 error*"]) + result.stdout.fnmatch_lines( + ["*ERROR*setup*test_one*", "E*capfdbinary*capsys*same*time*", "*1 error*"] + ) @pytest.mark.parametrize("method", ["sys", "fd"]) def test_capture_is_represented_on_failure_issue128(self, testdir, method): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ def test_hello(cap%s): print ("xxx42xxx") assert 0 - """ % method) + """ + % method + ) result = testdir.runpytest(p) - result.stdout.fnmatch_lines([ - "xxx42xxx", - ]) + result.stdout.fnmatch_lines(["xxx42xxx"]) @needsosdup def test_stdfd_functional(self, testdir): - reprec = testdir.inline_runsource(""" + reprec = testdir.inline_runsource( + """ def test_hello(capfd): import os os.write(1, "42".encode('ascii')) out, err = capfd.readouterr() assert out.startswith("42") capfd.close() - """) + """ + ) reprec.assertoutcome(passed=1) @needsosdup def test_capfdbinary(self, testdir): - reprec = testdir.inline_runsource(""" + reprec = testdir.inline_runsource( + """ def test_hello(capfdbinary): import os # some likely un-decodable bytes @@ -467,15 +506,16 @@ class TestCaptureFixture(object): out, err = capfdbinary.readouterr() assert out == b'\\xfe\\x98\\x20' assert err == b'' - """) + """ + ) reprec.assertoutcome(passed=1) @pytest.mark.skipif( - sys.version_info < (3,), - reason='only have capsysbinary in python 3', + sys.version_info < (3,), reason="only have capsysbinary in python 3" ) def test_capsysbinary(self, testdir): - reprec = testdir.inline_runsource(""" + reprec = testdir.inline_runsource( + """ def test_hello(capsysbinary): import sys # some likely un-decodable bytes @@ -483,64 +523,70 @@ class TestCaptureFixture(object): out, err = capsysbinary.readouterr() assert out == b'\\xfe\\x98\\x20' assert err == b'' - """) + """ + ) reprec.assertoutcome(passed=1) @pytest.mark.skipif( - sys.version_info >= (3,), - reason='only have capsysbinary in python 3', + sys.version_info >= (3,), reason="only 
have capsysbinary in python 3" ) def test_capsysbinary_forbidden_in_python2(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_hello(capsysbinary): pass - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*test_hello*", - "*capsysbinary is only supported on python 3*", - "*1 error in*", - ]) + result.stdout.fnmatch_lines( + [ + "*test_hello*", + "*capsysbinary is only supported on python 3*", + "*1 error in*", + ] + ) def test_partial_setup_failure(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ def test_hello(capsys, missingarg): pass - """) + """ + ) result = testdir.runpytest(p) - result.stdout.fnmatch_lines([ - "*test_partial_setup_failure*", - "*1 error*", - ]) + result.stdout.fnmatch_lines(["*test_partial_setup_failure*", "*1 error*"]) @needsosdup def test_keyboardinterrupt_disables_capturing(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ def test_hello(capfd): import os os.write(1, str(42).encode('ascii')) raise KeyboardInterrupt() - """) + """ + ) result = testdir.runpytest_subprocess(p) - result.stdout.fnmatch_lines([ - "*KeyboardInterrupt*" - ]) + result.stdout.fnmatch_lines(["*KeyboardInterrupt*"]) assert result.ret == 2 @pytest.mark.issue14 def test_capture_and_logging(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import logging def test_log(capsys): logging.error('x') - """) + """ + ) result = testdir.runpytest_subprocess(p) - assert 'closed' not in result.stderr.str() + assert "closed" not in result.stderr.str() - @pytest.mark.parametrize('fixture', ['capsys', 'capfd']) - @pytest.mark.parametrize('no_capture', [True, False]) + @pytest.mark.parametrize("fixture", ["capsys", "capfd"]) + @pytest.mark.parametrize("no_capture", [True, False]) def test_disabled_capture_fixture(self, testdir, fixture, no_capture): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_disabled({fixture}): print('captured before') with {fixture}.disabled(): @@ -550,25 +596,31 @@ class TestCaptureFixture(object): def test_normal(): print('test_normal executed') - """.format(fixture=fixture)) - args = ('-s',) if no_capture else () + """.format( + fixture=fixture + ) + ) + args = ("-s",) if no_capture else () result = testdir.runpytest_subprocess(*args) - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ *while capture is disabled* - """) - assert 'captured before' not in result.stdout.str() - assert 'captured after' not in result.stdout.str() + """ + ) + assert "captured before" not in result.stdout.str() + assert "captured after" not in result.stdout.str() if no_capture: - assert 'test_normal executed' in result.stdout.str() + assert "test_normal executed" in result.stdout.str() else: - assert 'test_normal executed' not in result.stdout.str() + assert "test_normal executed" not in result.stdout.str() - @pytest.mark.parametrize('fixture', ['capsys', 'capfd']) + @pytest.mark.parametrize("fixture", ["capsys", "capfd"]) def test_fixture_use_by_other_fixtures(self, testdir, fixture): """ Ensure that capsys and capfd can be used by other fixtures during setup and teardown. 
""" - testdir.makepyfile(""" + testdir.makepyfile( + """ from __future__ import print_function import sys import pytest @@ -591,83 +643,96 @@ class TestCaptureFixture(object): out, err = captured_print assert out == 'stdout contents begin\\n' assert err == 'stderr contents begin\\n' - """.format(fixture=fixture)) + """.format( + fixture=fixture + ) + ) result = testdir.runpytest_subprocess() result.stdout.fnmatch_lines("*1 passed*") - assert 'stdout contents begin' not in result.stdout.str() - assert 'stderr contents begin' not in result.stdout.str() + assert "stdout contents begin" not in result.stdout.str() + assert "stderr contents begin" not in result.stdout.str() def test_setup_failure_does_not_kill_capturing(testdir): sub1 = testdir.mkpydir("sub1") - sub1.join("conftest.py").write(_pytest._code.Source(""" + sub1.join("conftest.py").write( + _pytest._code.Source( + """ def pytest_runtest_setup(item): raise ValueError(42) - """)) + """ + ) + ) sub1.join("test_mod.py").write("def test_func1(): pass") - result = testdir.runpytest(testdir.tmpdir, '--traceconfig') - result.stdout.fnmatch_lines([ - "*ValueError(42)*", - "*1 error*" - ]) + result = testdir.runpytest(testdir.tmpdir, "--traceconfig") + result.stdout.fnmatch_lines(["*ValueError(42)*", "*1 error*"]) def test_fdfuncarg_skips_on_no_osdup(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import os if hasattr(os, 'dup'): del os.dup def test_hello(capfd): pass - """) + """ + ) result = testdir.runpytest_subprocess("--capture=no") - result.stdout.fnmatch_lines([ - "*1 skipped*" - ]) + result.stdout.fnmatch_lines(["*1 skipped*"]) def test_capture_conftest_runtest_setup(testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_runtest_setup(): print ("hello19") - """) + """ + ) testdir.makepyfile("def test_func(): pass") result = testdir.runpytest() assert result.ret == 0 - assert 'hello19' not in result.stdout.str() + assert "hello19" not in result.stdout.str() def test_capture_badoutput_issue412(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import os def test_func(): omg = bytearray([1,129,1]) os.write(1, omg) assert 0 - """) - result = testdir.runpytest('--cap=fd') - result.stdout.fnmatch_lines(''' + """ + ) + result = testdir.runpytest("--cap=fd") + result.stdout.fnmatch_lines( + """ *def test_func* *assert 0* *Captured* *1 failed* - ''') + """ + ) def test_capture_early_option_parsing(testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_runtest_setup(): print ("hello19") - """) + """ + ) testdir.makepyfile("def test_func(): pass") result = testdir.runpytest("-vs") assert result.ret == 0 - assert 'hello19' in result.stdout.str() + assert "hello19" in result.stdout.str() def test_capture_binary_output(testdir): - testdir.makepyfile(r""" + testdir.makepyfile( + r""" import pytest def test_a(): @@ -680,31 +745,33 @@ def test_capture_binary_output(testdir): if __name__ == '__main__': test_foo() - """) - result = testdir.runpytest('--assert=plain') + """ + ) + result = testdir.runpytest("--assert=plain") result.assert_outcomes(passed=2) def test_error_during_readouterr(testdir): """Make sure we suspend capturing if errors occur during readouterr""" - testdir.makepyfile(pytest_xyz=""" + testdir.makepyfile( + pytest_xyz=""" from _pytest.capture import FDCapture def bad_snap(self): raise Exception('boom') assert FDCapture.snap FDCapture.snap = bad_snap - """) + """ + ) result = testdir.runpytest_subprocess( "-p", "pytest_xyz", "--version", syspathinsert=True ) - 
result.stderr.fnmatch_lines([ - "*in bad_snap", - " raise Exception('boom')", - "Exception: boom", - ]) + result.stderr.fnmatch_lines( + ["*in bad_snap", " raise Exception('boom')", "Exception: boom"] + ) class TestCaptureIO(object): + def test_text(self): f = capture.CaptureIO() f.write("hello") @@ -718,23 +785,20 @@ class TestCaptureIO(object): f.write("\u00f6") pytest.raises(TypeError, "f.write(bytes('hello', 'UTF-8'))") else: - f.write(unicode("\u00f6", 'UTF-8')) + f.write(text_type("\u00f6", "UTF-8")) f.write("hello") # bytes s = f.getvalue() f.close() - assert isinstance(s, unicode) + assert isinstance(s, text_type) - @pytest.mark.skipif( - sys.version_info[0] == 2, - reason='python 3 only behaviour', - ) + @pytest.mark.skipif(sys.version_info[0] == 2, reason="python 3 only behaviour") def test_write_bytes_to_buffer(self): """In python3, stdout / stderr are text io wrappers (exposing a buffer property of the underlying bytestream). See issue #1407 """ f = capture.CaptureIO() - f.buffer.write(b'foo\r\n') - assert f.getvalue() == 'foo\r\n' + f.buffer.write(b"foo\r\n") + assert f.getvalue() == "foo\r\n" def test_bytes_io(): @@ -747,6 +811,7 @@ def test_bytes_io(): def test_dontreadfrominput(): from _pytest.capture import DontReadFromInput + f = DontReadFromInput() assert not f.isatty() pytest.raises(IOError, f.read) @@ -757,9 +822,10 @@ def test_dontreadfrominput(): f.close() # just for completeness -@pytest.mark.skipif('sys.version_info < (3,)', reason='python2 has no buffer') +@pytest.mark.skipif("sys.version_info < (3,)", reason="python2 has no buffer") def test_dontreadfrominput_buffer_python3(): from _pytest.capture import DontReadFromInput + f = DontReadFromInput() fb = f.buffer assert not fb.isatty() @@ -771,9 +837,10 @@ def test_dontreadfrominput_buffer_python3(): f.close() # just for completeness -@pytest.mark.skipif('sys.version_info >= (3,)', reason='python2 has no buffer') +@pytest.mark.skipif("sys.version_info >= (3,)", reason="python2 has no buffer") def test_dontreadfrominput_buffer_python2(): from _pytest.capture import DontReadFromInput + f = DontReadFromInput() with pytest.raises(AttributeError): f.buffer @@ -782,7 +849,7 @@ def test_dontreadfrominput_buffer_python2(): @pytest.yield_fixture def tmpfile(testdir): - f = testdir.makepyfile("").open('wb+') + f = testdir.makepyfile("").open("wb+") yield f if not f.closed: f.close() @@ -820,7 +887,7 @@ def test_dupfile_on_bytesio(): f = capture.safe_text_dupfile(io, "wb") f.write("hello") assert io.getvalue() == b"hello" - assert 'BytesIO object' in f.name + assert "BytesIO object" in f.name def test_dupfile_on_textio(): @@ -828,7 +895,7 @@ def test_dupfile_on_textio(): f = capture.safe_text_dupfile(io, "wb") f.write("hello") assert io.getvalue() == "hello" - assert not hasattr(f, 'name') + assert not hasattr(f, "name") @contextlib.contextmanager @@ -870,7 +937,7 @@ class TestFDCapture(object): def test_simple_many_check_open_files(self, testdir): with lsof_check(): - with testdir.makepyfile("").open('wb+') as tmpfile: + with testdir.makepyfile("").open("wb+") as tmpfile: self.test_simple_many(tmpfile) def test_simple_fail_second_start(self, tmpfile): @@ -892,7 +959,7 @@ class TestFDCapture(object): cap.start() x = os.read(0, 100).strip() cap.done() - assert x == tobytes('') + assert x == tobytes("") def test_writeorg(self, tmpfile): data1, data2 = tobytes("foo"), tobytes("bar") @@ -904,7 +971,7 @@ class TestFDCapture(object): scap = cap.snap() cap.done() assert scap == totext(data1) - with open(tmpfile.name, 'rb') as 
stmp_file: + with open(tmpfile.name, "rb") as stmp_file: stmp = stmp_file.read() assert stmp == data2 @@ -994,14 +1061,15 @@ class TestStdCapture(object): out, err = cap.readouterr() assert out == py.builtin._totext("hx\xc4\x85\xc4\x87\n", "utf8") - @pytest.mark.skipif('sys.version_info >= (3,)', - reason='text output different for bytes on python3') + @pytest.mark.skipif( + "sys.version_info >= (3,)", reason="text output different for bytes on python3" + ) def test_capturing_readouterr_decode_error_handling(self): with self.getcapture() as cap: # triggered an internal error in pytest - print('\xa6') + print("\xa6") out, err = cap.readouterr() - assert out == py.builtin._totext('\ufffd\n', 'unicode-escape') + assert out == py.builtin._totext("\ufffd\n", "unicode-escape") def test_reset_twice_error(self): with self.getcapture() as cap: @@ -1073,18 +1141,22 @@ class TestStdCaptureFD(TestStdCapture): captureclass = staticmethod(StdCaptureFD) def test_simple_only_fd(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import os def test_x(): os.write(1, "hello\\n".encode("ascii")) assert 0 - """) + """ + ) result = testdir.runpytest_subprocess() - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ *test_x* *assert 0* *Captured stdout* - """) + """ + ) def test_intermingling(self): with self.getcapture() as cap: @@ -1111,7 +1183,8 @@ class TestStdCaptureFDinvalidFD(object): pytestmark = needsosdup def test_stdcapture_fd_invalid_fd(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import os from _pytest import capture def StdCaptureFD(out=True, err=True, in_=True): @@ -1129,10 +1202,11 @@ class TestStdCaptureFDinvalidFD(object): os.close(0) cap = StdCaptureFD(out=False, err=False, in_=True) cap.stop_capturing() - """) + """ + ) result = testdir.runpytest_subprocess("--capture=fd") assert result.ret == 0 - assert result.parseoutcomes()['passed'] == 3 + assert result.parseoutcomes()["passed"] == 3 def test_capture_not_started_but_reset(): @@ -1141,12 +1215,12 @@ def test_capture_not_started_but_reset(): def test_using_capsys_fixture_works_with_sys_stdout_encoding(capsys): - test_text = 'test text' + test_text = "test text" - print(test_text.encode(sys.stdout.encoding, 'replace')) + print(test_text.encode(sys.stdout.encoding, "replace")) (out, err) = capsys.readouterr() assert out - assert err == '' + assert err == "" def test_capsys_results_accessible_by_attribute(capsys): @@ -1158,7 +1232,7 @@ def test_capsys_results_accessible_by_attribute(capsys): @needsosdup -@pytest.mark.parametrize('use', [True, False]) +@pytest.mark.parametrize("use", [True, False]) def test_fdcapture_tmpfile_remains_the_same(tmpfile, use): if not use: tmpfile = True @@ -1175,29 +1249,34 @@ def test_fdcapture_tmpfile_remains_the_same(tmpfile, use): @needsosdup def test_close_and_capture_again(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import os def test_close(): os.close(1) def test_capture_again(): os.write(1, b"hello\\n") assert 0 - """) + """ + ) result = testdir.runpytest_subprocess() - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ *test_capture_again* *assert 0* *stdout* *hello* - """) + """ + ) -@pytest.mark.parametrize('method', ['SysCapture', 'FDCapture']) +@pytest.mark.parametrize("method", ["SysCapture", "FDCapture"]) def test_capturing_and_logging_fundamentals(testdir, method): - if method == "StdCaptureFD" and not hasattr(os, 'dup'): + if method == "StdCaptureFD" and not hasattr(os, "dup"): pytest.skip("need os.dup") 
     # here we check a fundamental feature
-    p = testdir.makepyfile("""
+    p = testdir.makepyfile(
+        """
         import sys, os
         import py, logging
         from _pytest import capture
@@ -1215,31 +1294,41 @@ def test_capturing_and_logging_fundamentals(testdir, method):
         outerr = cap.readouterr()
         print ("suspend2, captured %%s" %% (outerr,))
-    """ % (method,))
+    """
+        % (method,)
+    )
     result = testdir.runpython(p)
-    result.stdout.fnmatch_lines("""
+    result.stdout.fnmatch_lines(
+        """
         suspend, captured*hello1*
         suspend2, captured*WARNING:root:hello3*
-    """)
+    """
+    )
-    result.stderr.fnmatch_lines("""
+    result.stderr.fnmatch_lines(
+        """
         WARNING:root:hello2
-    """)
+    """
+    )
     assert "atexit" not in result.stderr.str()
 
 
 def test_error_attribute_issue555(testdir):
-    testdir.makepyfile("""
+    testdir.makepyfile(
+        """
         import sys
         def test_capattr():
            assert sys.stdout.errors == "strict"
            assert sys.stderr.errors == "strict"
-    """)
+    """
+    )
    reprec = testdir.inline_run()
    reprec.assertoutcome(passed=1)
 
 
-@pytest.mark.skipif(not sys.platform.startswith('win') and sys.version_info[:2] >= (3, 6),
-                    reason='only py3.6+ on windows')
+@pytest.mark.skipif(
+    not sys.platform.startswith("win") and sys.version_info[:2] >= (3, 6),
+    reason="only py3.6+ on windows",
+)
 def test_py36_windowsconsoleio_workaround_non_standard_streams():
     """
     Ensure _py36_windowsconsoleio_workaround function works with objects that
@@ -1248,6 +1337,7 @@
     from _pytest.capture import _py36_windowsconsoleio_workaround
 
     class DummyStream(object):
+
         def write(self, s):
             pass
@@ -1256,19 +1346,22 @@
 def test_dontreadfrominput_has_encoding(testdir):
-    testdir.makepyfile("""
+    testdir.makepyfile(
+        """
         import sys
         def test_capattr():
             # should not raise AttributeError
             assert sys.stdout.encoding
             assert sys.stderr.encoding
-    """)
+    """
+    )
     reprec = testdir.inline_run()
     reprec.assertoutcome(passed=1)
 
 
 def test_crash_on_closing_tmpfile_py27(testdir):
-    testdir.makepyfile('''
+    testdir.makepyfile(
+        """
         from __future__ import print_function
         import time
         import threading
@@ -1285,10 +1378,11 @@ def test_crash_on_closing_tmpfile_py27(testdir):
             t.start()
             time.sleep(0.5)
-    ''')
+    """
+    )
     result = testdir.runpytest_subprocess()
     assert result.ret == 0
-    assert 'IOError' not in result.stdout.str()
+    assert "IOError" not in result.stdout.str()
 
 
 def test_pickling_and_unpickling_encoded_file():
diff --git a/testing/test_collection.py b/testing/test_collection.py
index f2d542c62..657d64c74 100644
--- a/testing/test_collection.py
+++ b/testing/test_collection.py
@@ -8,16 +8,20 @@
 from _pytest.main import Session, EXIT_NOTESTSCOLLECTED, _in_venv
 
 
 class TestCollector(object):
+
     def test_collect_versus_item(self):
         from pytest import Collector, Item
+
         assert not issubclass(Collector, Item)
         assert not issubclass(Item, Collector)
 
     def test_compat_attributes(self, testdir, recwarn):
-        modcol = testdir.getmodulecol("""
+        modcol = testdir.getmodulecol(
+            """
             def test_pass(): pass
             def test_fail(): assert 0
-        """)
+        """
+        )
         recwarn.clear()
         assert modcol.Module == pytest.Module
         assert modcol.Class == pytest.Class
@@ -26,10 +30,12 @@ class TestCollector(object):
         assert modcol.Function == pytest.Function
 
     def test_check_equality(self, testdir):
-        modcol = testdir.getmodulecol("""
+        modcol = testdir.getmodulecol(
+            """
             def test_pass(): pass
             def test_fail(): assert 0
-        """)
+        """
+        )
         fn1 = testdir.collect_by_name(modcol, "test_pass")
         assert isinstance(fn1, pytest.Function)
         fn2 = 
testdir.collect_by_name(modcol, "test_pass") @@ -38,7 +44,7 @@ class TestCollector(object): assert fn1 == fn2 assert fn1 != modcol if sys.version_info < (3, 0): - assert cmp(fn1, fn2) == 0 + assert cmp(fn1, fn2) == 0 # NOQA assert hash(fn1) == hash(fn2) fn3 = testdir.collect_by_name(modcol, "test_fail") @@ -54,14 +60,15 @@ class TestCollector(object): assert modcol != fn def test_getparent(self, testdir): - modcol = testdir.getmodulecol(""" + modcol = testdir.getmodulecol( + """ class TestClass(object): def test_foo(): pass - """) + """ + ) cls = testdir.collect_by_name(modcol, "TestClass") - fn = testdir.collect_by_name( - testdir.collect_by_name(cls, "()"), "test_foo") + fn = testdir.collect_by_name(testdir.collect_by_name(cls, "()"), "test_foo") parent = fn.getparent(pytest.Module) assert parent is modcol @@ -74,14 +81,16 @@ class TestCollector(object): def test_getcustomfile_roundtrip(self, testdir): hello = testdir.makefile(".xxx", hello="world") - testdir.makepyfile(conftest=""" + testdir.makepyfile( + conftest=""" import pytest class CustomFile(pytest.File): pass def pytest_collect_file(path, parent): if path.ext == ".xxx": return CustomFile(path, parent=parent) - """) + """ + ) node = testdir.getpathnode(hello) assert isinstance(node, pytest.File) assert node.name == "hello.xxx" @@ -91,32 +100,32 @@ class TestCollector(object): def test_can_skip_class_with_test_attr(self, testdir): """Assure test class is skipped when using `__test__=False` (See #2007).""" - testdir.makepyfile(""" + testdir.makepyfile( + """ class TestFoo(object): __test__ = False def __init__(self): pass def test_foo(): assert True - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - 'collected 0 items', - '*no tests ran in*', - ]) + result.stdout.fnmatch_lines(["collected 0 items", "*no tests ran in*"]) class TestCollectFS(object): + def test_ignored_certain_directories(self, testdir): tmpdir = testdir.tmpdir - tmpdir.ensure("build", 'test_notfound.py') - tmpdir.ensure("dist", 'test_notfound.py') - tmpdir.ensure("_darcs", 'test_notfound.py') - tmpdir.ensure("CVS", 'test_notfound.py') - tmpdir.ensure("{arch}", 'test_notfound.py') - tmpdir.ensure(".whatever", 'test_notfound.py') - tmpdir.ensure(".bzr", 'test_notfound.py') - tmpdir.ensure("normal", 'test_found.py') + tmpdir.ensure("build", "test_notfound.py") + tmpdir.ensure("dist", "test_notfound.py") + tmpdir.ensure("_darcs", "test_notfound.py") + tmpdir.ensure("CVS", "test_notfound.py") + tmpdir.ensure("{arch}", "test_notfound.py") + tmpdir.ensure(".whatever", "test_notfound.py") + tmpdir.ensure(".bzr", "test_notfound.py") + tmpdir.ensure("normal", "test_found.py") for x in tmpdir.visit("test_*.py"): x.write("def test_hello(): pass") @@ -125,9 +134,17 @@ class TestCollectFS(object): assert "test_notfound" not in s assert "test_found" in s - @pytest.mark.parametrize('fname', - ("activate", "activate.csh", "activate.fish", - "Activate", "Activate.bat", "Activate.ps1")) + @pytest.mark.parametrize( + "fname", + ( + "activate", + "activate.csh", + "activate.fish", + "Activate", + "Activate.bat", + "Activate.ps1", + ), + ) def test_ignored_virtualenvs(self, testdir, fname): bindir = "Scripts" if sys.platform.startswith("win") else "bin" testdir.tmpdir.ensure("virtual", bindir, fname) @@ -144,9 +161,17 @@ class TestCollectFS(object): result = testdir.runpytest("virtual") assert "test_invenv" in result.stdout.str() - @pytest.mark.parametrize('fname', - ("activate", "activate.csh", "activate.fish", - "Activate", "Activate.bat", "Activate.ps1")) 
+ @pytest.mark.parametrize( + "fname", + ( + "activate", + "activate.csh", + "activate.fish", + "Activate", + "Activate.bat", + "Activate.ps1", + ), + ) def test_ignored_virtualenvs_norecursedirs_precedence(self, testdir, fname): bindir = "Scripts" if sys.platform.startswith("win") else "bin" # norecursedirs takes priority @@ -159,24 +184,34 @@ class TestCollectFS(object): result = testdir.runpytest("--collect-in-virtualenv", ".virtual") assert "test_invenv" in result.stdout.str() - @pytest.mark.parametrize('fname', - ("activate", "activate.csh", "activate.fish", - "Activate", "Activate.bat", "Activate.ps1")) + @pytest.mark.parametrize( + "fname", + ( + "activate", + "activate.csh", + "activate.fish", + "Activate", + "Activate.bat", + "Activate.ps1", + ), + ) def test__in_venv(self, testdir, fname): """Directly test the virtual env detection function""" bindir = "Scripts" if sys.platform.startswith("win") else "bin" # no bin/activate, not a virtualenv - base_path = testdir.tmpdir.mkdir('venv') + base_path = testdir.tmpdir.mkdir("venv") assert _in_venv(base_path) is False # with bin/activate, totally a virtualenv base_path.ensure(bindir, fname) assert _in_venv(base_path) is True def test_custom_norecursedirs(self, testdir): - testdir.makeini(""" + testdir.makeini( + """ [pytest] norecursedirs = mydir xyz* - """) + """ + ) tmpdir = testdir.tmpdir tmpdir.ensure("mydir", "test_hello.py").write("def test_1(): pass") tmpdir.ensure("xyz123", "test_2.py").write("def test_2(): 0/0") @@ -187,10 +222,12 @@ class TestCollectFS(object): rec.assertoutcome(failed=1) def test_testpaths_ini(self, testdir, monkeypatch): - testdir.makeini(""" + testdir.makeini( + """ [pytest] testpaths = gui uts - """) + """ + ) tmpdir = testdir.tmpdir tmpdir.ensure("env", "test_1.py").write("def test_env(): pass") tmpdir.ensure("gui", "test_2.py").write("def test_gui(): pass") @@ -198,28 +235,30 @@ class TestCollectFS(object): # executing from rootdir only tests from `testpaths` directories # are collected - items, reprec = testdir.inline_genitems('-v') - assert [x.name for x in items] == ['test_gui', 'test_uts'] + items, reprec = testdir.inline_genitems("-v") + assert [x.name for x in items] == ["test_gui", "test_uts"] # check that explicitly passing directories in the command-line # collects the tests - for dirname in ('env', 'gui', 'uts'): + for dirname in ("env", "gui", "uts"): items, reprec = testdir.inline_genitems(tmpdir.join(dirname)) - assert [x.name for x in items] == ['test_%s' % dirname] + assert [x.name for x in items] == ["test_%s" % dirname] # changing cwd to each subdirectory and running pytest without # arguments collects the tests in that directory normally - for dirname in ('env', 'gui', 'uts'): + for dirname in ("env", "gui", "uts"): monkeypatch.chdir(testdir.tmpdir.join(dirname)) items, reprec = testdir.inline_genitems() - assert [x.name for x in items] == ['test_%s' % dirname] + assert [x.name for x in items] == ["test_%s" % dirname] class TestCollectPluginHookRelay(object): + def test_pytest_collect_file(self, testdir): wascalled = [] class Plugin(object): + def pytest_collect_file(self, path, parent): if not path.basename.startswith("."): # Ignore hidden files, e.g. .testmondata. 
@@ -228,12 +267,13 @@ class TestCollectPluginHookRelay(object): testdir.makefile(".abc", "xyz") pytest.main([testdir.tmpdir], plugins=[Plugin()]) assert len(wascalled) == 1 - assert wascalled[0].ext == '.abc' + assert wascalled[0].ext == ".abc" def test_pytest_collect_directory(self, testdir): wascalled = [] class Plugin(object): + def pytest_collect_directory(self, path, parent): wascalled.append(path.basename) @@ -247,10 +287,13 @@ class TestCollectPluginHookRelay(object): class TestPrunetraceback(object): def test_custom_repr_failure(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import not_exists - """) - testdir.makeconftest(""" + """ + ) + testdir.makeconftest( + """ import pytest def pytest_collect_file(path, parent): return MyFile(path, parent) @@ -263,20 +306,21 @@ class TestPrunetraceback(object): if excinfo.errisinstance(MyError): return "hello world" return pytest.File.repr_failure(self, excinfo) - """) + """ + ) result = testdir.runpytest(p) - result.stdout.fnmatch_lines([ - "*ERROR collecting*", - "*hello world*", - ]) + result.stdout.fnmatch_lines(["*ERROR collecting*", "*hello world*"]) @pytest.mark.xfail(reason="other mechanism for adding to reporting needed") def test_collect_report_postprocessing(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import not_exists - """) - testdir.makeconftest(""" + """ + ) + testdir.makeconftest( + """ import pytest @pytest.hookimpl(hookwrapper=True) def pytest_make_collect_report(): @@ -284,21 +328,22 @@ class TestPrunetraceback(object): rep = outcome.get_result() rep.headerlines += ["header1"] outcome.force_result(rep) - """) + """ + ) result = testdir.runpytest(p) - result.stdout.fnmatch_lines([ - "*ERROR collecting*", - "*header1*", - ]) + result.stdout.fnmatch_lines(["*ERROR collecting*", "*header1*"]) class TestCustomConftests(object): + def test_ignore_collect_path(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_ignore_collect(path, config): return path.basename.startswith("x") or \ path.basename == "test_one.py" - """) + """ + ) sub = testdir.mkdir("xy123") sub.ensure("test_hello.py").write("syntax error") sub.join("conftest.py").write("syntax error") @@ -309,10 +354,12 @@ class TestCustomConftests(object): result.stdout.fnmatch_lines(["*1 passed*"]) def test_ignore_collect_not_called_on_argument(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_ignore_collect(path, config): return True - """) + """ + ) p = testdir.makepyfile("def test_hello(): pass") result = testdir.runpytest(p) assert result.ret == 0 @@ -322,14 +369,16 @@ class TestCustomConftests(object): result.stdout.fnmatch_lines("*collected 0 items*") def test_collectignore_exclude_on_option(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ collect_ignore = ['hello', 'test_world.py'] def pytest_addoption(parser): parser.addoption("--XX", action="store_true", default=False) def pytest_configure(config): if config.getvalue("XX"): collect_ignore[:] = [] - """) + """ + ) testdir.mkdir("hello") testdir.makepyfile(test_world="def test_hello(): pass") result = testdir.runpytest() @@ -340,55 +389,55 @@ class TestCustomConftests(object): assert "passed" in result.stdout.str() def test_pytest_fs_collect_hooks_are_seen(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest class MyModule(pytest.Module): pass def pytest_collect_file(path, parent): if path.ext == ".py": return MyModule(path, parent) - """) 
+ """ + ) testdir.mkdir("sub") testdir.makepyfile("def test_x(): pass") result = testdir.runpytest("--collect-only") - result.stdout.fnmatch_lines([ - "*MyModule*", - "*test_x*" - ]) + result.stdout.fnmatch_lines(["*MyModule*", "*test_x*"]) def test_pytest_collect_file_from_sister_dir(self, testdir): sub1 = testdir.mkpydir("sub1") sub2 = testdir.mkpydir("sub2") - conf1 = testdir.makeconftest(""" + conf1 = testdir.makeconftest( + """ import pytest class MyModule1(pytest.Module): pass def pytest_collect_file(path, parent): if path.ext == ".py": return MyModule1(path, parent) - """) + """ + ) conf1.move(sub1.join(conf1.basename)) - conf2 = testdir.makeconftest(""" + conf2 = testdir.makeconftest( + """ import pytest class MyModule2(pytest.Module): pass def pytest_collect_file(path, parent): if path.ext == ".py": return MyModule2(path, parent) - """) + """ + ) conf2.move(sub2.join(conf2.basename)) p = testdir.makepyfile("def test_x(): pass") p.copy(sub1.join(p.basename)) p.copy(sub2.join(p.basename)) result = testdir.runpytest("--collect-only") - result.stdout.fnmatch_lines([ - "*MyModule1*", - "*MyModule2*", - "*test_x*" - ]) + result.stdout.fnmatch_lines(["*MyModule1*", "*MyModule2*", "*test_x*"]) class TestSession(object): + def test_parsearg(self, testdir): p = testdir.makepyfile("def test_func(): pass") subdir = testdir.mkdir("sub") @@ -425,9 +474,13 @@ class TestSession(object): def get_reported_items(self, hookrec): """Return pytest.Item instances reported by the pytest_collectreport hook""" - calls = hookrec.getcalls('pytest_collectreport') - return [x for call in calls for x in call.report.result - if isinstance(x, pytest.Item)] + calls = hookrec.getcalls("pytest_collectreport") + return [ + x + for call in calls + for x in call.report.result + if isinstance(x, pytest.Item) + ] def test_collect_protocol_single_function(self, testdir): p = testdir.makepyfile("def test_func(): pass") @@ -439,40 +492,46 @@ class TestSession(object): assert newid == id pprint.pprint(hookrec.calls) topdir = testdir.tmpdir # noqa - hookrec.assert_contains([ - ("pytest_collectstart", "collector.fspath == topdir"), - ("pytest_make_collect_report", "collector.fspath == topdir"), - ("pytest_collectstart", "collector.fspath == p"), - ("pytest_make_collect_report", "collector.fspath == p"), - ("pytest_pycollect_makeitem", "name == 'test_func'"), - ("pytest_collectreport", "report.result[0].name == 'test_func'"), - ]) + hookrec.assert_contains( + [ + ("pytest_collectstart", "collector.fspath == topdir"), + ("pytest_make_collect_report", "collector.fspath == topdir"), + ("pytest_collectstart", "collector.fspath == p"), + ("pytest_make_collect_report", "collector.fspath == p"), + ("pytest_pycollect_makeitem", "name == 'test_func'"), + ("pytest_collectreport", "report.result[0].name == 'test_func'"), + ] + ) # ensure we are reporting the collection of the single test item (#2464) - assert [x.name for x in self.get_reported_items(hookrec)] == ['test_func'] + assert [x.name for x in self.get_reported_items(hookrec)] == ["test_func"] def test_collect_protocol_method(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ class TestClass(object): def test_method(self): pass - """) + """ + ) normid = p.basename + "::TestClass::()::test_method" - for id in [p.basename, - p.basename + "::TestClass", - p.basename + "::TestClass::()", - normid, - ]: + for id in [ + p.basename, + p.basename + "::TestClass", + p.basename + "::TestClass::()", + normid, + ]: items, hookrec = testdir.inline_genitems(id) assert 
len(items) == 1 assert items[0].name == "test_method" newid = items[0].nodeid assert newid == normid # ensure we are reporting the collection of the single test item (#2464) - assert [x.name for x in self.get_reported_items(hookrec)] == ['test_method'] + assert [x.name for x in self.get_reported_items(hookrec)] == ["test_method"] def test_collect_custom_nodes_multi_id(self, testdir): p = testdir.makepyfile("def test_func(): pass") - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest class SpecialItem(pytest.Item): def runtest(self): @@ -483,22 +542,26 @@ class TestSession(object): def pytest_collect_file(path, parent): if path.basename == %r: return SpecialFile(fspath=path, parent=parent) - """ % p.basename) + """ + % p.basename + ) id = p.basename items, hookrec = testdir.inline_genitems(id) pprint.pprint(hookrec.calls) assert len(items) == 2 - hookrec.assert_contains([ - ("pytest_collectstart", - "collector.fspath == collector.session.fspath"), - ("pytest_collectstart", - "collector.__class__.__name__ == 'SpecialFile'"), - ("pytest_collectstart", - "collector.__class__.__name__ == 'Module'"), - ("pytest_pycollect_makeitem", "name == 'test_func'"), - ("pytest_collectreport", "report.nodeid.startswith(p.basename)"), - ]) + hookrec.assert_contains( + [ + ("pytest_collectstart", "collector.fspath == collector.session.fspath"), + ( + "pytest_collectstart", + "collector.__class__.__name__ == 'SpecialFile'", + ), + ("pytest_collectstart", "collector.__class__.__name__ == 'Module'"), + ("pytest_pycollect_makeitem", "name == 'test_func'"), + ("pytest_collectreport", "report.nodeid.startswith(p.basename)"), + ] + ) assert len(self.get_reported_items(hookrec)) == 2 def test_collect_subdir_event_ordering(self, testdir): @@ -510,12 +573,13 @@ class TestSession(object): items, hookrec = testdir.inline_genitems() assert len(items) == 1 pprint.pprint(hookrec.calls) - hookrec.assert_contains([ - ("pytest_collectstart", "collector.fspath == test_aaa"), - ("pytest_pycollect_makeitem", "name == 'test_func'"), - ("pytest_collectreport", - "report.nodeid.startswith('aaa/test_aaa.py')"), - ]) + hookrec.assert_contains( + [ + ("pytest_collectstart", "collector.fspath == test_aaa"), + ("pytest_pycollect_makeitem", "name == 'test_func'"), + ("pytest_collectreport", "report.nodeid.startswith('aaa/test_aaa.py')"), + ] + ) def test_collect_two_commandline_args(self, testdir): p = testdir.makepyfile("def test_func(): pass") @@ -531,14 +595,16 @@ class TestSession(object): items, hookrec = testdir.inline_genitems(id) assert len(items) == 2 pprint.pprint(hookrec.calls) - hookrec.assert_contains([ - ("pytest_collectstart", "collector.fspath == test_aaa"), - ("pytest_pycollect_makeitem", "name == 'test_func'"), - ("pytest_collectreport", "report.nodeid == 'aaa/test_aaa.py'"), - ("pytest_collectstart", "collector.fspath == test_bbb"), - ("pytest_pycollect_makeitem", "name == 'test_func'"), - ("pytest_collectreport", "report.nodeid == 'bbb/test_bbb.py'"), - ]) + hookrec.assert_contains( + [ + ("pytest_collectstart", "collector.fspath == test_aaa"), + ("pytest_pycollect_makeitem", "name == 'test_func'"), + ("pytest_collectreport", "report.nodeid == 'aaa/test_aaa.py'"), + ("pytest_collectstart", "collector.fspath == test_bbb"), + ("pytest_pycollect_makeitem", "name == 'test_func'"), + ("pytest_collectreport", "report.nodeid == 'bbb/test_bbb.py'"), + ] + ) def test_serialization_byid(self, testdir): testdir.makepyfile("def test_func(): pass") @@ -551,28 +617,31 @@ class TestSession(object): assert 
item2.fspath == item.fspath def test_find_byid_without_instance_parents(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ class TestClass(object): def test_method(self): pass - """) + """ + ) arg = p.basename + "::TestClass::test_method" items, hookrec = testdir.inline_genitems(arg) assert len(items) == 1 item, = items assert item.nodeid.endswith("TestClass::()::test_method") # ensure we are reporting the collection of the single test item (#2464) - assert [x.name for x in self.get_reported_items(hookrec)] == ['test_method'] + assert [x.name for x in self.get_reported_items(hookrec)] == ["test_method"] class Test_getinitialnodes(object): + def test_global_file(self, testdir, tmpdir): x = tmpdir.ensure("x.py") with tmpdir.as_cwd(): config = testdir.parseconfigure(x) col = testdir.getnode(config, x) assert isinstance(col, pytest.Module) - assert col.name == 'x.py' + assert col.name == "x.py" assert col.parent.parent is None for col in col.listchain(): assert col.config is config @@ -586,21 +655,24 @@ class Test_getinitialnodes(object): config = testdir.parseconfigure(x) col = testdir.getnode(config, x) assert isinstance(col, pytest.Module) - assert col.name == 'x.py' + assert col.name == "x.py" assert col.parent.parent is None for col in col.listchain(): assert col.config is config class Test_genitems(object): + def test_check_collect_hashes(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ def test_1(): pass def test_2(): pass - """) + """ + ) p.copy(p.dirpath(p.purebasename + "2" + ".py")) items, reprec = testdir.inline_genitems(p.dirpath()) assert len(items) == 4 @@ -611,7 +683,8 @@ class Test_genitems(object): assert i != j def test_example_items1(self, testdir): - p = testdir.makepyfile(''' + p = testdir.makepyfile( + """ def testone(): pass @@ -621,12 +694,13 @@ class Test_genitems(object): class TestY(TestX): pass - ''') + """ + ) items, reprec = testdir.inline_genitems(p) assert len(items) == 3 - assert items[0].name == 'testone' - assert items[1].name == 'testmethod_one' - assert items[2].name == 'testmethod_one' + assert items[0].name == "testone" + assert items[1].name == "testmethod_one" + assert items[2].name == "testmethod_one" # let's also test getmodpath here assert items[0].getmodpath() == "testone" @@ -642,12 +716,15 @@ class Test_genitems(object): tests that python_classes and python_functions config options work as prefixes and glob-like patterns (issue #600). 
""" - testdir.makeini(""" + testdir.makeini( + """ [pytest] python_classes = *Suite Test python_functions = *_test test - """) - p = testdir.makepyfile(''' + """ + ) + p = testdir.makepyfile( + """ class MyTestSuite(object): def x_test(self): pass @@ -655,14 +732,16 @@ class Test_genitems(object): class TestCase(object): def test_y(self): pass - ''') + """ + ) items, reprec = testdir.inline_genitems(p) ids = [x.getmodpath() for x in items] - assert ids == ['MyTestSuite.x_test', 'TestCase.test_y'] + assert ids == ["MyTestSuite.x_test", "TestCase.test_y"] def test_matchnodes_two_collections_same_file(testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest def pytest_configure(config): config.pluginmanager.register(Plugin2()) @@ -686,25 +765,25 @@ def test_matchnodes_two_collections_same_file(testdir): class Item2(pytest.Item): def runtest(self): pass - """) + """ + ) p = testdir.makefile(".abc", "") result = testdir.runpytest() assert result.ret == 0 - result.stdout.fnmatch_lines([ - "*2 passed*", - ]) + result.stdout.fnmatch_lines(["*2 passed*"]) res = testdir.runpytest("%s::hello" % p.basename) - res.stdout.fnmatch_lines([ - "*1 passed*", - ]) + res.stdout.fnmatch_lines(["*1 passed*"]) class TestNodekeywords(object): + def test_no_under(self, testdir): - modcol = testdir.getmodulecol(""" + modcol = testdir.getmodulecol( + """ def test_pass(): pass def test_fail(): assert 0 - """) + """ + ) values = list(modcol.keywords) assert modcol.name in values for x in values: @@ -712,13 +791,15 @@ class TestNodekeywords(object): assert modcol.name in repr(modcol.keywords) def test_issue345(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_should_not_be_selected(): assert False, 'I should not have been selected to run' def test___repr__(): pass - """) + """ + ) reprec = testdir.inline_run("-k repr") reprec.assertoutcome(passed=1, failed=0) @@ -752,13 +833,15 @@ def test_exit_on_collection_error(testdir): res = testdir.runpytest() assert res.ret == 2 - res.stdout.fnmatch_lines([ - "collected 2 items / 2 errors", - "*ERROR collecting test_02_import_error.py*", - "*No module named *asdfa*", - "*ERROR collecting test_03_import_error.py*", - "*No module named *asdfa*", - ]) + res.stdout.fnmatch_lines( + [ + "collected 2 items / 2 errors", + "*ERROR collecting test_02_import_error.py*", + "*No module named *asdfa*", + "*ERROR collecting test_03_import_error.py*", + "*No module named *asdfa*", + ] + ) def test_exit_on_collection_with_maxfail_smaller_than_n_errors(testdir): @@ -771,12 +854,11 @@ def test_exit_on_collection_with_maxfail_smaller_than_n_errors(testdir): res = testdir.runpytest("--maxfail=1") assert res.ret == 1 - res.stdout.fnmatch_lines([ - "*ERROR collecting test_02_import_error.py*", - "*No module named *asdfa*", - ]) + res.stdout.fnmatch_lines( + ["*ERROR collecting test_02_import_error.py*", "*No module named *asdfa*"] + ) - assert 'test_03' not in res.stdout.str() + assert "test_03" not in res.stdout.str() def test_exit_on_collection_with_maxfail_bigger_than_n_errors(testdir): @@ -789,13 +871,15 @@ def test_exit_on_collection_with_maxfail_bigger_than_n_errors(testdir): res = testdir.runpytest("--maxfail=4") assert res.ret == 2 - res.stdout.fnmatch_lines([ - "collected 2 items / 2 errors", - "*ERROR collecting test_02_import_error.py*", - "*No module named *asdfa*", - "*ERROR collecting test_03_import_error.py*", - "*No module named *asdfa*", - ]) + res.stdout.fnmatch_lines( + [ + "collected 2 items / 2 errors", + "*ERROR collecting 
test_02_import_error.py*", + "*No module named *asdfa*", + "*ERROR collecting test_03_import_error.py*", + "*No module named *asdfa*", + ] + ) def test_continue_on_collection_errors(testdir): @@ -808,10 +892,9 @@ def test_continue_on_collection_errors(testdir): res = testdir.runpytest("--continue-on-collection-errors") assert res.ret == 1 - res.stdout.fnmatch_lines([ - "collected 2 items / 2 errors", - "*1 failed, 1 passed, 2 error*", - ]) + res.stdout.fnmatch_lines( + ["collected 2 items / 2 errors", "*1 failed, 1 passed, 2 error*"] + ) def test_continue_on_collection_errors_maxfail(testdir): @@ -827,21 +910,22 @@ def test_continue_on_collection_errors_maxfail(testdir): res = testdir.runpytest("--continue-on-collection-errors", "--maxfail=3") assert res.ret == 1 - res.stdout.fnmatch_lines([ - "collected 2 items / 2 errors", - "*1 failed, 2 error*", - ]) + res.stdout.fnmatch_lines(["collected 2 items / 2 errors", "*1 failed, 2 error*"]) def test_fixture_scope_sibling_conftests(testdir): """Regression test case for https://github.com/pytest-dev/pytest/issues/2836""" foo_path = testdir.mkpydir("foo") - foo_path.join("conftest.py").write(_pytest._code.Source(""" + foo_path.join("conftest.py").write( + _pytest._code.Source( + """ import pytest @pytest.fixture def fix(): return 1 - """)) + """ + ) + ) foo_path.join("test_foo.py").write("def test_foo(fix): assert fix == 1") # Tests in `food/` should not see the conftest fixture from `foo/` @@ -851,8 +935,10 @@ def test_fixture_scope_sibling_conftests(testdir): res = testdir.runpytest() assert res.ret == 1 - res.stdout.fnmatch_lines([ - "*ERROR at setup of test_food*", - "E*fixture 'fix' not found", - "*1 passed, 1 error*", - ]) + res.stdout.fnmatch_lines( + [ + "*ERROR at setup of test_food*", + "E*fixture 'fix' not found", + "*1 passed, 1 error*", + ] + ) diff --git a/testing/test_compat.py b/testing/test_compat.py index c74801c6c..550a8f1b3 100644 --- a/testing/test_compat.py +++ b/testing/test_compat.py @@ -7,6 +7,7 @@ from _pytest.outcomes import OutcomeException def test_is_generator(): + def zap(): yield @@ -20,6 +21,7 @@ def test_is_generator(): def test_real_func_loop_limit(): class Evil(object): + def __init__(self): self.left = 1000 @@ -28,7 +30,7 @@ def test_real_func_loop_limit(): def __getattr__(self, attr): if not self.left: - raise RuntimeError('its over') + raise RuntimeError("its over") self.left -= 1 return self @@ -39,10 +41,12 @@ def test_real_func_loop_limit(): print(res) -@pytest.mark.skipif(sys.version_info < (3, 4), - reason='asyncio available in Python 3.4+') +@pytest.mark.skipif( + sys.version_info < (3, 4), reason="asyncio available in Python 3.4+" +) def test_is_generator_asyncio(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ from _pytest.compat import is_generator import asyncio @asyncio.coroutine @@ -51,17 +55,20 @@ def test_is_generator_asyncio(testdir): def test_is_generator_asyncio(): assert not is_generator(baz) - """) + """ + ) # avoid importing asyncio into pytest's own process, # which in turn imports logging (#8) result = testdir.runpytest_subprocess() - result.stdout.fnmatch_lines(['*1 passed*']) + result.stdout.fnmatch_lines(["*1 passed*"]) -@pytest.mark.skipif(sys.version_info < (3, 5), - reason='async syntax available in Python 3.5+') +@pytest.mark.skipif( + sys.version_info < (3, 5), reason="async syntax available in Python 3.5+" +) def test_is_generator_async_syntax(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ from _pytest.compat import is_generator def 
test_is_generator_py35(): async def foo(): @@ -72,19 +79,21 @@ def test_is_generator_async_syntax(testdir): assert not is_generator(foo) assert not is_generator(bar) - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines(['*1 passed*']) + result.stdout.fnmatch_lines(["*1 passed*"]) class ErrorsHelper(object): + @property def raise_exception(self): - raise Exception('exception should be catched') + raise Exception("exception should be catched") @property def raise_fail(self): - pytest.fail('fail should be catched') + pytest.fail("fail should be catched") def test_helper_failures(): @@ -97,5 +106,5 @@ def test_helper_failures(): def test_safe_getattr(): helper = ErrorsHelper() - assert safe_getattr(helper, 'raise_exception', 'default') == 'default' - assert safe_getattr(helper, 'raise_fail', 'default') == 'default' + assert safe_getattr(helper, "raise_exception", "default") == "default" + assert safe_getattr(helper, "raise_fail", "default") == "default" diff --git a/testing/test_config.py b/testing/test_config.py index 39105f5d6..56a51514d 100644 --- a/testing/test_config.py +++ b/testing/test_config.py @@ -4,76 +4,106 @@ import textwrap import pytest import _pytest._code -from _pytest.config import getcfg, get_common_ancestor, determine_setup, _iter_rewritable_modules +from _pytest.config import ( + getcfg, + get_common_ancestor, + determine_setup, + _iter_rewritable_modules, +) from _pytest.main import EXIT_NOTESTSCOLLECTED class TestParseIni(object): - @pytest.mark.parametrize('section, filename', - [('pytest', 'pytest.ini'), ('tool:pytest', 'setup.cfg')]) + @pytest.mark.parametrize( + "section, filename", [("pytest", "pytest.ini"), ("tool:pytest", "setup.cfg")] + ) def test_getcfg_and_config(self, testdir, tmpdir, section, filename): sub = tmpdir.mkdir("sub") sub.chdir() - tmpdir.join(filename).write(_pytest._code.Source(""" + tmpdir.join(filename).write( + _pytest._code.Source( + """ [{section}] name = value - """.format(section=section))) + """.format( + section=section + ) + ) + ) rootdir, inifile, cfg = getcfg([sub]) - assert cfg['name'] == "value" + assert cfg["name"] == "value" config = testdir.parseconfigure(sub) - assert config.inicfg['name'] == 'value' + assert config.inicfg["name"] == "value" def test_getcfg_empty_path(self): """correctly handle zero length arguments (a la pytest '')""" - getcfg(['']) + getcfg([""]) def test_append_parse_args(self, testdir, tmpdir, monkeypatch): - monkeypatch.setenv('PYTEST_ADDOPTS', '--color no -rs --tb="short"') - tmpdir.join("pytest.ini").write(_pytest._code.Source(""" + monkeypatch.setenv("PYTEST_ADDOPTS", '--color no -rs --tb="short"') + tmpdir.join("pytest.ini").write( + _pytest._code.Source( + """ [pytest] addopts = --verbose - """)) + """ + ) + ) config = testdir.parseconfig(tmpdir) - assert config.option.color == 'no' - assert config.option.reportchars == 's' - assert config.option.tbstyle == 'short' + assert config.option.color == "no" + assert config.option.reportchars == "s" + assert config.option.tbstyle == "short" assert config.option.verbose def test_tox_ini_wrong_version(self, testdir): - testdir.makefile('.ini', tox=""" + testdir.makefile( + ".ini", + tox=""" [pytest] minversion=9.0 - """) + """, + ) result = testdir.runpytest() assert result.ret != 0 - result.stderr.fnmatch_lines([ - "*tox.ini:2*requires*9.0*actual*" - ]) + result.stderr.fnmatch_lines(["*tox.ini:2*requires*9.0*actual*"]) - @pytest.mark.parametrize("section, name", [ - ('tool:pytest', 'setup.cfg'), - ('pytest', 'tox.ini'), - ('pytest', 
'pytest.ini')], + @pytest.mark.parametrize( + "section, name", + [("tool:pytest", "setup.cfg"), ("pytest", "tox.ini"), ("pytest", "pytest.ini")], ) def test_ini_names(self, testdir, name, section): - testdir.tmpdir.join(name).write(textwrap.dedent(""" + testdir.tmpdir.join(name).write( + textwrap.dedent( + """ [{section}] minversion = 1.0 - """.format(section=section))) + """.format( + section=section + ) + ) + ) config = testdir.parseconfig() assert config.getini("minversion") == "1.0" def test_toxini_before_lower_pytestini(self, testdir): sub = testdir.tmpdir.mkdir("sub") - sub.join("tox.ini").write(textwrap.dedent(""" + sub.join("tox.ini").write( + textwrap.dedent( + """ [pytest] minversion = 2.0 - """)) - testdir.tmpdir.join("pytest.ini").write(textwrap.dedent(""" + """ + ) + ) + testdir.tmpdir.join("pytest.ini").write( + textwrap.dedent( + """ [pytest] minversion = 1.5 - """)) + """ + ) + ) config = testdir.parseconfigure(sub) assert config.getini("minversion") == "2.0" @@ -81,54 +111,72 @@ class TestParseIni(object): def test_confcutdir(self, testdir): sub = testdir.mkdir("sub") sub.chdir() - testdir.makeini(""" + testdir.makeini( + """ [pytest] addopts = --qwe - """) + """ + ) result = testdir.inline_run("--confcutdir=.") assert result.ret == 0 class TestConfigCmdlineParsing(object): + def test_parsing_again_fails(self, testdir): config = testdir.parseconfig() pytest.raises(AssertionError, lambda: config.parse([])) def test_explicitly_specified_config_file_is_loaded(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_addoption(parser): parser.addini("custom", "") - """) - testdir.makeini(""" + """ + ) + testdir.makeini( + """ [pytest] custom = 0 - """) - testdir.makefile(".cfg", custom=""" + """ + ) + testdir.makefile( + ".cfg", + custom=""" [pytest] custom = 1 - """) + """, + ) config = testdir.parseconfig("-c", "custom.cfg") assert config.getini("custom") == "1" - testdir.makefile(".cfg", custom_tool_pytest_section=""" + testdir.makefile( + ".cfg", + custom_tool_pytest_section=""" [tool:pytest] custom = 1 - """) + """, + ) config = testdir.parseconfig("-c", "custom_tool_pytest_section.cfg") assert config.getini("custom") == "1" def test_absolute_win32_path(self, testdir): - temp_cfg_file = testdir.makefile(".cfg", custom=""" + temp_cfg_file = testdir.makefile( + ".cfg", + custom=""" [pytest] addopts = --version - """) + """, + ) from os.path import normpath + temp_cfg_file = normpath(str(temp_cfg_file)) ret = pytest.main("-c " + temp_cfg_file) assert ret == _pytest.main.EXIT_OK class TestConfigAPI(object): + def test_config_trace(self, testdir): config = testdir.parseconfig() values = [] @@ -138,46 +186,51 @@ class TestConfigAPI(object): assert values[0] == "hello [config]\n" def test_config_getoption(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_addoption(parser): parser.addoption("--hello", "-X", dest="hello") - """) + """ + ) config = testdir.parseconfig("--hello=this") for x in ("hello", "--hello", "-X"): assert config.getoption(x) == "this" pytest.raises(ValueError, "config.getoption('qweqwe')") - @pytest.mark.skipif('sys.version_info[0] < 3') + @pytest.mark.skipif("sys.version_info[0] < 3") def test_config_getoption_unicode(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ from __future__ import unicode_literals def pytest_addoption(parser): parser.addoption('--hello', type=str) - """) - config = testdir.parseconfig('--hello=this') - assert config.getoption('hello') == 'this' + """ + 
) + config = testdir.parseconfig("--hello=this") + assert config.getoption("hello") == "this" def test_config_getvalueorskip(self, testdir): config = testdir.parseconfig() - pytest.raises(pytest.skip.Exception, - "config.getvalueorskip('hello')") + pytest.raises(pytest.skip.Exception, "config.getvalueorskip('hello')") verbose = config.getvalueorskip("verbose") assert verbose == config.option.verbose def test_config_getvalueorskip_None(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_addoption(parser): parser.addoption("--hello") - """) + """ + ) config = testdir.parseconfig() with pytest.raises(pytest.skip.Exception): - config.getvalueorskip('hello') + config.getvalueorskip("hello") def test_getoption(self, testdir): config = testdir.parseconfig() with pytest.raises(ValueError): - config.getvalue('x') + config.getvalue("x") assert config.getoption("x", 1) == 1 def test_getconftest_pathlist(self, testdir, tmpdir): @@ -185,54 +238,66 @@ class TestConfigAPI(object): p = tmpdir.join("conftest.py") p.write("pathlist = ['.', %r]" % str(somepath)) config = testdir.parseconfigure(p) - assert config._getconftest_pathlist('notexist', path=tmpdir) is None - pl = config._getconftest_pathlist('pathlist', path=tmpdir) + assert config._getconftest_pathlist("notexist", path=tmpdir) is None + pl = config._getconftest_pathlist("pathlist", path=tmpdir) print(pl) assert len(pl) == 2 assert pl[0] == tmpdir assert pl[1] == somepath def test_addini(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_addoption(parser): parser.addini("myname", "my new ini value") - """) - testdir.makeini(""" + """ + ) + testdir.makeini( + """ [pytest] myname=hello - """) + """ + ) config = testdir.parseconfig() val = config.getini("myname") assert val == "hello" - pytest.raises(ValueError, config.getini, 'other') + pytest.raises(ValueError, config.getini, "other") def test_addini_pathlist(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_addoption(parser): parser.addini("paths", "my new ini value", type="pathlist") parser.addini("abc", "abc value") - """) - p = testdir.makeini(""" + """ + ) + p = testdir.makeini( + """ [pytest] paths=hello world/sub.py - """) + """ + ) config = testdir.parseconfig() values = config.getini("paths") assert len(values) == 2 - assert values[0] == p.dirpath('hello') - assert values[1] == p.dirpath('world/sub.py') - pytest.raises(ValueError, config.getini, 'other') + assert values[0] == p.dirpath("hello") + assert values[1] == p.dirpath("world/sub.py") + pytest.raises(ValueError, config.getini, "other") def test_addini_args(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_addoption(parser): parser.addini("args", "new args", type="args") parser.addini("a2", "", "args", default="1 2 3".split()) - """) - testdir.makeini(""" + """ + ) + testdir.makeini( + """ [pytest] args=123 "123 hello" "this" - """) + """ + ) config = testdir.parseconfig() values = config.getini("args") assert len(values) == 3 @@ -241,16 +306,20 @@ class TestConfigAPI(object): assert values == list("123") def test_addini_linelist(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_addoption(parser): parser.addini("xy", "", type="linelist") parser.addini("a2", "", "linelist") - """) - testdir.makeini(""" + """ + ) + testdir.makeini( + """ [pytest] xy= 123 345 second line - """) + """ + ) config = testdir.parseconfig() values = config.getini("xy") assert len(values) == 2 @@ 
-258,30 +327,40 @@ class TestConfigAPI(object): values = config.getini("a2") assert values == [] - @pytest.mark.parametrize('str_val, bool_val', - [('True', True), ('no', False), ('no-ini', True)]) + @pytest.mark.parametrize( + "str_val, bool_val", [("True", True), ("no", False), ("no-ini", True)] + ) def test_addini_bool(self, testdir, str_val, bool_val): - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_addoption(parser): parser.addini("strip", "", type="bool", default=True) - """) - if str_val != 'no-ini': - testdir.makeini(""" + """ + ) + if str_val != "no-ini": + testdir.makeini( + """ [pytest] strip=%s - """ % str_val) + """ + % str_val + ) config = testdir.parseconfig() assert config.getini("strip") is bool_val def test_addinivalue_line_existing(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_addoption(parser): parser.addini("xy", "", type="linelist") - """) - testdir.makeini(""" + """ + ) + testdir.makeini( + """ [pytest] xy= 123 - """) + """ + ) config = testdir.parseconfig() values = config.getini("xy") assert len(values) == 1 @@ -292,10 +371,12 @@ class TestConfigAPI(object): assert values == ["123", "456"] def test_addinivalue_line_new(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_addoption(parser): parser.addini("xy", "", type="linelist") - """) + """ + ) config = testdir.parseconfig() assert not config.getini("xy") config.addinivalue_line("xy", "456") @@ -310,99 +391,124 @@ class TestConfigAPI(object): def test_confcutdir_check_isdir(self, testdir): """Give an error if --confcutdir is not a valid directory (#2078)""" with pytest.raises(pytest.UsageError): - testdir.parseconfig('--confcutdir', testdir.tmpdir.join('file').ensure(file=1)) + testdir.parseconfig( + "--confcutdir", testdir.tmpdir.join("file").ensure(file=1) + ) with pytest.raises(pytest.UsageError): - testdir.parseconfig('--confcutdir', testdir.tmpdir.join('inexistant')) - config = testdir.parseconfig('--confcutdir', testdir.tmpdir.join('dir').ensure(dir=1)) - assert config.getoption('confcutdir') == str(testdir.tmpdir.join('dir')) + testdir.parseconfig("--confcutdir", testdir.tmpdir.join("inexistant")) + config = testdir.parseconfig( + "--confcutdir", testdir.tmpdir.join("dir").ensure(dir=1) + ) + assert config.getoption("confcutdir") == str(testdir.tmpdir.join("dir")) - @pytest.mark.parametrize('names, expected', [ - (['bar.py'], ['bar']), - (['foo', 'bar.py'], []), - (['foo', 'bar.pyc'], []), - (['foo', '__init__.py'], ['foo']), - (['foo', 'bar', '__init__.py'], []), - ]) + @pytest.mark.parametrize( + "names, expected", + [ + (["bar.py"], ["bar"]), + (["foo", "bar.py"], []), + (["foo", "bar.pyc"], []), + (["foo", "__init__.py"], ["foo"]), + (["foo", "bar", "__init__.py"], []), + ], + ) def test_iter_rewritable_modules(self, names, expected): - assert list(_iter_rewritable_modules(['/'.join(names)])) == expected + assert list(_iter_rewritable_modules(["/".join(names)])) == expected class TestConfigFromdictargs(object): + def test_basic_behavior(self): from _pytest.config import Config - option_dict = { - 'verbose': 444, - 'foo': 'bar', - 'capture': 'no', - } - args = ['a', 'b'] + + option_dict = {"verbose": 444, "foo": "bar", "capture": "no"} + args = ["a", "b"] config = Config.fromdictargs(option_dict, args) with pytest.raises(AssertionError): - config.parse(['should refuse to parse again']) + config.parse(["should refuse to parse again"]) assert config.option.verbose == 444 - assert config.option.foo == 'bar' - assert 
config.option.capture == 'no' + assert config.option.foo == "bar" + assert config.option.capture == "no" assert config.args == args def test_origargs(self): """Show that fromdictargs can handle args in their "orig" format""" from _pytest.config import Config + option_dict = {} - args = ['-vvvv', '-s', 'a', 'b'] + args = ["-vvvv", "-s", "a", "b"] config = Config.fromdictargs(option_dict, args) - assert config.args == ['a', 'b'] + assert config.args == ["a", "b"] assert config._origargs == args assert config.option.verbose == 4 - assert config.option.capture == 'no' + assert config.option.capture == "no" def test_inifilename(self, tmpdir): - tmpdir.join("foo/bar.ini").ensure().write(_pytest._code.Source(""" + tmpdir.join("foo/bar.ini").ensure().write( + _pytest._code.Source( + """ [pytest] name = value - """)) + """ + ) + ) from _pytest.config import Config - inifile = '../../foo/bar.ini' - option_dict = { - 'inifilename': inifile, - 'capture': 'no', - } - cwd = tmpdir.join('a/b') - cwd.join('pytest.ini').ensure().write(_pytest._code.Source(""" + inifile = "../../foo/bar.ini" + option_dict = {"inifilename": inifile, "capture": "no"} + + cwd = tmpdir.join("a/b") + cwd.join("pytest.ini").ensure().write( + _pytest._code.Source( + """ [pytest] name = wrong-value should_not_be_set = true - """)) + """ + ) + ) with cwd.ensure(dir=True).as_cwd(): config = Config.fromdictargs(option_dict, ()) assert config.args == [str(cwd)] assert config.option.inifilename == inifile - assert config.option.capture == 'no' + assert config.option.capture == "no" # this indicates this is the file used for getting configuration values assert config.inifile == inifile - assert config.inicfg.get('name') == 'value' - assert config.inicfg.get('should_not_be_set') is None + assert config.inicfg.get("name") == "value" + assert config.inicfg.get("should_not_be_set") is None def test_options_on_small_file_do_not_blow_up(testdir): + def runfiletest(opts): reprec = testdir.inline_run(*opts) passed, skipped, failed = reprec.countoutcomes() assert failed == 2 assert skipped == passed == 0 - path = testdir.makepyfile(""" + + path = testdir.makepyfile( + """ def test_f1(): assert 0 def test_f2(): assert 0 - """) + """ + ) - for opts in ([], ['-l'], ['-s'], ['--tb=no'], ['--tb=short'], - ['--tb=long'], ['--fulltrace'], - ['--traceconfig'], ['-v'], ['-v', '-v']): + for opts in ( + [], + ["-l"], + ["-s"], + ["--tb=no"], + ["--tb=short"], + ["--tb=long"], + ["--fulltrace"], + ["--traceconfig"], + ["-v"], + ["-v", "-v"], + ): runfiletest(opts + [path]) @@ -413,27 +519,31 @@ def test_preparse_ordering_with_setuptools(testdir, monkeypatch): assert name == "pytest11" class Dist(object): - project_name = 'spam' - version = '1.0' + project_name = "spam" + version = "1.0" def _get_metadata(self, name): - return ['foo.txt,sha256=abc,123'] + return ["foo.txt,sha256=abc,123"] class EntryPoint(object): name = "mytestplugin" dist = Dist() def load(self): + class PseudoPlugin(object): x = 42 + return PseudoPlugin() return iter([EntryPoint()]) - monkeypatch.setattr(pkg_resources, 'iter_entry_points', my_iter) - testdir.makeconftest(""" + monkeypatch.setattr(pkg_resources, "iter_entry_points", my_iter) + testdir.makeconftest( + """ pytest_plugins = "mytestplugin", - """) + """ + ) monkeypatch.setenv("PYTEST_PLUGINS", "mytestplugin") config = testdir.parseconfig() plugin = config.pluginmanager.getplugin("mytestplugin") @@ -447,11 +557,11 @@ def test_setuptools_importerror_issue1479(testdir, monkeypatch): assert name == "pytest11" class Dist(object): - 
project_name = 'spam' - version = '1.0' + project_name = "spam" + version = "1.0" def _get_metadata(self, name): - return ['foo.txt,sha256=abc,123'] + return ["foo.txt,sha256=abc,123"] class EntryPoint(object): name = "mytestplugin" @@ -462,12 +572,12 @@ def test_setuptools_importerror_issue1479(testdir, monkeypatch): return iter([EntryPoint()]) - monkeypatch.setattr(pkg_resources, 'iter_entry_points', my_iter) + monkeypatch.setattr(pkg_resources, "iter_entry_points", my_iter) with pytest.raises(ImportError): testdir.parseconfig() -@pytest.mark.parametrize('block_it', [True, False]) +@pytest.mark.parametrize("block_it", [True, False]) def test_plugin_preparse_prevents_setuptools_loading(testdir, monkeypatch, block_it): pkg_resources = pytest.importorskip("pkg_resources") @@ -477,11 +587,11 @@ def test_plugin_preparse_prevents_setuptools_loading(testdir, monkeypatch, block assert name == "pytest11" class Dist(object): - project_name = 'spam' - version = '1.0' + project_name = "spam" + version = "1.0" def _get_metadata(self, name): - return ['foo.txt,sha256=abc,123'] + return ["foo.txt,sha256=abc,123"] class EntryPoint(object): name = "mytestplugin" @@ -492,67 +602,75 @@ def test_plugin_preparse_prevents_setuptools_loading(testdir, monkeypatch, block return iter([EntryPoint()]) - monkeypatch.setattr(pkg_resources, 'iter_entry_points', my_iter) + monkeypatch.setattr(pkg_resources, "iter_entry_points", my_iter) args = ("-p", "no:mytestplugin") if block_it else () config = testdir.parseconfig(*args) config.pluginmanager.import_plugin("mytestplugin") if block_it: assert "mytestplugin" not in sys.modules - assert config.pluginmanager.get_plugin('mytestplugin') is None + assert config.pluginmanager.get_plugin("mytestplugin") is None else: - assert config.pluginmanager.get_plugin('mytestplugin') is plugin_module_placeholder + assert config.pluginmanager.get_plugin( + "mytestplugin" + ) is plugin_module_placeholder def test_cmdline_processargs_simple(testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_cmdline_preparse(args): args.append("-h") - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*pytest*", - "*-h*", - ]) + result.stdout.fnmatch_lines(["*pytest*", "*-h*"]) def test_invalid_options_show_extra_information(testdir): """display extra information when pytest exits due to unrecognized options in the command-line""" - testdir.makeini(""" + testdir.makeini( + """ [pytest] addopts = --invalid-option - """) + """ + ) result = testdir.runpytest() - result.stderr.fnmatch_lines([ - "*error: unrecognized arguments: --invalid-option*", - "* inifile: %s*" % testdir.tmpdir.join('tox.ini'), - "* rootdir: %s*" % testdir.tmpdir, - ]) + result.stderr.fnmatch_lines( + [ + "*error: unrecognized arguments: --invalid-option*", + "* inifile: %s*" % testdir.tmpdir.join("tox.ini"), + "* rootdir: %s*" % testdir.tmpdir, + ] + ) -@pytest.mark.parametrize('args', [ - ['dir1', 'dir2', '-v'], - ['dir1', '-v', 'dir2'], - ['dir2', '-v', 'dir1'], - ['-v', 'dir2', 'dir1'], -]) +@pytest.mark.parametrize( + "args", + [ + ["dir1", "dir2", "-v"], + ["dir1", "-v", "dir2"], + ["dir2", "-v", "dir1"], + ["-v", "dir2", "dir1"], + ], +) def test_consider_args_after_options_for_rootdir_and_inifile(testdir, args): """ Consider all arguments in the command-line for rootdir and inifile discovery, even if they happen to occur after an option. 
#949 """ # replace "dir1" and "dir2" from "args" into their real directory - root = testdir.tmpdir.mkdir('myroot') - d1 = root.mkdir('dir1') - d2 = root.mkdir('dir2') + root = testdir.tmpdir.mkdir("myroot") + d1 = root.mkdir("dir1") + d2 = root.mkdir("dir2") for i, arg in enumerate(args): - if arg == 'dir1': + if arg == "dir1": args[i] = d1 - elif arg == 'dir2': + elif arg == "dir2": args[i] = d2 with root.as_cwd(): result = testdir.runpytest(*args) - result.stdout.fnmatch_lines(['*rootdir: *myroot, inifile:']) + result.stdout.fnmatch_lines(["*rootdir: *myroot, inifile:"]) @pytest.mark.skipif("sys.platform == 'win32'") @@ -562,25 +680,28 @@ def test_toolongargs_issue224(testdir): def test_config_in_subdirectory_colon_command_line_issue2148(testdir): - conftest_source = ''' + conftest_source = """ def pytest_addoption(parser): parser.addini('foo', 'foo') - ''' + """ - testdir.makefile('.ini', **{ - 'pytest': '[pytest]\nfoo = root', - 'subdir/pytest': '[pytest]\nfoo = subdir', - }) + testdir.makefile( + ".ini", + **{"pytest": "[pytest]\nfoo = root", "subdir/pytest": "[pytest]\nfoo = subdir"} + ) - testdir.makepyfile(**{ - 'conftest': conftest_source, - 'subdir/conftest': conftest_source, - 'subdir/test_foo': ''' + testdir.makepyfile( + **{ + "conftest": conftest_source, + "subdir/conftest": conftest_source, + "subdir/test_foo": """ def test_foo(pytestconfig): assert pytestconfig.getini('foo') == 'subdir' - '''}) + """, + } + ) - result = testdir.runpytest('subdir/test_foo.py::test_foo') + result = testdir.runpytest("subdir/test_foo.py::test_foo") assert result.ret == 0 @@ -592,6 +713,7 @@ def test_notify_exception(testdir, capfd): assert "ValueError" in err class A(object): + def pytest_internalerror(self, excrepr): return True @@ -603,9 +725,11 @@ def test_notify_exception(testdir, capfd): def test_load_initial_conftest_last_ordering(testdir): from _pytest.config import get_config + pm = get_config().pluginmanager class My(object): + def pytest_load_initial_conftests(self): pass @@ -613,49 +737,52 @@ def test_load_initial_conftest_last_ordering(testdir): pm.register(m) hc = pm.hook.pytest_load_initial_conftests values = hc._nonwrappers + hc._wrappers - expected = [ - "_pytest.config", - 'test_config', - '_pytest.capture', - ] + expected = ["_pytest.config", "test_config", "_pytest.capture"] assert [x.function.__module__ for x in values] == expected def test_get_plugin_specs_as_list(): from _pytest.config import _get_plugin_specs_as_list + with pytest.raises(pytest.UsageError): - _get_plugin_specs_as_list(set(['foo'])) + _get_plugin_specs_as_list({"foo"}) with pytest.raises(pytest.UsageError): _get_plugin_specs_as_list(dict()) assert _get_plugin_specs_as_list(None) == [] - assert _get_plugin_specs_as_list('') == [] - assert _get_plugin_specs_as_list('foo') == ['foo'] - assert _get_plugin_specs_as_list('foo,bar') == ['foo', 'bar'] - assert _get_plugin_specs_as_list(['foo', 'bar']) == ['foo', 'bar'] - assert _get_plugin_specs_as_list(('foo', 'bar')) == ['foo', 'bar'] + assert _get_plugin_specs_as_list("") == [] + assert _get_plugin_specs_as_list("foo") == ["foo"] + assert _get_plugin_specs_as_list("foo,bar") == ["foo", "bar"] + assert _get_plugin_specs_as_list(["foo", "bar"]) == ["foo", "bar"] + assert _get_plugin_specs_as_list(("foo", "bar")) == ["foo", "bar"] class TestWarning(object): + def test_warn_config(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ values = [] def pytest_configure(config): config.warn("C1", "hello") def pytest_logwarning(code, message): if 
message == "hello" and code == "C1": values.append(1) - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ def test_proper(pytestconfig): import conftest assert conftest.values == [1] - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_warn_on_test_item_from_request(self, testdir, request): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture @@ -664,20 +791,24 @@ class TestWarning(object): def test_hello(fix): pass - """) + """ + ) result = testdir.runpytest("--disable-pytest-warnings") assert result.parseoutcomes()["warnings"] > 0 assert "hello" not in result.stdout.str() result = testdir.runpytest() - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ ===*warnings summary*=== *test_warn_on_test_item_from_request.py::test_hello* *hello* - """) + """ + ) class TestRootdir(object): + def test_simple_noini(self, tmpdir): assert get_common_ancestor([tmpdir]) == tmpdir a = tmpdir.mkdir("a") @@ -685,9 +816,9 @@ class TestRootdir(object): assert get_common_ancestor([tmpdir, a]) == tmpdir with tmpdir.as_cwd(): assert get_common_ancestor([]) == tmpdir - no_path = tmpdir.join('does-not-exist') + no_path = tmpdir.join("does-not-exist") assert get_common_ancestor([no_path]) == tmpdir - assert get_common_ancestor([no_path.join('a')]) == tmpdir + assert get_common_ancestor([no_path.join("a")]) == tmpdir @pytest.mark.parametrize("name", "setup.cfg tox.ini pytest.ini".split()) def test_with_ini(self, tmpdir, name): @@ -736,98 +867,136 @@ class TestRootdir(object): class TestOverrideIniArgs(object): + @pytest.mark.parametrize("name", "setup.cfg tox.ini pytest.ini".split()) def test_override_ini_names(self, testdir, name): - testdir.tmpdir.join(name).write(textwrap.dedent(""" + testdir.tmpdir.join(name).write( + textwrap.dedent( + """ [pytest] - custom = 1.0""")) - testdir.makeconftest(""" + custom = 1.0""" + ) + ) + testdir.makeconftest( + """ def pytest_addoption(parser): - parser.addini("custom", "")""") - testdir.makepyfile(""" + parser.addini("custom", "")""" + ) + testdir.makepyfile( + """ def test_pass(pytestconfig): ini_val = pytestconfig.getini("custom") - print('\\ncustom_option:%s\\n' % ini_val)""") + print('\\ncustom_option:%s\\n' % ini_val)""" + ) result = testdir.runpytest("--override-ini", "custom=2.0", "-s") assert result.ret == 0 result.stdout.fnmatch_lines(["custom_option:2.0"]) - result = testdir.runpytest("--override-ini", "custom=2.0", - "--override-ini=custom=3.0", "-s") + result = testdir.runpytest( + "--override-ini", "custom=2.0", "--override-ini=custom=3.0", "-s" + ) assert result.ret == 0 result.stdout.fnmatch_lines(["custom_option:3.0"]) def test_override_ini_pathlist(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_addoption(parser): - parser.addini("paths", "my new ini value", type="pathlist")""") - testdir.makeini(""" + parser.addini("paths", "my new ini value", type="pathlist")""" + ) + testdir.makeini( + """ [pytest] - paths=blah.py""") - testdir.makepyfile(""" + paths=blah.py""" + ) + testdir.makepyfile( + """ import py.path def test_pathlist(pytestconfig): config_paths = pytestconfig.getini("paths") print(config_paths) for cpf in config_paths: - print('\\nuser_path:%s' % cpf.basename)""") - result = testdir.runpytest("--override-ini", - 'paths=foo/bar1.py foo/bar2.py', "-s") - result.stdout.fnmatch_lines(["user_path:bar1.py", - "user_path:bar2.py"]) + print('\\nuser_path:%s' % cpf.basename)""" + ) + result = testdir.runpytest( + 
"--override-ini", "paths=foo/bar1.py foo/bar2.py", "-s" + ) + result.stdout.fnmatch_lines(["user_path:bar1.py", "user_path:bar2.py"]) def test_override_multiple_and_default(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_addoption(parser): addini = parser.addini addini("custom_option_1", "", default="o1") addini("custom_option_2", "", default="o2") addini("custom_option_3", "", default=False, type="bool") - addini("custom_option_4", "", default=True, type="bool")""") - testdir.makeini(""" + addini("custom_option_4", "", default=True, type="bool")""" + ) + testdir.makeini( + """ [pytest] custom_option_1=custom_option_1 custom_option_2=custom_option_2 - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ def test_multiple_options(pytestconfig): prefix = "custom_option" for x in range(1, 5): ini_value=pytestconfig.getini("%s_%d" % (prefix, x)) print('\\nini%d:%s' % (x, ini_value)) - """) + """ + ) result = testdir.runpytest( - "--override-ini", 'custom_option_1=fulldir=/tmp/user1', - '-o', 'custom_option_2=url=/tmp/user2?a=b&d=e', - "-o", 'custom_option_3=True', - "-o", 'custom_option_4=no', "-s") - result.stdout.fnmatch_lines(["ini1:fulldir=/tmp/user1", - "ini2:url=/tmp/user2?a=b&d=e", - "ini3:True", - "ini4:False"]) + "--override-ini", + "custom_option_1=fulldir=/tmp/user1", + "-o", + "custom_option_2=url=/tmp/user2?a=b&d=e", + "-o", + "custom_option_3=True", + "-o", + "custom_option_4=no", + "-s", + ) + result.stdout.fnmatch_lines( + [ + "ini1:fulldir=/tmp/user1", + "ini2:url=/tmp/user2?a=b&d=e", + "ini3:True", + "ini4:False", + ] + ) def test_override_ini_usage_error_bad_style(self, testdir): - testdir.makeini(""" + testdir.makeini( + """ [pytest] xdist_strict=False - """) - result = testdir.runpytest("--override-ini", 'xdist_strict True', "-s") + """ + ) + result = testdir.runpytest("--override-ini", "xdist_strict True", "-s") result.stderr.fnmatch_lines(["*ERROR* *expects option=value*"]) - @pytest.mark.parametrize('with_ini', [True, False]) + @pytest.mark.parametrize("with_ini", [True, False]) def test_override_ini_handled_asap(self, testdir, with_ini): """-o should be handled as soon as possible and always override what's in ini files (#2238)""" if with_ini: - testdir.makeini(""" + testdir.makeini( + """ [pytest] python_files=test_*.py - """) - testdir.makepyfile(unittest_ini_handle=""" + """ + ) + testdir.makepyfile( + unittest_ini_handle=""" def test(): pass - """) - result = testdir.runpytest("--override-ini", 'python_files=unittest_*.py') + """ + ) + result = testdir.runpytest("--override-ini", "python_files=unittest_*.py") result.stdout.fnmatch_lines(["*1 passed in*"]) def test_with_arg_outside_cwd_without_inifile(self, tmpdir, monkeypatch): @@ -846,8 +1015,7 @@ class TestOverrideIniArgs(object): assert rootdir == a assert inifile == parsed_inifile - @pytest.mark.parametrize('dirs', ([], ['does-not-exist'], - ['a/does-not-exist'])) + @pytest.mark.parametrize("dirs", ([], ["does-not-exist"], ["a/does-not-exist"])) def test_with_non_dir_arg(self, dirs, tmpdir): with tmpdir.ensure(dir=True).as_cwd(): rootdir, inifile, inicfg = determine_setup(None, dirs) @@ -858,46 +1026,47 @@ class TestOverrideIniArgs(object): a = tmpdir.mkdir("a") a.ensure("exist") with tmpdir.as_cwd(): - rootdir, inifile, inicfg = determine_setup(None, ['a/exist']) + rootdir, inifile, inicfg = determine_setup(None, ["a/exist"]) assert rootdir == tmpdir assert inifile is None def test_addopts_before_initini(self, monkeypatch): - cache_dir = '.custom_cache' - 
monkeypatch.setenv('PYTEST_ADDOPTS', '-o cache_dir=%s' % cache_dir) + cache_dir = ".custom_cache" + monkeypatch.setenv("PYTEST_ADDOPTS", "-o cache_dir=%s" % cache_dir) from _pytest.config import get_config + config = get_config() config._preparse([], addopts=True) - assert config._override_ini == ['cache_dir=%s' % cache_dir] + assert config._override_ini == ["cache_dir=%s" % cache_dir] def test_override_ini_does_not_contain_paths(self): """Check that -o no longer swallows all options after it (#3103)""" from _pytest.config import get_config + config = get_config() - config._preparse(['-o', 'cache_dir=/cache', '/some/test/path']) - assert config._override_ini == ['cache_dir=/cache'] + config._preparse(["-o", "cache_dir=/cache", "/some/test/path"]) + assert config._override_ini == ["cache_dir=/cache"] def test_multiple_override_ini_options(self, testdir, request): """Ensure a file path following a '-o' option does not generate an error (#3103)""" - testdir.makepyfile(**{ - "conftest.py": """ + testdir.makepyfile( + **{ + "conftest.py": """ def pytest_addoption(parser): parser.addini('foo', default=None, help='some option') parser.addini('bar', default=None, help='some option') """, - "test_foo.py": """ + "test_foo.py": """ def test(pytestconfig): assert pytestconfig.getini('foo') == '1' assert pytestconfig.getini('bar') == '0' """, - "test_bar.py": """ + "test_bar.py": """ def test(): assert False """, - }) - result = testdir.runpytest('-o', 'foo=1', '-o', 'bar=0', 'test_foo.py') - assert 'ERROR:' not in result.stderr.str() - result.stdout.fnmatch_lines([ - 'collected 1 item', - '*= 1 passed in *=', - ]) + } + ) + result = testdir.runpytest("-o", "foo=1", "-o", "bar=0", "test_foo.py") + assert "ERROR:" not in result.stderr.str() + result.stdout.fnmatch_lines(["collected 1 item", "*= 1 passed in *="]) diff --git a/testing/test_conftest.py b/testing/test_conftest.py index 93bf8ea8d..61b640976 100644 --- a/testing/test_conftest.py +++ b/testing/test_conftest.py @@ -11,6 +11,7 @@ from _pytest.main import EXIT_NOTESTSCOLLECTED, EXIT_USAGEERROR @pytest.fixture(scope="module", params=["global", "inpackage"]) def basedir(request, tmpdir_factory): from _pytest.tmpdir import tmpdir + tmpdir = tmpdir(request, tmpdir_factory) tmpdir.ensure("adir/conftest.py").write("a=1 ; Directory = 3") tmpdir.ensure("adir/b/conftest.py").write("b=2 ; a = 1.5") @@ -27,15 +28,19 @@ def ConftestWithSetinitial(path): def conftest_setinitial(conftest, args, confcutdir=None): + class Namespace(object): + def __init__(self): self.file_or_dir = args self.confcutdir = str(confcutdir) self.noconftest = False + conftest._set_initial_conftests(Namespace()) class TestConftestValueAccessGlobal(object): + def test_basic_init(self, basedir): conftest = PytestPluginManager() p = basedir.join("adir") @@ -47,15 +52,15 @@ class TestConftestValueAccessGlobal(object): conftest._getconftestmodules(basedir) snap1 = len(conftest._path2confmods) # assert len(conftest._path2confmods) == snap1 + 1 - conftest._getconftestmodules(basedir.join('adir')) + conftest._getconftestmodules(basedir.join("adir")) assert len(conftest._path2confmods) == snap1 + 1 - conftest._getconftestmodules(basedir.join('b')) + conftest._getconftestmodules(basedir.join("b")) assert len(conftest._path2confmods) == snap1 + 2 def test_value_access_not_existing(self, basedir): conftest = ConftestWithSetinitial(basedir) with pytest.raises(KeyError): - conftest._rget_with_confmod('a', basedir) + conftest._rget_with_confmod("a", basedir) def test_value_access_by_path(self, 
basedir): conftest = ConftestWithSetinitial(basedir) @@ -105,7 +110,8 @@ def test_issue151_load_all_conftests(testdir): def test_conftest_global_import(testdir): testdir.makeconftest("x=3") - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import py, pytest from _pytest.config import PytestPluginManager conf = PytestPluginManager() @@ -120,7 +126,8 @@ def test_conftest_global_import(testdir): assert mod2.y == 4 import conftest assert conftest is mod2, (conftest, mod) - """) + """ + ) res = testdir.runpython(p) assert res.ret == 0 @@ -154,13 +161,13 @@ def test_conftestcutdir_inplace_considered(testdir): assert values[0].__file__.startswith(str(conf)) -@pytest.mark.parametrize("name", 'test tests whatever .dotdir'.split()) +@pytest.mark.parametrize("name", "test tests whatever .dotdir".split()) def test_setinitial_conftest_subdirs(testdir, name): sub = testdir.mkdir(name) subconftest = sub.ensure("conftest.py") conftest = PytestPluginManager() conftest_setinitial(conftest, [sub.dirpath()], confcutdir=testdir.tmpdir) - if name not in ('whatever', '.dotdir'): + if name not in ("whatever", ".dotdir"): assert subconftest in conftest._conftestpath2mod assert len(conftest._conftestpath2mod) == 1 else: @@ -171,13 +178,17 @@ def test_setinitial_conftest_subdirs(testdir, name): def test_conftest_confcutdir(testdir): testdir.makeconftest("assert 0") x = testdir.mkdir("x") - x.join("conftest.py").write(_pytest._code.Source(""" + x.join("conftest.py").write( + _pytest._code.Source( + """ def pytest_addoption(parser): parser.addoption("--xyz", action="store_true") - """)) + """ + ) + ) result = testdir.runpytest("-h", "--confcutdir=%s" % x, x) result.stdout.fnmatch_lines(["*--xyz*"]) - assert 'warning: could not load initial' not in result.stdout.str() + assert "warning: could not load initial" not in result.stdout.str() def test_no_conftest(testdir): @@ -191,10 +202,14 @@ def test_no_conftest(testdir): def test_conftest_existing_resultlog(testdir): x = testdir.mkdir("tests") - x.join("conftest.py").write(_pytest._code.Source(""" + x.join("conftest.py").write( + _pytest._code.Source( + """ def pytest_addoption(parser): parser.addoption("--xyz", action="store_true") - """)) + """ + ) + ) testdir.makefile(ext=".log", result="") # Writes result.log result = testdir.runpytest("-h", "--resultlog", "result.log") result.stdout.fnmatch_lines(["*--xyz*"]) @@ -202,10 +217,14 @@ def test_conftest_existing_resultlog(testdir): def test_conftest_existing_junitxml(testdir): x = testdir.mkdir("tests") - x.join("conftest.py").write(_pytest._code.Source(""" + x.join("conftest.py").write( + _pytest._code.Source( + """ def pytest_addoption(parser): parser.addoption("--xyz", action="store_true") - """)) + """ + ) + ) testdir.makefile(ext=".xml", junit="") # Writes junit.xml result = testdir.runpytest("-h", "--junitxml", "junit.xml") result.stdout.fnmatch_lines(["*--xyz*"]) @@ -222,7 +241,7 @@ def test_conftest_import_order(testdir, monkeypatch): conftest = PytestPluginManager() conftest._confcutdir = testdir.tmpdir - monkeypatch.setattr(conftest, '_importconftest', impct) + monkeypatch.setattr(conftest, "_importconftest", impct) assert conftest._getconftestmodules(sub) == [ct1, ct2] @@ -232,7 +251,9 @@ def test_fixture_dependency(testdir, monkeypatch): ct1.write("") sub = testdir.mkdir("sub") sub.join("__init__.py").write("") - sub.join("conftest.py").write(dedent(""" + sub.join("conftest.py").write( + dedent( + """ import pytest @pytest.fixture @@ -246,10 +267,14 @@ def test_fixture_dependency(testdir, 
monkeypatch): @pytest.fixture def bar(foo): return 'bar' - """)) + """ + ) + ) subsub = sub.mkdir("subsub") subsub.join("__init__.py").write("") - subsub.join("test_bar.py").write(dedent(""" + subsub.join("test_bar.py").write( + dedent( + """ import pytest @pytest.fixture @@ -258,108 +283,133 @@ def test_fixture_dependency(testdir, monkeypatch): def test_event_fixture(bar): assert bar == 'sub bar' - """)) + """ + ) + ) result = testdir.runpytest("sub") result.stdout.fnmatch_lines(["*1 passed*"]) def test_conftest_found_with_double_dash(testdir): sub = testdir.mkdir("sub") - sub.join("conftest.py").write(dedent(""" + sub.join("conftest.py").write( + dedent( + """ def pytest_addoption(parser): parser.addoption("--hello-world", action="store_true") - """)) + """ + ) + ) p = sub.join("test_hello.py") p.write("def test_hello(): pass") result = testdir.runpytest(str(p) + "::test_hello", "-h") - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ *--hello-world* - """) + """ + ) class TestConftestVisibility(object): + def _setup_tree(self, testdir): # for issue616 # example mostly taken from: # https://mail.python.org/pipermail/pytest-dev/2014-September/002617.html runner = testdir.mkdir("empty") package = testdir.mkdir("package") - package.join("conftest.py").write(dedent("""\ + package.join("conftest.py").write( + dedent( + """\ import pytest @pytest.fixture def fxtr(): return "from-package" - """)) - package.join("test_pkgroot.py").write(dedent("""\ + """ + ) + ) + package.join("test_pkgroot.py").write( + dedent( + """\ def test_pkgroot(fxtr): assert fxtr == "from-package" - """)) + """ + ) + ) swc = package.mkdir("swc") swc.join("__init__.py").ensure() - swc.join("conftest.py").write(dedent("""\ + swc.join("conftest.py").write( + dedent( + """\ import pytest @pytest.fixture def fxtr(): return "from-swc" - """)) - swc.join("test_with_conftest.py").write(dedent("""\ + """ + ) + ) + swc.join("test_with_conftest.py").write( + dedent( + """\ def test_with_conftest(fxtr): assert fxtr == "from-swc" - """)) + """ + ) + ) snc = package.mkdir("snc") snc.join("__init__.py").ensure() - snc.join("test_no_conftest.py").write(dedent("""\ + snc.join("test_no_conftest.py").write( + dedent( + """\ def test_no_conftest(fxtr): assert fxtr == "from-package" # No local conftest.py, so should # use value from parent dir's - """)) + """ + ) + ) print("created directory structure:") for x in testdir.tmpdir.visit(): print(" " + x.relto(testdir.tmpdir)) - return { - "runner": runner, - "package": package, - "swc": swc, - "snc": snc} + return {"runner": runner, "package": package, "swc": swc, "snc": snc} # N.B.: "swc" stands for "subdir with conftest.py" # "snc" stands for "subdir no [i.e. without] conftest.py" - @pytest.mark.parametrize("chdir,testarg,expect_ntests_passed", [ - # Effective target: package/.. - ("runner", "..", 3), - ("package", "..", 3), - ("swc", "../..", 3), - ("snc", "../..", 3), - - # Effective target: package - ("runner", "../package", 3), - ("package", ".", 3), - ("swc", "..", 3), - ("snc", "..", 3), - - # Effective target: package/swc - ("runner", "../package/swc", 1), - ("package", "./swc", 1), - ("swc", ".", 1), - ("snc", "../swc", 1), - - # Effective target: package/snc - ("runner", "../package/snc", 1), - ("package", "./snc", 1), - ("swc", "../snc", 1), - ("snc", ".", 1), - ]) + @pytest.mark.parametrize( + "chdir,testarg,expect_ntests_passed", + [ + # Effective target: package/.. 
+ ("runner", "..", 3), + ("package", "..", 3), + ("swc", "../..", 3), + ("snc", "../..", 3), + # Effective target: package + ("runner", "../package", 3), + ("package", ".", 3), + ("swc", "..", 3), + ("snc", "..", 3), + # Effective target: package/swc + ("runner", "../package/swc", 1), + ("package", "./swc", 1), + ("swc", ".", 1), + ("snc", "../swc", 1), + # Effective target: package/snc + ("runner", "../package/snc", 1), + ("package", "./snc", 1), + ("swc", "../snc", 1), + ("snc", ".", 1), + ], + ) @pytest.mark.issue616 def test_parsefactories_relative_node_ids( - self, testdir, chdir, testarg, expect_ntests_passed): + self, testdir, chdir, testarg, expect_ntests_passed + ): dirs = self._setup_tree(testdir) - print("pytest run in cwd: %s" % ( - dirs[chdir].relto(testdir.tmpdir))) + print("pytest run in cwd: %s" % (dirs[chdir].relto(testdir.tmpdir))) print("pytestarg : %s" % (testarg)) print("expected pass : %s" % (expect_ntests_passed)) with dirs[chdir].as_cwd(): @@ -367,108 +417,127 @@ class TestConftestVisibility(object): reprec.assertoutcome(passed=expect_ntests_passed) -@pytest.mark.parametrize('confcutdir,passed,error', [ - ('.', 2, 0), - ('src', 1, 1), - (None, 1, 1), -]) +@pytest.mark.parametrize( + "confcutdir,passed,error", [(".", 2, 0), ("src", 1, 1), (None, 1, 1)] +) def test_search_conftest_up_to_inifile(testdir, confcutdir, passed, error): """Test that conftest files are detected only up to an ini file, unless an explicit --confcutdir option is given. """ root = testdir.tmpdir - src = root.join('src').ensure(dir=1) - src.join('pytest.ini').write('[pytest]') - src.join('conftest.py').write(_pytest._code.Source(""" + src = root.join("src").ensure(dir=1) + src.join("pytest.ini").write("[pytest]") + src.join("conftest.py").write( + _pytest._code.Source( + """ import pytest @pytest.fixture def fix1(): pass - """)) - src.join('test_foo.py').write(_pytest._code.Source(""" + """ + ) + ) + src.join("test_foo.py").write( + _pytest._code.Source( + """ def test_1(fix1): pass def test_2(out_of_reach): pass - """)) - root.join('conftest.py').write(_pytest._code.Source(""" + """ + ) + ) + root.join("conftest.py").write( + _pytest._code.Source( + """ import pytest @pytest.fixture def out_of_reach(): pass - """)) + """ + ) + ) args = [str(src)] if confcutdir: - args = ['--confcutdir=%s' % root.join(confcutdir)] + args = ["--confcutdir=%s" % root.join(confcutdir)] result = testdir.runpytest(*args) - match = '' + match = "" if passed: - match += '*%d passed*' % passed + match += "*%d passed*" % passed if error: - match += '*%d error*' % error + match += "*%d error*" % error result.stdout.fnmatch_lines(match) def test_issue1073_conftest_special_objects(testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ class DontTouchMe(object): def __getattr__(self, x): raise Exception('cant touch me') x = DontTouchMe() - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ def test_some(): pass - """) + """ + ) res = testdir.runpytest() assert res.ret == 0 def test_conftest_exception_handling(testdir): - testdir.makeconftest(''' + testdir.makeconftest( + """ raise ValueError() - ''') - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ def test_some(): pass - """) + """ + ) res = testdir.runpytest() assert res.ret == 4 - assert 'raise ValueError()' in [line.strip() for line in res.errlines] + assert "raise ValueError()" in [line.strip() for line in res.errlines] def test_hook_proxy(testdir): """Session's gethookproxy() would cache conftests incorrectly (#2016). 
It was decided to remove the cache altogether. """ - testdir.makepyfile(**{ - 'root/demo-0/test_foo1.py': "def test1(): pass", - - 'root/demo-a/test_foo2.py': "def test1(): pass", - 'root/demo-a/conftest.py': """ + testdir.makepyfile( + **{ + "root/demo-0/test_foo1.py": "def test1(): pass", + "root/demo-a/test_foo2.py": "def test1(): pass", + "root/demo-a/conftest.py": """ def pytest_ignore_collect(path, config): return True """, - - 'root/demo-b/test_foo3.py': "def test1(): pass", - 'root/demo-c/test_foo4.py': "def test1(): pass", - }) + "root/demo-b/test_foo3.py": "def test1(): pass", + "root/demo-c/test_foo4.py": "def test1(): pass", + } + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - '*test_foo1.py*', - '*test_foo3.py*', - '*test_foo4.py*', - '*3 passed*', - ]) + result.stdout.fnmatch_lines( + ["*test_foo1.py*", "*test_foo3.py*", "*test_foo4.py*", "*3 passed*"] + ) def test_required_option_help(testdir): testdir.makeconftest("assert 0") x = testdir.mkdir("x") - x.join("conftest.py").write(_pytest._code.Source(""" + x.join("conftest.py").write( + _pytest._code.Source( + """ def pytest_addoption(parser): parser.addoption("--xyz", action="store_true", required=True) - """)) + """ + ) + ) result = testdir.runpytest("-h", x) - assert 'argument --xyz is required' not in result.stdout.str() - assert 'general:' in result.stdout.str() + assert "argument --xyz is required" not in result.stdout.str() + assert "general:" in result.stdout.str() diff --git a/testing/test_doctest.py b/testing/test_doctest.py index 314398395..8ef7cfd65 100644 --- a/testing/test_doctest.py +++ b/testing/test_doctest.py @@ -11,12 +11,14 @@ class TestDoctests(object): def test_collect_testtextfile(self, testdir): w = testdir.maketxtfile(whatever="") - checkfile = testdir.maketxtfile(test_something=""" + checkfile = testdir.maketxtfile( + test_something=""" alskdjalsdk >>> i = 5 >>> i-1 4 - """) + """ + ) for x in (testdir.tmpdir, checkfile): # print "checking that %s returns custom items" % (x,) @@ -31,28 +33,27 @@ class TestDoctests(object): def test_collect_module_empty(self, testdir): path = testdir.makepyfile(whatever="#") for p in (path, testdir.tmpdir): - items, reprec = testdir.inline_genitems(p, - '--doctest-modules') + items, reprec = testdir.inline_genitems(p, "--doctest-modules") assert len(items) == 0 def test_collect_module_single_modulelevel_doctest(self, testdir): path = testdir.makepyfile(whatever='""">>> pass"""') for p in (path, testdir.tmpdir): - items, reprec = testdir.inline_genitems(p, - '--doctest-modules') + items, reprec = testdir.inline_genitems(p, "--doctest-modules") assert len(items) == 1 assert isinstance(items[0], DoctestItem) assert isinstance(items[0].parent, DoctestModule) def test_collect_module_two_doctest_one_modulelevel(self, testdir): - path = testdir.makepyfile(whatever=""" + path = testdir.makepyfile( + whatever=""" '>>> x = None' def my_func(): ">>> magic = 42 " - """) + """ + ) for p in (path, testdir.tmpdir): - items, reprec = testdir.inline_genitems(p, - '--doctest-modules') + items, reprec = testdir.inline_genitems(p, "--doctest-modules") assert len(items) == 2 assert isinstance(items[0], DoctestItem) assert isinstance(items[1], DoctestItem) @@ -60,7 +61,8 @@ class TestDoctests(object): assert items[0].parent is items[1].parent def test_collect_module_two_doctest_no_modulelevel(self, testdir): - path = testdir.makepyfile(whatever=""" + path = testdir.makepyfile( + whatever=""" '# Empty' def my_func(): ">>> magic = 42 " @@ -74,10 +76,10 @@ class 
TestDoctests(object): # This is another function >>> import os # this one does have a doctest ''' - """) + """ + ) for p in (path, testdir.tmpdir): - items, reprec = testdir.inline_genitems(p, - '--doctest-modules') + items, reprec = testdir.inline_genitems(p, "--doctest-modules") assert len(items) == 2 assert isinstance(items[0], DoctestItem) assert isinstance(items[1], DoctestItem) @@ -85,99 +87,108 @@ class TestDoctests(object): assert items[0].parent is items[1].parent def test_simple_doctestfile(self, testdir): - p = testdir.maketxtfile(test_doc=""" + p = testdir.maketxtfile( + test_doc=""" >>> x = 1 >>> x == 1 False - """) - reprec = testdir.inline_run(p, ) + """ + ) + reprec = testdir.inline_run(p) reprec.assertoutcome(failed=1) def test_new_pattern(self, testdir): - p = testdir.maketxtfile(xdoc=""" + p = testdir.maketxtfile( + xdoc=""" >>> x = 1 >>> x == 1 False - """) + """ + ) reprec = testdir.inline_run(p, "--doctest-glob=x*.txt") reprec.assertoutcome(failed=1) def test_multiple_patterns(self, testdir): """Test support for multiple --doctest-glob arguments (#1255). """ - testdir.maketxtfile(xdoc=""" + testdir.maketxtfile( + xdoc=""" >>> 1 1 - """) - testdir.makefile('.foo', test=""" + """ + ) + testdir.makefile( + ".foo", + test=""" >>> 1 1 - """) - testdir.maketxtfile(test_normal=""" + """, + ) + testdir.maketxtfile( + test_normal=""" >>> 1 1 - """) - expected = set(['xdoc.txt', 'test.foo', 'test_normal.txt']) - assert set(x.basename for x in testdir.tmpdir.listdir()) == expected + """ + ) + expected = {"xdoc.txt", "test.foo", "test_normal.txt"} + assert {x.basename for x in testdir.tmpdir.listdir()} == expected args = ["--doctest-glob=xdoc*.txt", "--doctest-glob=*.foo"] result = testdir.runpytest(*args) - result.stdout.fnmatch_lines([ - '*test.foo *', - '*xdoc.txt *', - '*2 passed*', - ]) + result.stdout.fnmatch_lines(["*test.foo *", "*xdoc.txt *", "*2 passed*"]) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - '*test_normal.txt *', - '*1 passed*', - ]) + result.stdout.fnmatch_lines(["*test_normal.txt *", "*1 passed*"]) @pytest.mark.parametrize( - ' test_string, encoding', - [ - (u'foo', 'ascii'), - (u'öäü', 'latin1'), - (u'öäü', 'utf-8') - ] + " test_string, encoding", + [(u"foo", "ascii"), (u"öäü", "latin1"), (u"öäü", "utf-8")], ) def test_encoding(self, testdir, test_string, encoding): """Test support for doctest_encoding ini option. """ - testdir.makeini(""" + testdir.makeini( + """ [pytest] - doctest_encoding={0} - """.format(encoding)) + doctest_encoding={} + """.format( + encoding + ) + ) doctest = u""" - >>> u"{0}" - {1} - """.format(test_string, repr(test_string)) + >>> u"{}" + {} + """.format( + test_string, repr(test_string) + ) testdir._makefile(".txt", [doctest], {}, encoding=encoding) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - '*1 passed*', - ]) + result.stdout.fnmatch_lines(["*1 passed*"]) def test_doctest_unexpected_exception(self, testdir): - testdir.maketxtfile(""" + testdir.maketxtfile( + """ >>> i = 0 >>> 0 / i 2 - """) + """ + ) result = testdir.runpytest("--doctest-modules") - result.stdout.fnmatch_lines([ - "*unexpected_exception*", - "*>>> i = 0*", - "*>>> 0 / i*", - "*UNEXPECTED*ZeroDivision*", - ]) + result.stdout.fnmatch_lines( + [ + "*unexpected_exception*", + "*>>> i = 0*", + "*>>> 0 / i*", + "*UNEXPECTED*ZeroDivision*", + ] + ) def test_docstring_partial_context_around_error(self, testdir): """Test that we show some context before the actual line of a failing doctest. 
""" - testdir.makepyfile(''' + testdir.makepyfile( + ''' def foo(): """ text-line-1 @@ -196,28 +207,32 @@ class TestDoctests(object): text-line-after """ - ''') - result = testdir.runpytest('--doctest-modules') - result.stdout.fnmatch_lines([ - '*docstring_partial_context_around_error*', - '005*text-line-3', - '006*text-line-4', - '013*text-line-11', - '014*>>> 1 + 1', - 'Expected:', - ' 3', - 'Got:', - ' 2', - ]) + ''' + ) + result = testdir.runpytest("--doctest-modules") + result.stdout.fnmatch_lines( + [ + "*docstring_partial_context_around_error*", + "005*text-line-3", + "006*text-line-4", + "013*text-line-11", + "014*>>> 1 + 1", + "Expected:", + " 3", + "Got:", + " 2", + ] + ) # lines below should be trimmed out - assert 'text-line-2' not in result.stdout.str() - assert 'text-line-after' not in result.stdout.str() + assert "text-line-2" not in result.stdout.str() + assert "text-line-after" not in result.stdout.str() def test_docstring_full_context_around_error(self, testdir): """Test that we show the whole context before the actual line of a failing doctest, provided that the context is up to 10 lines long. """ - testdir.makepyfile(''' + testdir.makepyfile( + ''' def foo(): """ text-line-1 @@ -226,21 +241,26 @@ class TestDoctests(object): >>> 1 + 1 3 """ - ''') - result = testdir.runpytest('--doctest-modules') - result.stdout.fnmatch_lines([ - '*docstring_full_context_around_error*', - '003*text-line-1', - '004*text-line-2', - '006*>>> 1 + 1', - 'Expected:', - ' 3', - 'Got:', - ' 2', - ]) + ''' + ) + result = testdir.runpytest("--doctest-modules") + result.stdout.fnmatch_lines( + [ + "*docstring_full_context_around_error*", + "003*text-line-1", + "004*text-line-2", + "006*>>> 1 + 1", + "Expected:", + " 3", + "Got:", + " 2", + ] + ) def test_doctest_linedata_missing(self, testdir): - testdir.tmpdir.join('hello.py').write(_pytest._code.Source(""" + testdir.tmpdir.join("hello.py").write( + _pytest._code.Source( + """ class Fun(object): @property def test(self): @@ -248,137 +268,176 @@ class TestDoctests(object): >>> a = 1 >>> 1/0 ''' - """)) + """ + ) + ) result = testdir.runpytest("--doctest-modules") - result.stdout.fnmatch_lines([ - "*hello*", - "*EXAMPLE LOCATION UNKNOWN, not showing all tests of that example*", - "*1/0*", - "*UNEXPECTED*ZeroDivision*", - "*1 failed*", - ]) + result.stdout.fnmatch_lines( + [ + "*hello*", + "*EXAMPLE LOCATION UNKNOWN, not showing all tests of that example*", + "*1/0*", + "*UNEXPECTED*ZeroDivision*", + "*1 failed*", + ] + ) def test_doctest_unex_importerror_only_txt(self, testdir): - testdir.maketxtfile(""" + testdir.maketxtfile( + """ >>> import asdalsdkjaslkdjasd >>> - """) + """ + ) result = testdir.runpytest() # doctest is never executed because of error during hello.py collection - result.stdout.fnmatch_lines([ - "*>>> import asdals*", - "*UNEXPECTED*{e}*".format(e=MODULE_NOT_FOUND_ERROR), - "{e}: No module named *asdal*".format(e=MODULE_NOT_FOUND_ERROR), - ]) + result.stdout.fnmatch_lines( + [ + "*>>> import asdals*", + "*UNEXPECTED*{e}*".format(e=MODULE_NOT_FOUND_ERROR), + "{e}: No module named *asdal*".format(e=MODULE_NOT_FOUND_ERROR), + ] + ) def test_doctest_unex_importerror_with_module(self, testdir): - testdir.tmpdir.join("hello.py").write(_pytest._code.Source(""" + testdir.tmpdir.join("hello.py").write( + _pytest._code.Source( + """ import asdalsdkjaslkdjasd - """)) - testdir.maketxtfile(""" + """ + ) + ) + testdir.maketxtfile( + """ >>> import hello >>> - """) + """ + ) result = testdir.runpytest("--doctest-modules") # doctest is never 
executed because of error during hello.py collection - result.stdout.fnmatch_lines([ - "*ERROR collecting hello.py*", - "*{e}: No module named *asdals*".format(e=MODULE_NOT_FOUND_ERROR), - "*Interrupted: 1 errors during collection*", - ]) + result.stdout.fnmatch_lines( + [ + "*ERROR collecting hello.py*", + "*{e}: No module named *asdals*".format(e=MODULE_NOT_FOUND_ERROR), + "*Interrupted: 1 errors during collection*", + ] + ) def test_doctestmodule(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ ''' >>> x = 1 >>> x == 1 False ''' - """) + """ + ) reprec = testdir.inline_run(p, "--doctest-modules") reprec.assertoutcome(failed=1) def test_doctestmodule_external_and_issue116(self, testdir): p = testdir.mkpydir("hello") - p.join("__init__.py").write(_pytest._code.Source(""" + p.join("__init__.py").write( + _pytest._code.Source( + """ def somefunc(): ''' >>> i = 0 >>> i + 1 2 ''' - """)) + """ + ) + ) result = testdir.runpytest(p, "--doctest-modules") - result.stdout.fnmatch_lines([ - '004 *>>> i = 0', - '005 *>>> i + 1', - '*Expected:', - "* 2", - "*Got:", - "* 1", - "*:5: DocTestFailure" - ]) + result.stdout.fnmatch_lines( + [ + "004 *>>> i = 0", + "005 *>>> i + 1", + "*Expected:", + "* 2", + "*Got:", + "* 1", + "*:5: DocTestFailure", + ] + ) def test_txtfile_failing(self, testdir): - p = testdir.maketxtfile(""" + p = testdir.maketxtfile( + """ >>> i = 0 >>> i + 1 2 - """) + """ + ) result = testdir.runpytest(p, "-s") - result.stdout.fnmatch_lines([ - '001 >>> i = 0', - '002 >>> i + 1', - 'Expected:', - " 2", - "Got:", - " 1", - "*test_txtfile_failing.txt:2: DocTestFailure" - ]) + result.stdout.fnmatch_lines( + [ + "001 >>> i = 0", + "002 >>> i + 1", + "Expected:", + " 2", + "Got:", + " 1", + "*test_txtfile_failing.txt:2: DocTestFailure", + ] + ) def test_txtfile_with_fixtures(self, testdir): - p = testdir.maketxtfile(""" + p = testdir.maketxtfile( + """ >>> dir = getfixture('tmpdir') >>> type(dir).__name__ 'LocalPath' - """) - reprec = testdir.inline_run(p, ) + """ + ) + reprec = testdir.inline_run(p) reprec.assertoutcome(passed=1) def test_txtfile_with_usefixtures_in_ini(self, testdir): - testdir.makeini(""" + testdir.makeini( + """ [pytest] usefixtures = myfixture - """) - testdir.makeconftest(""" + """ + ) + testdir.makeconftest( + """ import pytest @pytest.fixture def myfixture(monkeypatch): monkeypatch.setenv("HELLO", "WORLD") - """) + """ + ) - p = testdir.maketxtfile(""" + p = testdir.maketxtfile( + """ >>> import os >>> os.environ["HELLO"] 'WORLD' - """) - reprec = testdir.inline_run(p, ) + """ + ) + reprec = testdir.inline_run(p) reprec.assertoutcome(passed=1) def test_doctestmodule_with_fixtures(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ ''' >>> dir = getfixture('tmpdir') >>> type(dir).__name__ 'LocalPath' ''' - """) + """ + ) reprec = testdir.inline_run(p, "--doctest-modules") reprec.assertoutcome(passed=1) def test_doctestmodule_three_tests(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ ''' >>> dir = getfixture('tmpdir') >>> type(dir).__name__ @@ -398,12 +457,14 @@ class TestDoctests(object): >>> os is os True ''' - """) + """ + ) reprec = testdir.inline_run(p, "--doctest-modules") reprec.assertoutcome(passed=3) def test_doctestmodule_two_tests_one_fail(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ class MyClass(object): def bad_meth(self): ''' @@ -417,16 +478,20 @@ class TestDoctests(object): >>> magic - 42 0 ''' - """) + """ + ) reprec = 
testdir.inline_run(p, "--doctest-modules") reprec.assertoutcome(failed=1, passed=1) def test_ignored_whitespace(self, testdir): - testdir.makeini(""" + testdir.makeini( + """ [pytest] doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE - """) - p = testdir.makepyfile(""" + """ + ) + p = testdir.makepyfile( + """ class MyClass(object): ''' >>> a = "foo " @@ -434,16 +499,20 @@ class TestDoctests(object): foo ''' pass - """) + """ + ) reprec = testdir.inline_run(p, "--doctest-modules") reprec.assertoutcome(passed=1) def test_non_ignored_whitespace(self, testdir): - testdir.makeini(""" + testdir.makeini( + """ [pytest] doctest_optionflags = ELLIPSIS - """) - p = testdir.makepyfile(""" + """ + ) + p = testdir.makepyfile( + """ class MyClass(object): ''' >>> a = "foo " @@ -451,55 +520,64 @@ class TestDoctests(object): foo ''' pass - """) + """ + ) reprec = testdir.inline_run(p, "--doctest-modules") reprec.assertoutcome(failed=1, passed=0) def test_ignored_whitespace_glob(self, testdir): - testdir.makeini(""" + testdir.makeini( + """ [pytest] doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE - """) - p = testdir.maketxtfile(xdoc=""" + """ + ) + p = testdir.maketxtfile( + xdoc=""" >>> a = "foo " >>> print(a) foo - """) + """ + ) reprec = testdir.inline_run(p, "--doctest-glob=x*.txt") reprec.assertoutcome(passed=1) def test_non_ignored_whitespace_glob(self, testdir): - testdir.makeini(""" + testdir.makeini( + """ [pytest] doctest_optionflags = ELLIPSIS - """) - p = testdir.maketxtfile(xdoc=""" + """ + ) + p = testdir.maketxtfile( + xdoc=""" >>> a = "foo " >>> print(a) foo - """) + """ + ) reprec = testdir.inline_run(p, "--doctest-glob=x*.txt") reprec.assertoutcome(failed=1, passed=0) def test_contains_unicode(self, testdir): """Fix internal error with docstrings containing non-ascii characters. """ - testdir.makepyfile(u''' + testdir.makepyfile( + u''' # encoding: utf-8 def foo(): """ >>> name = 'с' # not letter 'c' but instead Cyrillic 's'. 'anything' """ - ''') - result = testdir.runpytest('--doctest-modules') - result.stdout.fnmatch_lines([ - 'Got nothing', - '* 1 failed in*', - ]) + ''' + ) + result = testdir.runpytest("--doctest-modules") + result.stdout.fnmatch_lines(["Got nothing", "* 1 failed in*"]) def test_ignore_import_errors_on_doctest(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import asdf def add_one(x): @@ -508,26 +586,29 @@ class TestDoctests(object): 2 ''' return x + 1 - """) + """ + ) - reprec = testdir.inline_run(p, "--doctest-modules", - "--doctest-ignore-import-errors") + reprec = testdir.inline_run( + p, "--doctest-modules", "--doctest-ignore-import-errors" + ) reprec.assertoutcome(skipped=1, failed=1, passed=0) def test_junit_report_for_doctest(self, testdir): """ #713: Fix --junit-xml option when used with --doctest-modules. """ - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ def foo(): ''' >>> 1 + 1 3 ''' pass - """) - reprec = testdir.inline_run(p, "--doctest-modules", - "--junit-xml=junit.xml") + """ + ) + reprec = testdir.inline_run(p, "--doctest-modules", "--junit-xml=junit.xml") reprec.assertoutcome(failed=1) def test_unicode_doctest(self, testdir): @@ -535,7 +616,8 @@ class TestDoctests(object): Test case for issue 2434: DecodeError on Python 2 when doctest contains non-ascii characters. """ - p = testdir.maketxtfile(test_unicode_doctest=""" + p = testdir.maketxtfile( + test_unicode_doctest=""" .. 
doctest:: >>> print( @@ -545,19 +627,20 @@ class TestDoctests(object): Byé >>> 1/0 # Byé 1 - """) + """ + ) result = testdir.runpytest(p) - result.stdout.fnmatch_lines([ - '*UNEXPECTED EXCEPTION: ZeroDivisionError*', - '*1 failed*', - ]) + result.stdout.fnmatch_lines( + ["*UNEXPECTED EXCEPTION: ZeroDivisionError*", "*1 failed*"] + ) def test_unicode_doctest_module(self, testdir): """ Test case for issue 2434: DecodeError on Python 2 when doctest docstring contains non-ascii characters. """ - p = testdir.makepyfile(test_unicode_doctest_module=""" + p = testdir.makepyfile( + test_unicode_doctest_module=""" # -*- encoding: utf-8 -*- from __future__ import unicode_literals @@ -567,112 +650,139 @@ class TestDoctests(object): único ''' return "único" - """) - result = testdir.runpytest(p, '--doctest-modules') - result.stdout.fnmatch_lines(['* 1 passed *']) + """ + ) + result = testdir.runpytest(p, "--doctest-modules") + result.stdout.fnmatch_lines(["* 1 passed *"]) def test_reportinfo(self, testdir): - ''' + """ Test case to make sure that DoctestItem.reportinfo() returns lineno. - ''' - p = testdir.makepyfile(test_reportinfo=""" + """ + p = testdir.makepyfile( + test_reportinfo=""" def foo(x): ''' >>> foo('a') 'b' ''' return 'c' - """) - items, reprec = testdir.inline_genitems(p, '--doctest-modules') + """ + ) + items, reprec = testdir.inline_genitems(p, "--doctest-modules") reportinfo = items[0].reportinfo() assert reportinfo[1] == 1 def test_valid_setup_py(self, testdir): - ''' + """ Test to make sure that pytest ignores valid setup.py files when ran with --doctest-modules - ''' - p = testdir.makepyfile(setup=""" + """ + p = testdir.makepyfile( + setup=""" from setuptools import setup, find_packages setup(name='sample', version='0.0', description='description', packages=find_packages() ) - """) - result = testdir.runpytest(p, '--doctest-modules') - result.stdout.fnmatch_lines(['*collected 0 items*']) + """ + ) + result = testdir.runpytest(p, "--doctest-modules") + result.stdout.fnmatch_lines(["*collected 0 items*"]) def test_invalid_setup_py(self, testdir): - ''' + """ Test to make sure that pytest reads setup.py files that are not used for python packages when ran with --doctest-modules - ''' - p = testdir.makepyfile(setup=""" + """ + p = testdir.makepyfile( + setup=""" def test_foo(): return 'bar' - """) - result = testdir.runpytest(p, '--doctest-modules') - result.stdout.fnmatch_lines(['*collected 1 item*']) + """ + ) + result = testdir.runpytest(p, "--doctest-modules") + result.stdout.fnmatch_lines(["*collected 1 item*"]) class TestLiterals(object): - @pytest.mark.parametrize('config_mode', ['ini', 'comment']) + @pytest.mark.parametrize("config_mode", ["ini", "comment"]) def test_allow_unicode(self, testdir, config_mode): """Test that doctests which output unicode work in all python versions tested by pytest when the ALLOW_UNICODE option is used (either in the ini file or by an inline comment). 
""" - if config_mode == 'ini': - testdir.makeini(''' + if config_mode == "ini": + testdir.makeini( + """ [pytest] doctest_optionflags = ALLOW_UNICODE - ''') - comment = '' + """ + ) + comment = "" else: - comment = '#doctest: +ALLOW_UNICODE' + comment = "#doctest: +ALLOW_UNICODE" - testdir.maketxtfile(test_doc=""" + testdir.maketxtfile( + test_doc=""" >>> b'12'.decode('ascii') {comment} '12' - """.format(comment=comment)) - testdir.makepyfile(foo=""" + """.format( + comment=comment + ) + ) + testdir.makepyfile( + foo=""" def foo(): ''' >>> b'12'.decode('ascii') {comment} '12' ''' - """.format(comment=comment)) + """.format( + comment=comment + ) + ) reprec = testdir.inline_run("--doctest-modules") reprec.assertoutcome(passed=2) - @pytest.mark.parametrize('config_mode', ['ini', 'comment']) + @pytest.mark.parametrize("config_mode", ["ini", "comment"]) def test_allow_bytes(self, testdir, config_mode): """Test that doctests which output bytes work in all python versions tested by pytest when the ALLOW_BYTES option is used (either in the ini file or by an inline comment)(#1287). """ - if config_mode == 'ini': - testdir.makeini(''' + if config_mode == "ini": + testdir.makeini( + """ [pytest] doctest_optionflags = ALLOW_BYTES - ''') - comment = '' + """ + ) + comment = "" else: - comment = '#doctest: +ALLOW_BYTES' + comment = "#doctest: +ALLOW_BYTES" - testdir.maketxtfile(test_doc=""" + testdir.maketxtfile( + test_doc=""" >>> b'foo' {comment} 'foo' - """.format(comment=comment)) - testdir.makepyfile(foo=""" + """.format( + comment=comment + ) + ) + testdir.makepyfile( + foo=""" def foo(): ''' >>> b'foo' {comment} 'foo' ''' - """.format(comment=comment)) + """.format( + comment=comment + ) + ) reprec = testdir.inline_run("--doctest-modules") reprec.assertoutcome(passed=2) @@ -681,10 +791,12 @@ class TestLiterals(object): the ALLOW_UNICODE option is not used. The same test should pass in Python 3. """ - testdir.maketxtfile(test_doc=""" + testdir.maketxtfile( + test_doc=""" >>> b'12'.decode('ascii') '12' - """) + """ + ) reprec = testdir.inline_run() passed = int(sys.version_info[0] >= 3) reprec.assertoutcome(passed=passed, failed=int(not passed)) @@ -694,10 +806,12 @@ class TestLiterals(object): the ALLOW_BYTES option is not used. The same test should pass in Python 2 (#1287). """ - testdir.maketxtfile(test_doc=""" + testdir.maketxtfile( + test_doc=""" >>> b'foo' 'foo' - """) + """ + ) reprec = testdir.inline_run() passed = int(sys.version_info[0] == 2) reprec.assertoutcome(passed=passed, failed=int(not passed)) @@ -709,55 +823,63 @@ class TestDoctestSkips(object): the tests should be SKIPPED rather than PASSED. 
(#957) """ - @pytest.fixture(params=['text', 'module']) + @pytest.fixture(params=["text", "module"]) def makedoctest(self, testdir, request): + def makeit(doctest): mode = request.param - if mode == 'text': + if mode == "text": testdir.maketxtfile(doctest) else: - assert mode == 'module' + assert mode == "module" testdir.makepyfile('"""\n%s"""' % doctest) return makeit def test_one_skipped(self, testdir, makedoctest): - makedoctest(""" + makedoctest( + """ >>> 1 + 1 # doctest: +SKIP 2 >>> 2 + 2 4 - """) + """ + ) reprec = testdir.inline_run("--doctest-modules") reprec.assertoutcome(passed=1) def test_one_skipped_failed(self, testdir, makedoctest): - makedoctest(""" + makedoctest( + """ >>> 1 + 1 # doctest: +SKIP 2 >>> 2 + 2 200 - """) + """ + ) reprec = testdir.inline_run("--doctest-modules") reprec.assertoutcome(failed=1) def test_all_skipped(self, testdir, makedoctest): - makedoctest(""" + makedoctest( + """ >>> 1 + 1 # doctest: +SKIP 2 >>> 2 + 2 # doctest: +SKIP 200 - """) + """ + ) reprec = testdir.inline_run("--doctest-modules") reprec.assertoutcome(skipped=1) def test_vacuous_all_skipped(self, testdir, makedoctest): - makedoctest('') + makedoctest("") reprec = testdir.inline_run("--doctest-modules") reprec.assertoutcome(passed=0, skipped=0) def test_continue_on_failure(self, testdir): - testdir.maketxtfile(test_something=""" + testdir.maketxtfile( + test_something=""" >>> i = 5 >>> def foo(): ... raise ValueError('error1') @@ -766,28 +888,28 @@ class TestDoctestSkips(object): >>> i + 2 7 >>> i + 1 - """) + """ + ) result = testdir.runpytest("--doctest-modules", "--doctest-continue-on-failure") result.assert_outcomes(passed=0, failed=1) # The lines that contains the failure are 4, 5, and 8. The first one # is a stack trace and the other two are mismatches. - result.stdout.fnmatch_lines([ - "*4: UnexpectedException*", - "*5: DocTestFailure*", - "*8: DocTestFailure*", - ]) + result.stdout.fnmatch_lines( + ["*4: UnexpectedException*", "*5: DocTestFailure*", "*8: DocTestFailure*"] + ) class TestDoctestAutoUseFixtures(object): - SCOPES = ['module', 'session', 'class', 'function'] + SCOPES = ["module", "session", "class", "function"] def test_doctest_module_session_fixture(self, testdir): """Test that session fixtures are initialized for doctest modules (#768) """ # session fixture which changes some global data, which will # be accessed by doctests in a module - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest import sys @@ -797,8 +919,10 @@ class TestDoctestAutoUseFixtures(object): sys.pytest_session_data = 1 yield del sys.pytest_session_data - """) - testdir.makepyfile(foo=""" + """ + ) + testdir.makepyfile( + foo=""" import sys def foo(): @@ -810,24 +934,30 @@ class TestDoctestAutoUseFixtures(object): ''' >>> assert sys.pytest_session_data == 1 ''' - """) + """ + ) result = testdir.runpytest("--doctest-modules") - result.stdout.fnmatch_lines('*2 passed*') + result.stdout.fnmatch_lines("*2 passed*") - @pytest.mark.parametrize('scope', SCOPES) - @pytest.mark.parametrize('enable_doctest', [True, False]) + @pytest.mark.parametrize("scope", SCOPES) + @pytest.mark.parametrize("enable_doctest", [True, False]) def test_fixture_scopes(self, testdir, scope, enable_doctest): """Test that auto-use fixtures work properly with doctest modules. See #1057 and #1100. 
""" - testdir.makeconftest(''' + testdir.makeconftest( + """ import pytest @pytest.fixture(autouse=True, scope="{scope}") def auto(request): return 99 - '''.format(scope=scope)) - testdir.makepyfile(test_1=''' + """.format( + scope=scope + ) + ) + testdir.makepyfile( + test_1=''' def test_foo(): """ >>> getfixture('auto') + 1 @@ -835,47 +965,58 @@ class TestDoctestAutoUseFixtures(object): """ def test_bar(): assert 1 - ''') - params = ('--doctest-modules',) if enable_doctest else () + ''' + ) + params = ("--doctest-modules",) if enable_doctest else () passes = 3 if enable_doctest else 2 result = testdir.runpytest(*params) - result.stdout.fnmatch_lines(['*=== %d passed in *' % passes]) + result.stdout.fnmatch_lines(["*=== %d passed in *" % passes]) - @pytest.mark.parametrize('scope', SCOPES) - @pytest.mark.parametrize('autouse', [True, False]) - @pytest.mark.parametrize('use_fixture_in_doctest', [True, False]) - def test_fixture_module_doctest_scopes(self, testdir, scope, autouse, - use_fixture_in_doctest): + @pytest.mark.parametrize("scope", SCOPES) + @pytest.mark.parametrize("autouse", [True, False]) + @pytest.mark.parametrize("use_fixture_in_doctest", [True, False]) + def test_fixture_module_doctest_scopes( + self, testdir, scope, autouse, use_fixture_in_doctest + ): """Test that auto-use fixtures work properly with doctest files. See #1057 and #1100. """ - testdir.makeconftest(''' + testdir.makeconftest( + """ import pytest @pytest.fixture(autouse={autouse}, scope="{scope}") def auto(request): return 99 - '''.format(scope=scope, autouse=autouse)) + """.format( + scope=scope, autouse=autouse + ) + ) if use_fixture_in_doctest: - testdir.maketxtfile(test_doc=""" + testdir.maketxtfile( + test_doc=""" >>> getfixture('auto') 99 - """) + """ + ) else: - testdir.maketxtfile(test_doc=""" + testdir.maketxtfile( + test_doc=""" >>> 1 + 1 2 - """) - result = testdir.runpytest('--doctest-modules') - assert 'FAILURES' not in str(result.stdout.str()) - result.stdout.fnmatch_lines(['*=== 1 passed in *']) + """ + ) + result = testdir.runpytest("--doctest-modules") + assert "FAILURES" not in str(result.stdout.str()) + result.stdout.fnmatch_lines(["*=== 1 passed in *"]) - @pytest.mark.parametrize('scope', SCOPES) + @pytest.mark.parametrize("scope", SCOPES) def test_auto_use_request_attributes(self, testdir, scope): """Check that all attributes of a request in an autouse fixture behave as expected when requested for a doctest item. 
""" - testdir.makeconftest(''' + testdir.makeconftest( + """ import pytest @pytest.fixture(autouse=True, scope="{scope}") @@ -887,69 +1028,88 @@ class TestDoctestAutoUseFixtures(object): if "{scope}" == 'function': assert request.function is None return 99 - '''.format(scope=scope)) - testdir.maketxtfile(test_doc=""" + """.format( + scope=scope + ) + ) + testdir.maketxtfile( + test_doc=""" >>> 1 + 1 2 - """) - result = testdir.runpytest('--doctest-modules') - assert 'FAILURES' not in str(result.stdout.str()) - result.stdout.fnmatch_lines(['*=== 1 passed in *']) + """ + ) + result = testdir.runpytest("--doctest-modules") + assert "FAILURES" not in str(result.stdout.str()) + result.stdout.fnmatch_lines(["*=== 1 passed in *"]) class TestDoctestNamespaceFixture(object): - SCOPES = ['module', 'session', 'class', 'function'] + SCOPES = ["module", "session", "class", "function"] - @pytest.mark.parametrize('scope', SCOPES) + @pytest.mark.parametrize("scope", SCOPES) def test_namespace_doctestfile(self, testdir, scope): """ Check that inserting something into the namespace works in a simple text file doctest """ - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest import contextlib @pytest.fixture(autouse=True, scope="{scope}") def add_contextlib(doctest_namespace): doctest_namespace['cl'] = contextlib - """.format(scope=scope)) - p = testdir.maketxtfile(""" + """.format( + scope=scope + ) + ) + p = testdir.maketxtfile( + """ >>> print(cl.__name__) contextlib - """) + """ + ) reprec = testdir.inline_run(p) reprec.assertoutcome(passed=1) - @pytest.mark.parametrize('scope', SCOPES) + @pytest.mark.parametrize("scope", SCOPES) def test_namespace_pyfile(self, testdir, scope): """ Check that inserting something into the namespace works in a simple Python file docstring doctest """ - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest import contextlib @pytest.fixture(autouse=True, scope="{scope}") def add_contextlib(doctest_namespace): doctest_namespace['cl'] = contextlib - """.format(scope=scope)) - p = testdir.makepyfile(""" + """.format( + scope=scope + ) + ) + p = testdir.makepyfile( + """ def foo(): ''' >>> print(cl.__name__) contextlib ''' - """) + """ + ) reprec = testdir.inline_run(p, "--doctest-modules") reprec.assertoutcome(passed=1) class TestDoctestReportingOption(object): + def _run_doctest_report(self, testdir, format): - testdir.makepyfile(""" + testdir.makepyfile( + """ def foo(): ''' >>> foo() @@ -962,63 +1122,69 @@ class TestDoctestReportingOption(object): '0 1 4\\n' '1 2 5\\n' '2 3 6') - """) + """ + ) return testdir.runpytest("--doctest-modules", "--doctest-report", format) - @pytest.mark.parametrize('format', ['udiff', 'UDIFF', 'uDiFf']) + @pytest.mark.parametrize("format", ["udiff", "UDIFF", "uDiFf"]) def test_doctest_report_udiff(self, testdir, format): result = self._run_doctest_report(testdir, format) - result.stdout.fnmatch_lines([ - ' 0 1 4', - ' -1 2 4', - ' +1 2 5', - ' 2 3 6', - ]) + result.stdout.fnmatch_lines( + [" 0 1 4", " -1 2 4", " +1 2 5", " 2 3 6"] + ) def test_doctest_report_cdiff(self, testdir): - result = self._run_doctest_report(testdir, 'cdiff') - result.stdout.fnmatch_lines([ - ' a b', - ' 0 1 4', - ' ! 1 2 4', - ' 2 3 6', - ' --- 1,4 ----', - ' a b', - ' 0 1 4', - ' ! 1 2 5', - ' 2 3 6', - ]) + result = self._run_doctest_report(testdir, "cdiff") + result.stdout.fnmatch_lines( + [ + " a b", + " 0 1 4", + " ! 1 2 4", + " 2 3 6", + " --- 1,4 ----", + " a b", + " 0 1 4", + " ! 
1 2 5", + " 2 3 6", + ] + ) def test_doctest_report_ndiff(self, testdir): - result = self._run_doctest_report(testdir, 'ndiff') - result.stdout.fnmatch_lines([ - ' a b', - ' 0 1 4', - ' - 1 2 4', - ' ? ^', - ' + 1 2 5', - ' ? ^', - ' 2 3 6', - ]) + result = self._run_doctest_report(testdir, "ndiff") + result.stdout.fnmatch_lines( + [ + " a b", + " 0 1 4", + " - 1 2 4", + " ? ^", + " + 1 2 5", + " ? ^", + " 2 3 6", + ] + ) - @pytest.mark.parametrize('format', ['none', 'only_first_failure']) + @pytest.mark.parametrize("format", ["none", "only_first_failure"]) def test_doctest_report_none_or_only_first_failure(self, testdir, format): result = self._run_doctest_report(testdir, format) - result.stdout.fnmatch_lines([ - 'Expected:', - ' a b', - ' 0 1 4', - ' 1 2 4', - ' 2 3 6', - 'Got:', - ' a b', - ' 0 1 4', - ' 1 2 5', - ' 2 3 6', - ]) + result.stdout.fnmatch_lines( + [ + "Expected:", + " a b", + " 0 1 4", + " 1 2 4", + " 2 3 6", + "Got:", + " a b", + " 0 1 4", + " 1 2 5", + " 2 3 6", + ] + ) def test_doctest_report_invalid(self, testdir): - result = self._run_doctest_report(testdir, 'obviously_invalid_format') - result.stderr.fnmatch_lines([ - "*error: argument --doctest-report: invalid choice: 'obviously_invalid_format' (choose from*" - ]) + result = self._run_doctest_report(testdir, "obviously_invalid_format") + result.stderr.fnmatch_lines( + [ + "*error: argument --doctest-report: invalid choice: 'obviously_invalid_format' (choose from*" + ] + ) diff --git a/testing/test_entry_points.py b/testing/test_entry_points.py index 6ca68b481..8f734778f 100644 --- a/testing/test_entry_points.py +++ b/testing/test_entry_points.py @@ -4,11 +4,11 @@ import pkg_resources import pytest -@pytest.mark.parametrize("entrypoint", ['py.test', 'pytest']) +@pytest.mark.parametrize("entrypoint", ["py.test", "pytest"]) def test_entry_point_exist(entrypoint): - assert entrypoint in pkg_resources.get_entry_map('pytest')['console_scripts'] + assert entrypoint in pkg_resources.get_entry_map("pytest")["console_scripts"] def test_pytest_entry_points_are_identical(): - entryMap = pkg_resources.get_entry_map('pytest')['console_scripts'] - assert entryMap['pytest'].module_name == entryMap['py.test'].module_name + entryMap = pkg_resources.get_entry_map("pytest")["console_scripts"] + assert entryMap["pytest"].module_name == entryMap["py.test"].module_name diff --git a/testing/test_helpconfig.py b/testing/test_helpconfig.py index 845005a05..b5424235b 100644 --- a/testing/test_helpconfig.py +++ b/testing/test_helpconfig.py @@ -7,57 +7,53 @@ def test_version(testdir, pytestconfig): result = testdir.runpytest("--version") assert result.ret == 0 # p = py.path.local(py.__file__).dirpath() - result.stderr.fnmatch_lines([ - '*pytest*%s*imported from*' % (pytest.__version__, ) - ]) + result.stderr.fnmatch_lines(["*pytest*%s*imported from*" % (pytest.__version__,)]) if pytestconfig.pluginmanager.list_plugin_distinfo(): - result.stderr.fnmatch_lines([ - "*setuptools registered plugins:", - "*at*", - ]) + result.stderr.fnmatch_lines(["*setuptools registered plugins:", "*at*"]) def test_help(testdir): result = testdir.runpytest("--help") assert result.ret == 0 - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ *-v*verbose* *setup.cfg* *minversion* *to see*markers*pytest --markers* *to see*fixtures*pytest --fixtures* - """) + """ + ) def test_hookvalidation_unknown(testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_hello(xyz): pass - """) + """ + ) result = testdir.runpytest() assert 
result.ret != 0 - result.stdout.fnmatch_lines([ - '*unknown hook*pytest_hello*' - ]) + result.stdout.fnmatch_lines(["*unknown hook*pytest_hello*"]) def test_hookvalidation_optional(testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest @pytest.hookimpl(optionalhook=True) def pytest_hello(xyz): pass - """) + """ + ) result = testdir.runpytest() assert result.ret == EXIT_NOTESTSCOLLECTED def test_traceconfig(testdir): result = testdir.runpytest("--traceconfig") - result.stdout.fnmatch_lines([ - "*using*pytest*py*", - "*active plugins*", - ]) + result.stdout.fnmatch_lines(["*using*pytest*py*", "*active plugins*"]) def test_debug(testdir, monkeypatch): @@ -71,7 +67,6 @@ def test_PYTEST_DEBUG(testdir, monkeypatch): monkeypatch.setenv("PYTEST_DEBUG", "1") result = testdir.runpytest_subprocess() assert result.ret == EXIT_NOTESTSCOLLECTED - result.stderr.fnmatch_lines([ - "*pytest_plugin_registered*", - "*manager*PluginManager*" - ]) + result.stderr.fnmatch_lines( + ["*pytest_plugin_registered*", "*manager*PluginManager*"] + ) diff --git a/testing/test_junitxml.py b/testing/test_junitxml.py index 889a2f007..d0be5f267 100644 --- a/testing/test_junitxml.py +++ b/testing/test_junitxml.py @@ -23,12 +23,13 @@ def assert_attr(node, **kwargs): if anode is not None: return anode.value - expected = dict((name, str(value)) for name, value in kwargs.items()) - on_node = dict((name, nodeval(node, name)) for name in expected) + expected = {name: str(value) for name, value in kwargs.items()} + on_node = {name: nodeval(node, name) for name in expected} assert on_node == expected class DomNode(object): + def __init__(self, dom): self.__node = dom @@ -80,8 +81,10 @@ class DomNode(object): class TestPython(object): + def test_summing_simple(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest def test_pass(): pass @@ -95,14 +98,16 @@ class TestPython(object): @pytest.mark.xfail def test_xpass(): assert 1 - """) + """ + ) result, dom = runandparse(testdir) assert result.ret node = dom.find_first_by_tag("testsuite") node.assert_attr(name="pytest", errors=0, failures=1, skips=2, tests=5) def test_summing_simple_with_errors(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture def fixture(): @@ -119,14 +124,16 @@ class TestPython(object): @pytest.mark.xfail(strict=True) def test_xpass(): assert True - """) + """ + ) result, dom = runandparse(testdir) assert result.ret node = dom.find_first_by_tag("testsuite") node.assert_attr(name="pytest", errors=1, failures=2, skips=1, tests=5) def test_timing_function(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import time, pytest def setup_module(): time.sleep(0.01) @@ -134,7 +141,8 @@ class TestPython(object): time.sleep(0.01) def test_sleep(): time.sleep(0.01) - """) + """ + ) result, dom = runandparse(testdir) node = dom.find_first_by_tag("testsuite") tnode = node.find_first_by_tag("testcase") @@ -142,7 +150,8 @@ class TestPython(object): assert round(float(val), 2) >= 0.03 def test_setup_error(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture @@ -150,7 +159,8 @@ class TestPython(object): raise ValueError() def test_function(arg): pass - """) + """ + ) result, dom = runandparse(testdir) assert result.ret node = dom.find_first_by_tag("testsuite") @@ -160,13 +170,15 @@ class TestPython(object): file="test_setup_error.py", line="5", classname="test_setup_error", - name="test_function") + name="test_function", 
+ ) fnode = tnode.find_first_by_tag("error") fnode.assert_attr(message="test setup failure") assert "ValueError" in fnode.toxml() def test_teardown_error(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture @@ -175,7 +187,8 @@ class TestPython(object): raise ValueError() def test_function(arg): pass - """) + """ + ) result, dom = runandparse(testdir) assert result.ret node = dom.find_first_by_tag("testsuite") @@ -184,13 +197,15 @@ class TestPython(object): file="test_teardown_error.py", line="6", classname="test_teardown_error", - name="test_function") + name="test_function", + ) fnode = tnode.find_first_by_tag("error") fnode.assert_attr(message="test teardown failure") assert "ValueError" in fnode.toxml() def test_call_failure_teardown_error(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture @@ -199,7 +214,8 @@ class TestPython(object): raise Exception("Teardown Exception") def test_function(arg): raise Exception("Call Exception") - """) + """ + ) result, dom = runandparse(testdir) assert result.ret node = dom.find_first_by_tag("testsuite") @@ -213,11 +229,13 @@ class TestPython(object): snode.assert_attr(message="test teardown failure") def test_skip_contains_name_reason(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest def test_skip(): pytest.skip("hello23") - """) + """ + ) result, dom = runandparse(testdir) assert result.ret == 0 node = dom.find_first_by_tag("testsuite") @@ -227,17 +245,20 @@ class TestPython(object): file="test_skip_contains_name_reason.py", line="1", classname="test_skip_contains_name_reason", - name="test_skip") + name="test_skip", + ) snode = tnode.find_first_by_tag("skipped") - snode.assert_attr(type="pytest.skip", message="hello23", ) + snode.assert_attr(type="pytest.skip", message="hello23") def test_mark_skip_contains_name_reason(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.mark.skip(reason="hello24") def test_skip(): assert True - """) + """ + ) result, dom = runandparse(testdir) assert result.ret == 0 node = dom.find_first_by_tag("testsuite") @@ -247,18 +268,21 @@ class TestPython(object): file="test_mark_skip_contains_name_reason.py", line="1", classname="test_mark_skip_contains_name_reason", - name="test_skip") + name="test_skip", + ) snode = tnode.find_first_by_tag("skipped") - snode.assert_attr(type="pytest.skip", message="hello24", ) + snode.assert_attr(type="pytest.skip", message="hello24") def test_mark_skipif_contains_name_reason(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest GLOBAL_CONDITION = True @pytest.mark.skipif(GLOBAL_CONDITION, reason="hello25") def test_skip(): assert True - """) + """ + ) result, dom = runandparse(testdir) assert result.ret == 0 node = dom.find_first_by_tag("testsuite") @@ -268,28 +292,33 @@ class TestPython(object): file="test_mark_skipif_contains_name_reason.py", line="2", classname="test_mark_skipif_contains_name_reason", - name="test_skip") + name="test_skip", + ) snode = tnode.find_first_by_tag("skipped") - snode.assert_attr(type="pytest.skip", message="hello25", ) + snode.assert_attr(type="pytest.skip", message="hello25") def test_mark_skip_doesnt_capture_output(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.mark.skip(reason="foo") def test_skip(): print("bar!") - """) + """ + ) result, dom = runandparse(testdir) assert result.ret == 0 node_xml = 
dom.find_first_by_tag("testsuite").toxml() assert "bar!" not in node_xml def test_classname_instance(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ class TestClass(object): def test_method(self): assert 0 - """) + """ + ) result, dom = runandparse(testdir) assert result.ret node = dom.find_first_by_tag("testsuite") @@ -299,7 +328,8 @@ class TestPython(object): file="test_classname_instance.py", line="1", classname="test_classname_instance.TestClass", - name="test_method") + name="test_method", + ) def test_classname_nested_dir(self, testdir): p = testdir.tmpdir.ensure("sub", "test_hello.py") @@ -313,7 +343,8 @@ class TestPython(object): file=os.path.join("sub", "test_hello.py"), line="0", classname="sub.test_hello", - name="test_func") + name="test_func", + ) def test_internal_error(self, testdir): testdir.makeconftest("def pytest_runtest_protocol(): 0 / 0") @@ -328,9 +359,10 @@ class TestPython(object): fnode.assert_attr(message="internal error") assert "Division" in fnode.toxml() - @pytest.mark.parametrize('junit_logging', ['no', 'system-out', 'system-err']) + @pytest.mark.parametrize("junit_logging", ["no", "system-out", "system-err"]) def test_failure_function(self, testdir, junit_logging): - testdir.makepyfile(""" + testdir.makepyfile( + """ import logging import sys @@ -340,9 +372,10 @@ class TestPython(object): logging.info('info msg') logging.warning('warning msg') raise ValueError(42) - """) + """ + ) - result, dom = runandparse(testdir, '-o', 'junit_logging=%s' % junit_logging) + result, dom = runandparse(testdir, "-o", "junit_logging=%s" % junit_logging) assert result.ret node = dom.find_first_by_tag("testsuite") node.assert_attr(failures=1, tests=1) @@ -351,7 +384,8 @@ class TestPython(object): file="test_failure_function.py", line="3", classname="test_failure_function", - name="test_fail") + name="test_fail", + ) fnode = tnode.find_first_by_tag("failure") fnode.assert_attr(message="ValueError: 42") assert "ValueError" in fnode.toxml() @@ -364,22 +398,24 @@ class TestPython(object): assert "hello-stderr" in systemerr.toxml() assert "info msg" not in systemerr.toxml() - if junit_logging == 'system-out': + if junit_logging == "system-out": assert "warning msg" in systemout.toxml() assert "warning msg" not in systemerr.toxml() - elif junit_logging == 'system-err': + elif junit_logging == "system-err": assert "warning msg" not in systemout.toxml() assert "warning msg" in systemerr.toxml() - elif junit_logging == 'no': + elif junit_logging == "no": assert "warning msg" not in systemout.toxml() assert "warning msg" not in systemerr.toxml() def test_failure_verbose_message(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import sys def test_fail(): assert 0, "An error" - """) + """ + ) result, dom = runandparse(testdir) node = dom.find_first_by_tag("testsuite") @@ -388,13 +424,15 @@ class TestPython(object): fnode.assert_attr(message="AssertionError: An error assert 0") def test_failure_escape(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.mark.parametrize('arg1', "<&'", ids="<&'") def test_func(arg1): print(arg1) assert 0 - """) + """ + ) result, dom = runandparse(testdir) assert result.ret node = dom.find_first_by_tag("testsuite") @@ -407,19 +445,22 @@ class TestPython(object): file="test_failure_escape.py", line="1", classname="test_failure_escape", - name="test_func[%s]" % char) - sysout = tnode.find_first_by_tag('system-out') + name="test_func[%s]" % char, + ) + sysout = 
tnode.find_first_by_tag("system-out") text = sysout.text - assert text == '%s\n' % char + assert text == "%s\n" % char def test_junit_prefixing(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_func(): assert 0 class TestHello(object): def test_hello(self): pass - """) + """ + ) result, dom = runandparse(testdir, "--junitprefix=xyz") assert result.ret node = dom.find_first_by_tag("testsuite") @@ -429,21 +470,24 @@ class TestPython(object): file="test_junit_prefixing.py", line="0", classname="xyz.test_junit_prefixing", - name="test_func") + name="test_func", + ) tnode = node.find_nth_by_tag("testcase", 1) tnode.assert_attr( file="test_junit_prefixing.py", line="3", - classname="xyz.test_junit_prefixing." - "TestHello", - name="test_hello") + classname="xyz.test_junit_prefixing." "TestHello", + name="test_hello", + ) def test_xfailure_function(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest def test_xfail(): pytest.xfail("42") - """) + """ + ) result, dom = runandparse(testdir) assert not result.ret node = dom.find_first_by_tag("testsuite") @@ -453,13 +497,15 @@ class TestPython(object): file="test_xfailure_function.py", line="1", classname="test_xfailure_function", - name="test_xfail") + name="test_xfail", + ) fnode = tnode.find_first_by_tag("skipped") fnode.assert_attr(message="expected test failure") # assert "ValueError" in fnode.toxml() def test_xfail_captures_output_once(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import sys import pytest @@ -468,20 +514,23 @@ class TestPython(object): sys.stdout.write('XFAIL This is stdout') sys.stderr.write('XFAIL This is stderr') assert 0 - """) + """ + ) result, dom = runandparse(testdir) node = dom.find_first_by_tag("testsuite") tnode = node.find_first_by_tag("testcase") - assert len(tnode.find_by_tag('system-err')) == 1 - assert len(tnode.find_by_tag('system-out')) == 1 + assert len(tnode.find_by_tag("system-err")) == 1 + assert len(tnode.find_by_tag("system-out")) == 1 def test_xfailure_xpass(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.mark.xfail def test_xpass(): pass - """) + """ + ) result, dom = runandparse(testdir) # assert result.ret node = dom.find_first_by_tag("testsuite") @@ -491,15 +540,18 @@ class TestPython(object): file="test_xfailure_xpass.py", line="1", classname="test_xfailure_xpass", - name="test_xpass") + name="test_xpass", + ) def test_xfailure_xpass_strict(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.mark.xfail(strict=True, reason="This needs to fail!") def test_xpass(): pass - """) + """ + ) result, dom = runandparse(testdir) # assert result.ret node = dom.find_first_by_tag("testsuite") @@ -509,7 +561,8 @@ class TestPython(object): file="test_xfailure_xpass_strict.py", line="1", classname="test_xfailure_xpass_strict", - name="test_xpass") + name="test_xpass", + ) fnode = tnode.find_first_by_tag("failure") fnode.assert_attr(message="[XPASS(strict)] This needs to fail!") @@ -520,22 +573,23 @@ class TestPython(object): node = dom.find_first_by_tag("testsuite") node.assert_attr(errors=1, tests=1) tnode = node.find_first_by_tag("testcase") - tnode.assert_attr( - file="test_collect_error.py", - name="test_collect_error") + tnode.assert_attr(file="test_collect_error.py", name="test_collect_error") assert tnode["line"] is None fnode = tnode.find_first_by_tag("error") fnode.assert_attr(message="collection failure") assert "SyntaxError" in fnode.toxml() def 
test_unicode(self, testdir): - value = 'hx\xc4\x85\xc4\x87\n' - testdir.makepyfile(""" + value = "hx\xc4\x85\xc4\x87\n" + testdir.makepyfile( + """ # coding: latin1 def test_hello(): print (%r) assert 0 - """ % value) + """ + % value + ) result, dom = runandparse(testdir) assert result.ret == 1 tnode = dom.find_first_by_tag("testcase") @@ -545,22 +599,26 @@ class TestPython(object): def test_assertion_binchars(self, testdir): """this test did fail when the escaping wasnt strict""" - testdir.makepyfile(""" + testdir.makepyfile( + """ M1 = '\x01\x02\x03\x04' M2 = '\x01\x02\x03\x05' def test_str_compare(): assert M1 == M2 - """) + """ + ) result, dom = runandparse(testdir) print(dom.toxml()) def test_pass_captures_stdout(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_pass(): print('hello-stdout') - """) + """ + ) result, dom = runandparse(testdir) node = dom.find_first_by_tag("testsuite") pnode = node.find_first_by_tag("testcase") @@ -568,11 +626,13 @@ class TestPython(object): assert "hello-stdout" in systemout.toxml() def test_pass_captures_stderr(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import sys def test_pass(): sys.stderr.write('hello-stderr') - """) + """ + ) result, dom = runandparse(testdir) node = dom.find_first_by_tag("testsuite") pnode = node.find_first_by_tag("testcase") @@ -580,7 +640,8 @@ class TestPython(object): assert "hello-stderr" in systemout.toxml() def test_setup_error_captures_stdout(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture @@ -589,7 +650,8 @@ class TestPython(object): raise ValueError() def test_function(arg): pass - """) + """ + ) result, dom = runandparse(testdir) node = dom.find_first_by_tag("testsuite") pnode = node.find_first_by_tag("testcase") @@ -597,7 +659,8 @@ class TestPython(object): assert "hello-stdout" in systemout.toxml() def test_setup_error_captures_stderr(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import sys import pytest @@ -607,7 +670,8 @@ class TestPython(object): raise ValueError() def test_function(arg): pass - """) + """ + ) result, dom = runandparse(testdir) node = dom.find_first_by_tag("testsuite") pnode = node.find_first_by_tag("testcase") @@ -615,7 +679,8 @@ class TestPython(object): assert "hello-stderr" in systemout.toxml() def test_avoid_double_stdout(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import sys import pytest @@ -626,7 +691,8 @@ class TestPython(object): raise ValueError() def test_function(arg): sys.stdout.write('hello-stdout call') - """) + """ + ) result, dom = runandparse(testdir) node = dom.find_first_by_tag("testsuite") pnode = node.find_first_by_tag("testcase") @@ -637,8 +703,8 @@ class TestPython(object): def test_mangle_test_address(): from _pytest.junitxml import mangle_test_address - address = '::'.join( - ["a/my.py.thing.py", "Class", "()", "method", "[a-1-::]"]) + + address = "::".join(["a/my.py.thing.py", "Class", "()", "method", "[a-1-::]"]) newnames = mangle_test_address(address) assert newnames == ["a.my.py.thing", "Class", "method", "[a-1-::]"] @@ -647,6 +713,7 @@ def test_dont_configure_on_slaves(tmpdir): gotten = [] class FakeConfig(object): + def __init__(self): self.pluginmanager = self self.option = self @@ -656,11 +723,12 @@ def test_dont_configure_on_slaves(tmpdir): junitprefix = None # XXX: shouldnt need tmpdir ? 
- xmlpath = str(tmpdir.join('junix.xml')) + xmlpath = str(tmpdir.join("junix.xml")) register = gotten.append fake_config = FakeConfig() from _pytest import junitxml + junitxml.pytest_configure(fake_config) assert len(gotten) == 1 FakeConfig.slaveinput = None @@ -669,8 +737,10 @@ def test_dont_configure_on_slaves(tmpdir): class TestNonPython(object): + def test_summing_simple(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest def pytest_collect_file(path, parent): if path.ext == ".xyz": @@ -683,7 +753,8 @@ class TestNonPython(object): raise ValueError(42) def repr_failure(self, excinfo): return "custom item runtest failed" - """) + """ + ) testdir.tmpdir.join("myfile.xyz").write("hello") result, dom = runandparse(testdir) assert result.ret @@ -698,33 +769,37 @@ class TestNonPython(object): def test_nullbyte(testdir): # A null byte can not occur in XML (see section 2.2 of the spec) - testdir.makepyfile(""" + testdir.makepyfile( + """ import sys def test_print_nullbyte(): sys.stdout.write('Here the null -->' + chr(0) + '<--') sys.stdout.write('In repr form -->' + repr(chr(0)) + '<--') assert False - """) - xmlf = testdir.tmpdir.join('junit.xml') - testdir.runpytest('--junitxml=%s' % xmlf) + """ + ) + xmlf = testdir.tmpdir.join("junit.xml") + testdir.runpytest("--junitxml=%s" % xmlf) text = xmlf.read() - assert '\x00' not in text - assert '#x00' in text + assert "\x00" not in text + assert "#x00" in text def test_nullbyte_replace(testdir): # Check if the null byte gets replaced - testdir.makepyfile(""" + testdir.makepyfile( + """ import sys def test_print_nullbyte(): sys.stdout.write('Here the null -->' + chr(0) + '<--') sys.stdout.write('In repr form -->' + repr(chr(0)) + '<--') assert False - """) - xmlf = testdir.tmpdir.join('junit.xml') - testdir.runpytest('--junitxml=%s' % xmlf) + """ + ) + xmlf = testdir.tmpdir.join("junit.xml") + testdir.runpytest("--junitxml=%s" % xmlf) text = xmlf.read() - assert '#x0' in text + assert "#x0" in text def test_invalid_xml_escape(): @@ -741,9 +816,20 @@ def test_invalid_xml_escape(): unichr(65) except NameError: unichr = chr - invalid = (0x00, 0x1, 0xB, 0xC, 0xE, 0x19, 27, # issue #126 - 0xD800, 0xDFFF, 0xFFFE, 0x0FFFF) # , 0x110000) - valid = (0x9, 0xA, 0x20, ) + invalid = ( + 0x00, + 0x1, + 0xB, + 0xC, + 0xE, + 0x19, + 27, # issue #126 + 0xD800, + 0xDFFF, + 0xFFFE, + 0x0FFFF, + ) # , 0x110000) + valid = (0x9, 0xA, 0x20) # 0xD, 0xD7FF, 0xE000, 0xFFFD, 0x10000, 0x10FFFF) from _pytest.junitxml import bin_xml_escape @@ -751,34 +837,36 @@ def test_invalid_xml_escape(): for i in invalid: got = bin_xml_escape(unichr(i)).uniobj if i <= 0xFF: - expected = '#x%02X' % i + expected = "#x%02X" % i else: - expected = '#x%04X' % i + expected = "#x%04X" % i assert got == expected for i in valid: assert chr(i) == bin_xml_escape(unichr(i)).uniobj def test_logxml_path_expansion(tmpdir, monkeypatch): - home_tilde = py.path.local(os.path.expanduser('~')).join('test.xml') + home_tilde = py.path.local(os.path.expanduser("~")).join("test.xml") - xml_tilde = LogXML('~%stest.xml' % tmpdir.sep, None) + xml_tilde = LogXML("~%stest.xml" % tmpdir.sep, None) assert xml_tilde.logfile == home_tilde # this is here for when $HOME is not set correct monkeypatch.setenv("HOME", tmpdir) - home_var = os.path.normpath(os.path.expandvars('$HOME/test.xml')) + home_var = os.path.normpath(os.path.expandvars("$HOME/test.xml")) - xml_var = LogXML('$HOME%stest.xml' % tmpdir.sep, None) + xml_var = LogXML("$HOME%stest.xml" % tmpdir.sep, None) assert xml_var.logfile 
== home_var def test_logxml_changingdir(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_func(): import os os.chdir("a") - """) + """ + ) testdir.tmpdir.mkdir("a") result = testdir.runpytest("--junitxml=a/x.xml") assert result.ret == 0 @@ -787,10 +875,12 @@ def test_logxml_changingdir(testdir): def test_logxml_makedir(testdir): """--junitxml should automatically create directories for the xml file""" - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_pass(): pass - """) + """ + ) result = testdir.runpytest("--junitxml=path/to/results.xml") assert result.ret == 0 assert testdir.tmpdir.join("path/to/results.xml").check() @@ -803,12 +893,14 @@ def test_logxml_check_isdir(testdir): def test_escaped_parametrized_names_xml(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.mark.parametrize('char', [u"\\x00"]) def test_func(char): assert char - """) + """ + ) result, dom = runandparse(testdir) assert result.ret == 0 node = dom.find_first_by_tag("testcase") @@ -816,33 +908,36 @@ def test_escaped_parametrized_names_xml(testdir): def test_double_colon_split_function_issue469(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.mark.parametrize('param', ["double::colon"]) def test_func(param): pass - """) + """ + ) result, dom = runandparse(testdir) assert result.ret == 0 node = dom.find_first_by_tag("testcase") node.assert_attr(classname="test_double_colon_split_function_issue469") - node.assert_attr(name='test_func[double::colon]') + node.assert_attr(name="test_func[double::colon]") def test_double_colon_split_method_issue469(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest class TestClass(object): @pytest.mark.parametrize('param', ["double::colon"]) def test_func(self, param): pass - """) + """ + ) result, dom = runandparse(testdir) assert result.ret == 0 node = dom.find_first_by_tag("testcase") - node.assert_attr( - classname="test_double_colon_split_method_issue469.TestClass") - node.assert_attr(name='test_func[double::colon]') + node.assert_attr(classname="test_double_colon_split_method_issue469.TestClass") + node.assert_attr(name="test_func[double::colon]") def test_unicode_issue368(testdir): @@ -855,7 +950,7 @@ def test_unicode_issue368(testdir): longrepr = ustr sections = [] nodeid = "something" - location = 'tests/filename.py', 42, 'TestClass.method' + location = "tests/filename.py", 42, "TestClass.method" test_report = Report() @@ -876,7 +971,8 @@ def test_unicode_issue368(testdir): def test_record_property(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture @@ -884,33 +980,37 @@ def test_record_property(testdir): record_property("bar", 1) def test_record(record_property, other): record_property("foo", "<1"); - """) - result, dom = runandparse(testdir, '-rwv') + """ + ) + result, dom = runandparse(testdir, "-rwv") node = dom.find_first_by_tag("testsuite") tnode = node.find_first_by_tag("testcase") - psnode = tnode.find_first_by_tag('properties') - pnodes = psnode.find_by_tag('property') + psnode = tnode.find_first_by_tag("properties") + pnodes = psnode.find_by_tag("property") pnodes[0].assert_attr(name="bar", value="1") pnodes[1].assert_attr(name="foo", value="<1") def test_record_property_same_name(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_record_with_same_name(record_property): record_property("foo", "bar") record_property("foo", "baz") - """) - result, dom = runandparse(testdir, '-rw') + """ 
+ ) + result, dom = runandparse(testdir, "-rw") node = dom.find_first_by_tag("testsuite") tnode = node.find_first_by_tag("testcase") - psnode = tnode.find_first_by_tag('properties') - pnodes = psnode.find_by_tag('property') + psnode = tnode.find_first_by_tag("properties") + pnodes = psnode.find_by_tag("property") pnodes[0].assert_attr(name="foo", value="bar") pnodes[1].assert_attr(name="foo", value="baz") def test_record_attribute(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture @@ -918,16 +1018,16 @@ def test_record_attribute(testdir): record_xml_attribute("bar", 1) def test_record(record_xml_attribute, other): record_xml_attribute("foo", "<1"); - """) - result, dom = runandparse(testdir, '-rw') + """ + ) + result, dom = runandparse(testdir, "-rw") node = dom.find_first_by_tag("testsuite") tnode = node.find_first_by_tag("testcase") tnode.assert_attr(bar="1") tnode.assert_attr(foo="<1") - result.stdout.fnmatch_lines([ - 'test_record_attribute.py::test_record', - '*record_xml_attribute*experimental*', - ]) + result.stdout.fnmatch_lines( + ["test_record_attribute.py::test_record", "*record_xml_attribute*experimental*"] + ) def test_random_report_log_xdist(testdir): @@ -935,54 +1035,59 @@ def test_random_report_log_xdist(testdir): with nodes from several nodes overlapping, so junitxml must cope with that to produce correct reports. #1064 """ - pytest.importorskip('xdist') - testdir.makepyfile(""" + pytest.importorskip("xdist") + testdir.makepyfile( + """ import pytest, time @pytest.mark.parametrize('i', list(range(30))) def test_x(i): assert i != 22 - """) - _, dom = runandparse(testdir, '-n2') + """ + ) + _, dom = runandparse(testdir, "-n2") suite_node = dom.find_first_by_tag("testsuite") failed = [] for case_node in suite_node.find_by_tag("testcase"): - if case_node.find_first_by_tag('failure'): - failed.append(case_node['name']) + if case_node.find_first_by_tag("failure"): + failed.append(case_node["name"]) - assert failed == ['test_x[22]'] + assert failed == ["test_x[22]"] def test_runs_twice(testdir): - f = testdir.makepyfile(''' + f = testdir.makepyfile( + """ def test_pass(): pass - ''') + """ + ) result, dom = runandparse(testdir, f, f) - assert 'INTERNALERROR' not in result.stdout.str() - first, second = [x['classname'] for x in dom.find_by_tag("testcase")] + assert "INTERNALERROR" not in result.stdout.str() + first, second = [x["classname"] for x in dom.find_by_tag("testcase")] assert first == second -@pytest.mark.xfail(reason='hangs', run=False) +@pytest.mark.xfail(reason="hangs", run=False) def test_runs_twice_xdist(testdir): - pytest.importorskip('xdist') - f = testdir.makepyfile(''' + pytest.importorskip("xdist") + f = testdir.makepyfile( + """ def test_pass(): pass - ''') + """ + ) - result, dom = runandparse( - testdir, f, - '--dist', 'each', '--tx', '2*popen',) - assert 'INTERNALERROR' not in result.stdout.str() - first, second = [x['classname'] for x in dom.find_by_tag("testcase")] + result, dom = runandparse(testdir, f, "--dist", "each", "--tx", "2*popen") + assert "INTERNALERROR" not in result.stdout.str() + first, second = [x["classname"] for x in dom.find_by_tag("testcase")] assert first == second def test_fancy_items_regression(testdir): # issue 1259 - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest class FunItem(pytest.Item): def runtest(self): @@ -1002,33 +1107,38 @@ def test_fancy_items_regression(testdir): def pytest_collect_file(path, parent): if path.check(ext='.py'): return FunCollector(path, 
parent)
-    """)
+        """
+    )

-    testdir.makepyfile('''
+    testdir.makepyfile(
+        """
     def test_pass():
         pass
-    ''')
+    """
+    )
     result, dom = runandparse(testdir)
-    assert 'INTERNALERROR' not in result.stdout.str()
+    assert "INTERNALERROR" not in result.stdout.str()

     items = sorted(
-        '%(classname)s %(name)s %(file)s' % x
-
-        for x in dom.find_by_tag("testcase"))
+        "%(classname)s %(name)s %(file)s" % x for x in dom.find_by_tag("testcase")
+    )
     import pprint
+
     pprint.pprint(items)
-    assert items == [
-        u'conftest a conftest.py',
-        u'conftest a conftest.py',
-        u'conftest b conftest.py',
-        u'test_fancy_items_regression a test_fancy_items_regression.py',
-        u'test_fancy_items_regression a test_fancy_items_regression.py',
-        u'test_fancy_items_regression b test_fancy_items_regression.py',
-        u'test_fancy_items_regression test_pass'
-        u' test_fancy_items_regression.py',
-    ]
+    assert (
+        items
+        == [
+            u"conftest a conftest.py",
+            u"conftest a conftest.py",
+            u"conftest b conftest.py",
+            u"test_fancy_items_regression a test_fancy_items_regression.py",
+            u"test_fancy_items_regression a test_fancy_items_regression.py",
+            u"test_fancy_items_regression b test_fancy_items_regression.py",
+            u"test_fancy_items_regression test_pass" u" test_fancy_items_regression.py",
+        ]
+    )


 def test_global_properties(testdir):
@@ -1041,26 +1151,26 @@ def test_global_properties(testdir):
     nodeid = "test_node_id"
     log.pytest_sessionstart()
-    log.add_global_property('foo', 1)
-    log.add_global_property('bar', 2)
+    log.add_global_property("foo", 1)
+    log.add_global_property("bar", 2)
     log.pytest_sessionfinish()

     dom = minidom.parse(str(path))
-    properties = dom.getElementsByTagName('properties')
+    properties = dom.getElementsByTagName("properties")

-    assert (properties.length == 1), "There must be one node"
+    assert properties.length == 1, "There must be one node"

-    property_list = dom.getElementsByTagName('property')
+    property_list = dom.getElementsByTagName("property")

-    assert (property_list.length == 2), "There must be only 2 property nodes"
+    assert property_list.length == 2, "There must be only 2 property nodes"

-    expected = {'foo': '1', 'bar': '2'}
+    expected = {"foo": "1", "bar": "2"}
     actual = {}

     for p in property_list:
-        k = str(p.getAttribute('name'))
-        v = str(p.getAttribute('value'))
+        k = str(p.getAttribute("name"))
+        v = str(p.getAttribute("value"))
         actual[k] = v

     assert actual == expected
@@ -1076,7 +1186,7 @@ def test_url_property(testdir):
         longrepr = "FooBarBaz"
         sections = []
         nodeid = "something"
-        location = 'tests/filename.py', 42, 'TestClass.method'
+        location = "tests/filename.py", 42, "TestClass.method"
         url = test_url

     test_report = Report()
@@ -1086,27 +1196,35 @@
     node_reporter.append_failure(test_report)
     log.pytest_sessionfinish()

-    test_case = minidom.parse(str(path)).getElementsByTagName('testcase')[0]
+    test_case = minidom.parse(str(path)).getElementsByTagName("testcase")[0]

-    assert (test_case.getAttribute('url') == test_url), "The URL did not get written to the xml"
+    assert (
+        test_case.getAttribute("url") == test_url
+    ), "The URL did not get written to the xml"


-@pytest.mark.parametrize('suite_name', ['my_suite', ''])
+@pytest.mark.parametrize("suite_name", ["my_suite", ""])
 def test_set_suite_name(testdir, suite_name):
     if suite_name:
-        testdir.makeini("""
+        testdir.makeini(
+            """
         [pytest]
-        junit_suite_name={0}
-        """.format(suite_name))
+        junit_suite_name={}
+        """.format(
+                suite_name
+            )
+        )
         expected = suite_name
     else:
-        expected = 
"pytest" + testdir.makepyfile( + """ import pytest def test_func(): pass - """) + """ + ) result, dom = runandparse(testdir) assert result.ret == 0 node = dom.find_first_by_tag("testsuite") diff --git a/testing/test_mark.py b/testing/test_mark.py index 764678ab4..e96af888a 100644 --- a/testing/test_mark.py +++ b/testing/test_mark.py @@ -4,21 +4,27 @@ import sys import pytest from _pytest.mark import ( - MarkGenerator as Mark, ParameterSet, transfer_markers, + MarkGenerator as Mark, + ParameterSet, + transfer_markers, EMPTY_PARAMETERSET_OPTION, ) -ignore_markinfo = pytest.mark.filterwarnings('ignore:MarkInfo objects:_pytest.deprecated.RemovedInPytest4Warning') +ignore_markinfo = pytest.mark.filterwarnings( + "ignore:MarkInfo objects:_pytest.deprecated.RemovedInPytest4Warning" +) class TestMark(object): + def test_markinfo_repr(self): from _pytest.mark import MarkInfo, Mark + m = MarkInfo.for_mark(Mark("hello", (1, 2), {})) repr(m) - @pytest.mark.parametrize('attr', ['mark', 'param']) - @pytest.mark.parametrize('modulename', ['py.test', 'pytest']) + @pytest.mark.parametrize("attr", ["mark", "param"]) + @pytest.mark.parametrize("modulename", ["py.test", "pytest"]) def test_pytest_exists_in_namespace_all(self, attr, modulename): module = sys.modules[modulename] assert attr in module.__all__ @@ -28,6 +34,7 @@ class TestMark(object): pytest.raises((AttributeError, TypeError), mark) def test_mark_with_param(self): + def some_function(abc): pass @@ -42,7 +49,7 @@ class TestMark(object): def test_pytest_mark_name_starts_with_underscore(self): mark = Mark() - pytest.raises(AttributeError, getattr, mark, '_some_name') + pytest.raises(AttributeError, getattr, mark, "_some_name") def test_pytest_mark_bare(self): mark = Mark() @@ -62,8 +69,8 @@ class TestMark(object): mark.world(x=3, y=4)(f) assert f.world - assert f.world.kwargs['x'] == 3 - assert f.world.kwargs['y'] == 4 + assert f.world.kwargs["x"] == 3 + assert f.world.kwargs["y"] == 4 @ignore_markinfo def test_apply_multiple_and_merge(self): @@ -74,12 +81,12 @@ class TestMark(object): mark.world mark.world(x=3)(f) - assert f.world.kwargs['x'] == 3 + assert f.world.kwargs["x"] == 3 mark.world(y=4)(f) - assert f.world.kwargs['x'] == 3 - assert f.world.kwargs['y'] == 4 + assert f.world.kwargs["x"] == 3 + assert f.world.kwargs["y"] == 4 mark.world(y=1)(f) - assert f.world.kwargs['y'] == 1 + assert f.world.kwargs["y"] == 1 assert len(f.world.args) == 0 @ignore_markinfo @@ -119,94 +126,109 @@ class TestMark(object): w = mark.some w("hello", reason="123")(f) assert f.some.args[0] == "hello" - assert f.some.kwargs['reason'] == "123" + assert f.some.kwargs["reason"] == "123" def g(): pass w("world", reason2="456")(g) assert g.some.args[0] == "world" - assert 'reason' not in g.some.kwargs - assert g.some.kwargs['reason2'] == "456" + assert "reason" not in g.some.kwargs + assert g.some.kwargs["reason2"] == "456" def test_marked_class_run_twice(testdir, request): """Test fails file is run twice that contains marked class. See issue#683. 
""" - py_file = testdir.makepyfile(""" + py_file = testdir.makepyfile( + """ import pytest @pytest.mark.parametrize('abc', [1, 2, 3]) class Test1(object): def test_1(self, abc): assert abc in [1, 2, 3] - """) + """ + ) file_name = os.path.basename(py_file.strpath) rec = testdir.inline_run(file_name, file_name) rec.assertoutcome(passed=6) def test_ini_markers(testdir): - testdir.makeini(""" + testdir.makeini( + """ [pytest] markers = a1: this is a webtest marker a2: this is a smoke marker - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ def test_markers(pytestconfig): markers = pytestconfig.getini("markers") print (markers) assert len(markers) >= 2 assert markers[0].startswith("a1:") assert markers[1].startswith("a2:") - """) + """ + ) rec = testdir.inline_run() rec.assertoutcome(passed=1) def test_markers_option(testdir): - testdir.makeini(""" + testdir.makeini( + """ [pytest] markers = a1: this is a webtest marker a1some: another marker nodescription - """) - result = testdir.runpytest("--markers", ) - result.stdout.fnmatch_lines([ - "*a1*this is a webtest*", - "*a1some*another marker", - "*nodescription*", - ]) + """ + ) + result = testdir.runpytest("--markers") + result.stdout.fnmatch_lines( + ["*a1*this is a webtest*", "*a1some*another marker", "*nodescription*"] + ) def test_ini_markers_whitespace(testdir): - testdir.makeini(""" + testdir.makeini( + """ [pytest] markers = a1 : this is a whitespace marker - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ import pytest @pytest.mark.a1 def test_markers(): assert True - """) + """ + ) rec = testdir.inline_run("--strict", "-m", "a1") rec.assertoutcome(passed=1) def test_marker_without_description(testdir): - testdir.makefile(".cfg", setup=""" + testdir.makefile( + ".cfg", + setup=""" [tool:pytest] markers=slow - """) - testdir.makeconftest(""" + """, + ) + testdir.makeconftest( + """ import pytest pytest.mark.xfail('FAIL') - """) + """ + ) ftdir = testdir.mkdir("ft1_dummy") testdir.tmpdir.join("conftest.py").move(ftdir.join("conftest.py")) rec = testdir.runpytest_subprocess("--strict") @@ -215,7 +237,8 @@ def test_marker_without_description(testdir): def test_markers_option_with_plugin_in_current_dir(testdir): testdir.makeconftest('pytest_plugins = "flip_flop"') - testdir.makepyfile(flip_flop="""\ + testdir.makepyfile( + flip_flop="""\ def pytest_configure(config): config.addinivalue_line("markers", "flip:flop") @@ -224,51 +247,60 @@ def test_markers_option_with_plugin_in_current_dir(testdir): mark = metafunc.function.flipper except AttributeError: return - metafunc.parametrize("x", (10, 20))""") - testdir.makepyfile("""\ + metafunc.parametrize("x", (10, 20))""" + ) + testdir.makepyfile( + """\ import pytest @pytest.mark.flipper def test_example(x): - assert x""") + assert x""" + ) result = testdir.runpytest("--markers") result.stdout.fnmatch_lines(["*flip*flop*"]) def test_mark_on_pseudo_function(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.mark.r(lambda x: 0/0) def test_hello(): pass - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_strict_prohibits_unregistered_markers(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.mark.unregisteredmark def test_hello(): pass - """) + """ + ) result = testdir.runpytest("--strict") assert result.ret != 0 - result.stdout.fnmatch_lines([ - "*unregisteredmark*not*registered*", - ]) + result.stdout.fnmatch_lines(["*unregisteredmark*not*registered*"]) 
-@pytest.mark.parametrize("spec", [ - ("xyz", ("test_one",)), - ("xyz and xyz2", ()), - ("xyz2", ("test_two",)), - ("xyz or xyz2", ("test_one", "test_two"),) -]) +@pytest.mark.parametrize( + "spec", + [ + ("xyz", ("test_one",)), + ("xyz and xyz2", ()), + ("xyz2", ("test_two",)), + ("xyz or xyz2", ("test_one", "test_two")), + ], +) def test_mark_option(spec, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.mark.xyz def test_one(): @@ -276,7 +308,8 @@ def test_mark_option(spec, testdir): @pytest.mark.xyz2 def test_two(): pass - """) + """ + ) opt, passed_result = spec rec = testdir.inline_run("-m", opt) passed, skipped, fail = rec.listoutcomes() @@ -285,24 +318,27 @@ def test_mark_option(spec, testdir): assert list(passed) == list(passed_result) -@pytest.mark.parametrize("spec", [ - ("interface", ("test_interface",)), - ("not interface", ("test_nointer",)), -]) +@pytest.mark.parametrize( + "spec", [("interface", ("test_interface",)), ("not interface", ("test_nointer",))] +) def test_mark_option_custom(spec, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest def pytest_collection_modifyitems(items): for item in items: if "interface" in item.nodeid: item.add_marker(pytest.mark.interface) - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ def test_interface(): pass def test_nointer(): pass - """) + """ + ) opt, passed_result = spec rec = testdir.inline_run("-m", opt) passed, skipped, fail = rec.listoutcomes() @@ -311,21 +347,26 @@ def test_mark_option_custom(spec, testdir): assert list(passed) == list(passed_result) -@pytest.mark.parametrize("spec", [ - ("interface", ("test_interface",)), - ("not interface", ("test_nointer", "test_pass")), - ("pass", ("test_pass",)), - ("not pass", ("test_interface", "test_nointer")), -]) +@pytest.mark.parametrize( + "spec", + [ + ("interface", ("test_interface",)), + ("not interface", ("test_nointer", "test_pass")), + ("pass", ("test_pass",)), + ("not pass", ("test_interface", "test_nointer")), + ], +) def test_keyword_option_custom(spec, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_interface(): pass def test_nointer(): pass def test_pass(): pass - """) + """ + ) opt, passed_result = spec rec = testdir.inline_run("-k", opt) passed, skipped, fail = rec.listoutcomes() @@ -334,18 +375,23 @@ def test_keyword_option_custom(spec, testdir): assert list(passed) == list(passed_result) -@pytest.mark.parametrize("spec", [ - ("None", ("test_func[None]",)), - ("1.3", ("test_func[1.3]",)), - ("2-3", ("test_func[2-3]",)) -]) +@pytest.mark.parametrize( + "spec", + [ + ("None", ("test_func[None]",)), + ("1.3", ("test_func[1.3]",)), + ("2-3", ("test_func[2-3]",)), + ], +) def test_keyword_option_parametrize(spec, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.mark.parametrize("arg", [None, 1.3, "2-3"]) def test_func(arg): pass - """) + """ + ) opt, passed_result = spec rec = testdir.inline_run("-k", opt) passed, skipped, fail = rec.listoutcomes() @@ -354,15 +400,23 @@ def test_keyword_option_parametrize(spec, testdir): assert list(passed) == list(passed_result) -@pytest.mark.parametrize("spec", [ - ("foo or import", "ERROR: Python keyword 'import' not accepted in expressions passed to '-k'"), - ("foo or", "ERROR: Wrong expression passed to '-k': foo or") -]) +@pytest.mark.parametrize( + "spec", + [ + ( + "foo or import", + "ERROR: Python keyword 'import' not accepted in expressions passed to '-k'", + ), + ("foo or", "ERROR: Wrong 
expression passed to '-k': foo or"),
+    ],
+)
 def test_keyword_option_wrong_arguments(spec, testdir, capsys):
-    testdir.makepyfile("""
+    testdir.makepyfile(
+        """
         def test_func(arg):
             pass
-    """)
+    """
+    )
     opt, expected_result = spec
     testdir.inline_run("-k", opt)
     out = capsys.readouterr().err
@@ -373,12 +427,14 @@ def test_parametrized_collected_from_command_line(testdir):
     """Parametrized test not collected if the test name is specified on the
    command line. See issue #649.
    """
-    py_file = testdir.makepyfile("""
+    py_file = testdir.makepyfile(
+        """
         import pytest
         @pytest.mark.parametrize("arg", [None, 1.3, "2-3"])
         def test_func(arg):
             pass
-    """)
+    """
+    )
     file_name = os.path.basename(py_file.strpath)
     rec = testdir.inline_run(file_name + "::" + "test_func")
     rec.assertoutcome(passed=3)
@@ -386,24 +442,29 @@ def test_parametrized_collect_with_wrong_args(testdir):
     """Test collecting a parametrized func with the wrong number of args."""
-    py_file = testdir.makepyfile("""
+    py_file = testdir.makepyfile(
+        """
         import pytest

         @pytest.mark.parametrize('foo, bar', [(1, 2, 3)])
         def test_func(foo, bar):
             pass
-    """)
+    """
+    )
     result = testdir.runpytest(py_file)
-    result.stdout.fnmatch_lines([
-        'E   ValueError: In "parametrize" the number of values ((1, 2, 3)) '
-        'must be equal to the number of names ([\'foo\', \'bar\'])'
-    ])
+    result.stdout.fnmatch_lines(
+        [
+            'E   ValueError: In "parametrize" the number of values ((1, 2, 3)) '
+            "must be equal to the number of names (['foo', 'bar'])"
+        ]
+    )


 def test_parametrized_with_kwargs(testdir):
     """Test collecting a parametrized func that passes argnames/argvalues as
    keyword arguments."""
-    py_file = testdir.makepyfile("""
+    py_file = testdir.makepyfile(
+        """
        import pytest

        @pytest.fixture(params=[1,2])
@@ -413,72 +474,84 @@ def test_parametrized_with_kwargs(testdir):
        def a(request):
            return request.param

        @pytest.mark.parametrize(argnames='b', argvalues=[1, 2])
        def test_func(a, b):
            pass
-    """)
+    """
+    )
     result = testdir.runpytest(py_file)
-    assert(result.ret == 0)
+    assert result.ret == 0


 class TestFunctional(object):

     def test_mark_per_function(self, testdir):
-        p = testdir.makepyfile("""
+        p = testdir.makepyfile(
+            """
             import pytest
             @pytest.mark.hello
             def test_hello(): assert hasattr(test_hello, 'hello')
-        """)
+        """
+        )
         result = testdir.runpytest(p)
         result.stdout.fnmatch_lines(["*1 passed*"])

     def test_mark_per_module(self, testdir):
-        item = testdir.getitem("""
+        item = testdir.getitem(
+            """
             import pytest
             pytestmark = pytest.mark.hello
             def test_func():
                 pass
-        """)
+        """
+        )
         keywords = item.keywords
-        assert 'hello' in keywords
+        assert "hello" in keywords

     def test_marklist_per_class(self, testdir):
-        item = testdir.getitem("""
+        item = testdir.getitem(
+            """
             import pytest
             class TestClass(object):
                 pytestmark = [pytest.mark.hello, pytest.mark.world]
                 def test_func(self):
                     assert TestClass.test_func.hello
                     assert TestClass.test_func.world
-        """)
+        """
+        )
         keywords = item.keywords
-        assert 'hello' in keywords
+        assert "hello" in keywords

     def test_marklist_per_module(self, testdir):
-        item = testdir.getitem("""
+        item = testdir.getitem(
+            """
             import pytest
             pytestmark = [pytest.mark.hello, pytest.mark.world]
             class TestClass(object):
                 def test_func(self):
                     assert TestClass.test_func.hello
                     assert TestClass.test_func.world
-        """)
+        """
+        )
         keywords = item.keywords
-        assert 'hello' in keywords
-        assert 'world' in keywords
+        assert "hello" in keywords
+        assert "world" in keywords

     def test_mark_per_class_decorator(self, testdir):
-        item = testdir.getitem("""
+        item = testdir.getitem(
+            """
             import pytest
             
@pytest.mark.hello class TestClass(object): def test_func(self): assert TestClass.test_func.hello - """) + """ + ) keywords = item.keywords - assert 'hello' in keywords + assert "hello" in keywords def test_mark_per_class_decorator_plus_existing_dec(self, testdir): - item = testdir.getitem(""" + item = testdir.getitem( + """ import pytest @pytest.mark.hello class TestClass(object): @@ -486,14 +559,16 @@ class TestFunctional(object): def test_func(self): assert TestClass.test_func.hello assert TestClass.test_func.world - """) + """ + ) keywords = item.keywords - assert 'hello' in keywords - assert 'world' in keywords + assert "hello" in keywords + assert "world" in keywords @ignore_markinfo def test_merging_markers(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import pytest pytestmark = pytest.mark.hello("pos1", x=1, y=2) class TestClass(object): @@ -502,24 +577,26 @@ class TestFunctional(object): @pytest.mark.hello("pos0", z=4) def test_func(self): pass - """) + """ + ) items, rec = testdir.inline_genitems(p) item, = items keywords = item.keywords - marker = keywords['hello'] + marker = keywords["hello"] assert marker.args == ("pos0", "pos1") - assert marker.kwargs == {'x': 1, 'y': 2, 'z': 4} + assert marker.kwargs == {"x": 1, "y": 2, "z": 4} # test the new __iter__ interface values = list(marker) assert len(values) == 3 assert values[0].args == ("pos0",) assert values[1].args == () - assert values[2].args == ("pos1", ) + assert values[2].args == ("pos1",) def test_merging_markers_deep(self, testdir): # issue 199 - propagate markers into nested classes - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import pytest class TestA(object): pytestmark = pytest.mark.a @@ -529,14 +606,16 @@ class TestFunctional(object): # this one didnt get marked def test_d(self): assert True - """) + """ + ) items, rec = testdir.inline_genitems(p) for item in items: print(item, item.keywords) - assert [x for x in item.iter_markers() if x.name == 'a'] + assert [x for x in item.iter_markers() if x.name == "a"] def test_mark_decorator_subclass_does_not_propagate_to_base(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import pytest @pytest.mark.a @@ -548,13 +627,15 @@ class TestFunctional(object): class Test2(Base): def test_bar(self): pass - """) + """ + ) items, rec = testdir.inline_genitems(p) - self.assert_markers(items, test_foo=('a', 'b'), test_bar=('a',)) + self.assert_markers(items, test_foo=("a", "b"), test_bar=("a",)) @pytest.mark.issue568 def test_mark_should_not_pass_to_siebling_class(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import pytest class TestBase(object): @@ -569,22 +650,24 @@ class TestFunctional(object): class TestOtherSub(TestBase): pass - """) + """ + ) items, rec = testdir.inline_genitems(p) base_item, sub_item, sub_item_other = items print(items, [x.nodeid for x in items]) # legacy api smears - assert hasattr(base_item.obj, 'b') - assert hasattr(sub_item_other.obj, 'b') - assert hasattr(sub_item.obj, 'b') + assert hasattr(base_item.obj, "b") + assert hasattr(sub_item_other.obj, "b") + assert hasattr(sub_item.obj, "b") # new api seregates - assert not list(base_item.iter_markers(name='b')) - assert not list(sub_item_other.iter_markers(name='b')) - assert list(sub_item.iter_markers(name='b')) + assert not list(base_item.iter_markers(name="b")) + assert not list(sub_item_other.iter_markers(name="b")) + assert list(sub_item.iter_markers(name="b")) def 
test_mark_decorator_baseclasses_merged(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import pytest @pytest.mark.a @@ -600,13 +683,14 @@ class TestFunctional(object): class Test2(Base2): @pytest.mark.d def test_bar(self): pass - """) + """ + ) items, rec = testdir.inline_genitems(p) - self.assert_markers(items, test_foo=('a', 'b', 'c'), - test_bar=('a', 'b', 'd')) + self.assert_markers(items, test_foo=("a", "b", "c"), test_bar=("a", "b", "d")) def test_mark_closest(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import pytest @pytest.mark.c(location="class") @@ -618,27 +702,31 @@ class TestFunctional(object): def test_has_inherited(): pass - """) + """ + ) items, rec = testdir.inline_genitems(p) has_own, has_inherited = items - assert has_own.get_closest_marker('c').kwargs == {'location': 'function'} - assert has_inherited.get_closest_marker('c').kwargs == {'location': 'class'} - assert has_own.get_closest_marker('missing') is None + assert has_own.get_closest_marker("c").kwargs == {"location": "function"} + assert has_inherited.get_closest_marker("c").kwargs == {"location": "class"} + assert has_own.get_closest_marker("missing") is None def test_mark_with_wrong_marker(self, testdir): - reprec = testdir.inline_runsource(""" + reprec = testdir.inline_runsource( + """ import pytest class pytestmark(object): pass def test_func(): pass - """) + """ + ) values = reprec.getfailedcollections() assert len(values) == 1 assert "TypeError" in str(values[0].longrepr) def test_mark_dynamically_in_funcarg(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest @pytest.fixture def arg(request): @@ -646,36 +734,40 @@ class TestFunctional(object): def pytest_terminal_summary(terminalreporter): values = terminalreporter.stats['passed'] terminalreporter._tw.line("keyword: %s" % values[0].keywords) - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ def test_func(arg): pass - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "keyword: *hello*" - ]) + result.stdout.fnmatch_lines(["keyword: *hello*"]) @ignore_markinfo def test_merging_markers_two_functions(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import pytest @pytest.mark.hello("pos1", z=4) @pytest.mark.hello("pos0", z=3) def test_func(): pass - """) + """ + ) items, rec = testdir.inline_genitems(p) item, = items keywords = item.keywords - marker = keywords['hello'] + marker = keywords["hello"] values = list(marker) assert len(values) == 2 assert values[0].args == ("pos0",) assert values[1].args == ("pos1",) def test_no_marker_match_on_unmarked_names(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import pytest @pytest.mark.shouldmatch def test_marked(): @@ -683,7 +775,8 @@ class TestFunctional(object): def test_unmarked(): assert 1 - """) + """ + ) reprec = testdir.inline_run("-m", "test_unmarked", p) passed, skipped, failed = reprec.listoutcomes() assert len(passed) + len(skipped) + len(failed) == 0 @@ -692,7 +785,8 @@ class TestFunctional(object): assert len(deselected_tests) == 2 def test_keywords_at_node_level(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture(scope="session", autouse=True) def some(request): @@ -707,13 +801,15 @@ class TestFunctional(object): @pytest.mark.world def test_function(): pass - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) @ignore_markinfo def 
test_keyword_added_for_session(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest def pytest_collection_modifyitems(session): session.add_marker("mark1") @@ -721,8 +817,10 @@ class TestFunctional(object): session.add_marker(pytest.mark.mark3) pytest.raises(ValueError, lambda: session.add_marker(10)) - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ def test_some(request): assert "mark1" in request.keywords assert "mark2" in request.keywords @@ -732,7 +830,8 @@ class TestFunctional(object): assert marker.name == "mark1" assert marker.args == () assert marker.kwargs == {} - """) + """ + ) reprec = testdir.inline_run("-m", "mark1") reprec.assertoutcome(passed=1) @@ -744,17 +843,20 @@ class TestFunctional(object): to other modules. """ from _pytest.mark import MarkInfo - items = dict((x.name, x) for x in items) + + items = {x.name: x for x in items} for name, expected_markers in expected.items(): markers = items[name].keywords._markers - marker_names = set([name for (name, v) in markers.items() - if isinstance(v, MarkInfo)]) + marker_names = { + name for (name, v) in markers.items() if isinstance(v, MarkInfo) + } assert marker_names == set(expected_markers) @pytest.mark.issue1540 @pytest.mark.filterwarnings("ignore") def test_mark_from_parameters(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest pytestmark = pytest.mark.skipif(True, reason='skip all') @@ -770,7 +872,8 @@ class TestFunctional(object): def test_1(parameter): assert True - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(skipped=1) @@ -778,37 +881,50 @@ class TestFunctional(object): class TestKeywordSelection(object): def test_select_simple(self, testdir): - file_test = testdir.makepyfile(""" + file_test = testdir.makepyfile( + """ def test_one(): assert 0 class TestClass(object): def test_method_one(self): assert 42 == 43 - """) + """ + ) def check(keyword, name): reprec = testdir.inline_run("-s", "-k", keyword, file_test) passed, skipped, failed = reprec.listoutcomes() assert len(failed) == 1 assert failed[0].nodeid.split("::")[-1] == name - assert len(reprec.getcalls('pytest_deselected')) == 1 + assert len(reprec.getcalls("pytest_deselected")) == 1 - for keyword in ['test_one', 'est_on']: - check(keyword, 'test_one') - check('TestClass and test', 'test_method_one') + for keyword in ["test_one", "est_on"]: + check(keyword, "test_one") + check("TestClass and test", "test_method_one") - @pytest.mark.parametrize("keyword", [ - 'xxx', 'xxx and test_2', 'TestClass', 'xxx and not test_1', - 'TestClass and test_2', 'xxx and TestClass and test_2']) + @pytest.mark.parametrize( + "keyword", + [ + "xxx", + "xxx and test_2", + "TestClass", + "xxx and not test_1", + "TestClass and test_2", + "xxx and TestClass and test_2", + ], + ) def test_select_extra_keywords(self, testdir, keyword): - p = testdir.makepyfile(test_select=""" + p = testdir.makepyfile( + test_select=""" def test_1(): pass class TestClass(object): def test_2(self): pass - """) - testdir.makepyfile(conftest=""" + """ + ) + testdir.makepyfile( + conftest=""" import pytest @pytest.hookimpl(hookwrapper=True) def pytest_pycollect_makeitem(name): @@ -816,22 +932,25 @@ class TestKeywordSelection(object): if name == "TestClass": item = outcome.get_result() item.extra_keyword_matches.add("xxx") - """) - reprec = testdir.inline_run(p.dirpath(), '-s', '-k', keyword) + """ + ) + reprec = testdir.inline_run(p.dirpath(), "-s", "-k", keyword) print("keyword", repr(keyword)) passed, 
skipped, failed = reprec.listoutcomes() assert len(passed) == 1 assert passed[0].nodeid.endswith("test_2") dlist = reprec.getcalls("pytest_deselected") assert len(dlist) == 1 - assert dlist[0].items[0].name == 'test_1' + assert dlist[0].items[0].name == "test_1" def test_select_starton(self, testdir): - threepass = testdir.makepyfile(test_threepass=""" + threepass = testdir.makepyfile( + test_threepass=""" def test_one(): assert 1 def test_two(): assert 1 def test_three(): assert 1 - """) + """ + ) reprec = testdir.inline_run("-k", "test_two:", threepass) passed, skipped, failed = reprec.listoutcomes() assert len(passed) == 2 @@ -842,22 +961,26 @@ class TestKeywordSelection(object): assert item.name == "test_one" def test_keyword_extra(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ def test_one(): assert 0 test_one.mykeyword = True - """) + """ + ) reprec = testdir.inline_run("-k", "mykeyword", p) passed, skipped, failed = reprec.countoutcomes() assert failed == 1 @pytest.mark.xfail def test_keyword_extra_dash(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ def test_one(): assert 0 test_one.mykeyword = True - """) + """ + ) # with argparse the argument to an option cannot # start with '-' reprec = testdir.inline_run("-k", "-mykeyword", p) @@ -869,9 +992,11 @@ class TestKeywordSelection(object): no double underscored values, like '__dict__', and no instance values, like '()'. """ - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ def test_one(): assert 1 - """) + """ + ) def assert_test_is_not_selected(keyword): reprec = testdir.inline_run("-k", keyword, p) @@ -885,15 +1010,22 @@ class TestKeywordSelection(object): assert_test_is_not_selected("()") -@pytest.mark.parametrize('argval, expected', [ - (pytest.mark.skip()((1, 2)), - ParameterSet(values=(1, 2), marks=[pytest.mark.skip], id=None)), - (pytest.mark.xfail(pytest.mark.skip()((1, 2))), - ParameterSet(values=(1, 2), - marks=[pytest.mark.xfail, pytest.mark.skip], id=None)), - -]) -@pytest.mark.filterwarnings('ignore') +@pytest.mark.parametrize( + "argval, expected", + [ + ( + pytest.mark.skip()((1, 2)), + ParameterSet(values=(1, 2), marks=[pytest.mark.skip], id=None), + ), + ( + pytest.mark.xfail(pytest.mark.skip()((1, 2))), + ParameterSet( + values=(1, 2), marks=[pytest.mark.xfail, pytest.mark.skip], id=None + ), + ), + ], +) +@pytest.mark.filterwarnings("ignore") def test_parameterset_extractfrom(argval, expected): extracted = ParameterSet.extract_from(argval) assert extracted == expected @@ -922,42 +1054,46 @@ def test_legacy_transfer(): class TestMarkDecorator(object): - @pytest.mark.parametrize('lhs, rhs, expected', [ - (pytest.mark.foo(), pytest.mark.foo(), True), - (pytest.mark.foo(), pytest.mark.bar(), False), - (pytest.mark.foo(), 'bar', False), - ('foo', pytest.mark.bar(), False) - ]) + @pytest.mark.parametrize( + "lhs, rhs, expected", + [ + (pytest.mark.foo(), pytest.mark.foo(), True), + (pytest.mark.foo(), pytest.mark.bar(), False), + (pytest.mark.foo(), "bar", False), + ("foo", pytest.mark.bar(), False), + ], + ) def test__eq__(self, lhs, rhs, expected): assert (lhs == rhs) == expected -@pytest.mark.parametrize('mark', [None, '', 'skip', 'xfail']) +@pytest.mark.parametrize("mark", [None, "", "skip", "xfail"]) def test_parameterset_for_parametrize_marks(testdir, mark): if mark is not None: - testdir.makeini( - "[pytest]\n{}={}".format(EMPTY_PARAMETERSET_OPTION, mark)) + testdir.makeini("[pytest]\n{}={}".format(EMPTY_PARAMETERSET_OPTION, mark)) config = 
testdir.parseconfig() from _pytest.mark import pytest_configure, get_empty_parameterset_mark + pytest_configure(config) - result_mark = get_empty_parameterset_mark(config, ['a'], all) - if mark in (None, ''): + result_mark = get_empty_parameterset_mark(config, ["a"], all) + if mark in (None, ""): # normalize to the requested name - mark = 'skip' + mark = "skip" assert result_mark.name == mark - assert result_mark.kwargs['reason'].startswith("got empty parameter set ") - if mark == 'xfail': - assert result_mark.kwargs.get('run') is False + assert result_mark.kwargs["reason"].startswith("got empty parameter set ") + if mark == "xfail": + assert result_mark.kwargs.get("run") is False def test_parameterset_for_parametrize_bad_markname(testdir): with pytest.raises(pytest.UsageError): - test_parameterset_for_parametrize_marks(testdir, 'bad') + test_parameterset_for_parametrize_marks(testdir, "bad") def test_mark_expressions_no_smear(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest class BaseTests(object): @@ -971,9 +1107,10 @@ def test_mark_expressions_no_smear(testdir): @pytest.mark.BAR class TestBarClass(BaseTests): pass - """) + """ + ) - reprec = testdir.inline_run("-m", 'FOO') + reprec = testdir.inline_run("-m", "FOO") passed, skipped, failed = reprec.countoutcomes() dlist = reprec.getcalls("pytest_deselected") assert passed == 1 @@ -982,7 +1119,7 @@ def test_mark_expressions_no_smear(testdir): assert len(deselected_tests) == 1 # keywords smear - expected behaviour - reprec_keywords = testdir.inline_run("-k", 'FOO') + reprec_keywords = testdir.inline_run("-k", "FOO") passed_k, skipped_k, failed_k = reprec_keywords.countoutcomes() assert passed_k == 2 assert skipped_k == failed_k == 0 diff --git a/testing/test_modimport.py b/testing/test_modimport.py index 2ab86bf7a..f7b92a0b6 100644 --- a/testing/test_modimport.py +++ b/testing/test_modimport.py @@ -5,21 +5,25 @@ import pytest import _pytest MODSET = [ - x for x in py.path.local(_pytest.__file__).dirpath().visit('*.py') - if x.purebasename != '__init__' + x + for x in py.path.local(_pytest.__file__).dirpath().visit("*.py") + if x.purebasename != "__init__" ] -@pytest.mark.parametrize('modfile', MODSET, ids=lambda x: x.purebasename) +@pytest.mark.parametrize("modfile", MODSET, ids=lambda x: x.purebasename) def test_fileimport(modfile): # this test ensures all internal packages can import # without needing the pytest namespace being set # this is critical for the initialization of xdist - res = subprocess.call([ - sys.executable, - '-c', 'import sys, py; py.path.local(sys.argv[1]).pyimport()', - modfile.strpath, - ]) + res = subprocess.call( + [ + sys.executable, + "-c", + "import sys, py; py.path.local(sys.argv[1]).pyimport()", + modfile.strpath, + ] + ) if res: pytest.fail("command result %s" % res) diff --git a/testing/test_monkeypatch.py b/testing/test_monkeypatch.py index 36ef083f7..c298ce0d9 100644 --- a/testing/test_monkeypatch.py +++ b/testing/test_monkeypatch.py @@ -17,20 +17,21 @@ def mp(): def test_setattr(): + class A(object): x = 1 monkeypatch = MonkeyPatch() pytest.raises(AttributeError, "monkeypatch.setattr(A, 'notexists', 2)") - monkeypatch.setattr(A, 'y', 2, raising=False) + monkeypatch.setattr(A, "y", 2, raising=False) assert A.y == 2 monkeypatch.undo() - assert not hasattr(A, 'y') + assert not hasattr(A, "y") monkeypatch = MonkeyPatch() - monkeypatch.setattr(A, 'x', 2) + monkeypatch.setattr(A, "x", 2) assert A.x == 2 - monkeypatch.setattr(A, 'x', 3) + monkeypatch.setattr(A, "x", 3) assert A.x == 
3 monkeypatch.undo() assert A.x == 1 @@ -41,6 +42,7 @@ def test_setattr(): class TestSetattrWithImportPath(object): + def test_string_expression(self, monkeypatch): monkeypatch.setattr("os.path.abspath", lambda x: "hello2") assert os.path.abspath("123") == "hello2" @@ -48,11 +50,13 @@ class TestSetattrWithImportPath(object): def test_string_expression_class(self, monkeypatch): monkeypatch.setattr("_pytest.config.Config", 42) import _pytest + assert _pytest.config.Config == 42 def test_unicode_string(self, monkeypatch): monkeypatch.setattr("_pytest.config.Config", 42) import _pytest + assert _pytest.config.Config == 42 monkeypatch.delattr("_pytest.config.Config") @@ -60,16 +64,16 @@ class TestSetattrWithImportPath(object): pytest.raises(TypeError, lambda: monkeypatch.setattr(None, None)) def test_unknown_import(self, monkeypatch): - pytest.raises(ImportError, - lambda: monkeypatch.setattr("unkn123.classx", None)) + pytest.raises(ImportError, lambda: monkeypatch.setattr("unkn123.classx", None)) def test_unknown_attr(self, monkeypatch): - pytest.raises(AttributeError, - lambda: monkeypatch.setattr("os.path.qweqwe", None)) + pytest.raises( + AttributeError, lambda: monkeypatch.setattr("os.path.qweqwe", None) + ) def test_unknown_attr_non_raising(self, monkeypatch): # https://github.com/pytest-dev/pytest/issues/746 - monkeypatch.setattr('os.path.qweqwe', 42, raising=False) + monkeypatch.setattr("os.path.qweqwe", 42, raising=False) assert os.path.qweqwe == 42 def test_delattr(self, monkeypatch): @@ -80,48 +84,49 @@ class TestSetattrWithImportPath(object): def test_delattr(): + class A(object): x = 1 monkeypatch = MonkeyPatch() - monkeypatch.delattr(A, 'x') - assert not hasattr(A, 'x') + monkeypatch.delattr(A, "x") + assert not hasattr(A, "x") monkeypatch.undo() assert A.x == 1 monkeypatch = MonkeyPatch() - monkeypatch.delattr(A, 'x') + monkeypatch.delattr(A, "x") pytest.raises(AttributeError, "monkeypatch.delattr(A, 'y')") - monkeypatch.delattr(A, 'y', raising=False) - monkeypatch.setattr(A, 'x', 5, raising=False) + monkeypatch.delattr(A, "y", raising=False) + monkeypatch.setattr(A, "x", 5, raising=False) assert A.x == 5 monkeypatch.undo() assert A.x == 1 def test_setitem(): - d = {'x': 1} + d = {"x": 1} monkeypatch = MonkeyPatch() - monkeypatch.setitem(d, 'x', 2) - monkeypatch.setitem(d, 'y', 1700) - monkeypatch.setitem(d, 'y', 1700) - assert d['x'] == 2 - assert d['y'] == 1700 - monkeypatch.setitem(d, 'x', 3) - assert d['x'] == 3 + monkeypatch.setitem(d, "x", 2) + monkeypatch.setitem(d, "y", 1700) + monkeypatch.setitem(d, "y", 1700) + assert d["x"] == 2 + assert d["y"] == 1700 + monkeypatch.setitem(d, "x", 3) + assert d["x"] == 3 monkeypatch.undo() - assert d['x'] == 1 - assert 'y' not in d - d['x'] = 5 + assert d["x"] == 1 + assert "y" not in d + d["x"] = 5 monkeypatch.undo() - assert d['x'] == 5 + assert d["x"] == 5 def test_setitem_deleted_meanwhile(): d = {} monkeypatch = MonkeyPatch() - monkeypatch.setitem(d, 'x', 2) - del d['x'] + monkeypatch.setitem(d, "x", 2) + del d["x"] monkeypatch.undo() assert not d @@ -132,7 +137,7 @@ def test_setenv_deleted_meanwhile(before): if before: os.environ[key] = "world" monkeypatch = MonkeyPatch() - monkeypatch.setenv(key, 'hello') + monkeypatch.setenv(key, "hello") del os.environ[key] monkeypatch.undo() if before: @@ -143,33 +148,34 @@ def test_setenv_deleted_meanwhile(before): def test_delitem(): - d = {'x': 1} + d = {"x": 1} monkeypatch = MonkeyPatch() - monkeypatch.delitem(d, 'x') - assert 'x' not in d - monkeypatch.delitem(d, 'y', raising=False) + 
monkeypatch.delitem(d, "x") + assert "x" not in d + monkeypatch.delitem(d, "y", raising=False) pytest.raises(KeyError, "monkeypatch.delitem(d, 'y')") assert not d - monkeypatch.setitem(d, 'y', 1700) - assert d['y'] == 1700 - d['hello'] = 'world' - monkeypatch.setitem(d, 'x', 1500) - assert d['x'] == 1500 + monkeypatch.setitem(d, "y", 1700) + assert d["y"] == 1700 + d["hello"] = "world" + monkeypatch.setitem(d, "x", 1500) + assert d["x"] == 1500 monkeypatch.undo() - assert d == {'hello': 'world', 'x': 1} + assert d == {"hello": "world", "x": 1} def test_setenv(): monkeypatch = MonkeyPatch() - monkeypatch.setenv('XYZ123', 2) + monkeypatch.setenv("XYZ123", 2) import os - assert os.environ['XYZ123'] == "2" + + assert os.environ["XYZ123"] == "2" monkeypatch.undo() - assert 'XYZ123' not in os.environ + assert "XYZ123" not in os.environ def test_delenv(): - name = 'xyz1234' + name = "xyz1234" assert name not in os.environ monkeypatch = MonkeyPatch() pytest.raises(KeyError, "monkeypatch.delenv(%r, raising=True)" % name) @@ -191,28 +197,31 @@ def test_delenv(): def test_setenv_prepend(): import os + monkeypatch = MonkeyPatch() - monkeypatch.setenv('XYZ123', 2, prepend="-") - assert os.environ['XYZ123'] == "2" - monkeypatch.setenv('XYZ123', 3, prepend="-") - assert os.environ['XYZ123'] == "3-2" + monkeypatch.setenv("XYZ123", 2, prepend="-") + assert os.environ["XYZ123"] == "2" + monkeypatch.setenv("XYZ123", 3, prepend="-") + assert os.environ["XYZ123"] == "3-2" monkeypatch.undo() - assert 'XYZ123' not in os.environ + assert "XYZ123" not in os.environ def test_monkeypatch_plugin(testdir): - reprec = testdir.inline_runsource(""" + reprec = testdir.inline_runsource( + """ def test_method(monkeypatch): assert monkeypatch.__class__.__name__ == "MonkeyPatch" - """) + """ + ) res = reprec.countoutcomes() assert tuple(res) == (1, 0, 0), res def test_syspath_prepend(mp): old = list(sys.path) - mp.syspath_prepend('world') - mp.syspath_prepend('hello') + mp.syspath_prepend("world") + mp.syspath_prepend("hello") assert sys.path[0] == "hello" assert sys.path[1] == "world" mp.undo() @@ -222,11 +231,11 @@ def test_syspath_prepend(mp): def test_syspath_prepend_double_undo(mp): - mp.syspath_prepend('hello world') + mp.syspath_prepend("hello world") mp.undo() - sys.path.append('more hello world') + sys.path.append("more hello world") mp.undo() - assert sys.path[-1] == 'more hello world' + assert sys.path[-1] == "more hello world" def test_chdir_with_path_local(mp, tmpdir): @@ -255,37 +264,54 @@ def test_chdir_double_undo(mp, tmpdir): def test_issue185_time_breaks(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import time def test_m(monkeypatch): def f(): raise Exception monkeypatch.setattr(time, "time", f) - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ *1 passed* - """) + """ + ) def test_importerror(testdir): p = testdir.mkpydir("package") - p.join("a.py").write(textwrap.dedent("""\ + p.join("a.py").write( + textwrap.dedent( + """\ import doesnotexist x = 1 - """)) - testdir.tmpdir.join("test_importerror.py").write(textwrap.dedent("""\ + """ + ) + ) + testdir.tmpdir.join("test_importerror.py").write( + textwrap.dedent( + """\ def test_importerror(monkeypatch): monkeypatch.setattr('package.a.x', 2) - """)) + """ + ) + ) result = testdir.runpytest() - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ *import error in package.a: No module named {0}doesnotexist{0}* - """.format("'" if sys.version_info > (3, 0) else 
"")) + """.format( + "'" if sys.version_info > (3, 0) else "" + ) + ) class SampleNew(object): + @staticmethod def hello(): return True @@ -306,14 +332,15 @@ class SampleOldInherit(SampleOld): pass -@pytest.mark.parametrize('Sample', [ - SampleNew, SampleNewInherit, - SampleOld, SampleOldInherit, -], ids=['new', 'new-inherit', 'old', 'old-inherit']) +@pytest.mark.parametrize( + "Sample", + [SampleNew, SampleNewInherit, SampleOld, SampleOldInherit], + ids=["new", "new-inherit", "old", "old-inherit"], +) def test_issue156_undo_staticmethod(Sample): monkeypatch = MonkeyPatch() - monkeypatch.setattr(Sample, 'hello', None) + monkeypatch.setattr(Sample, "hello", None) assert Sample.hello is None monkeypatch.undo() @@ -321,10 +348,10 @@ def test_issue156_undo_staticmethod(Sample): def test_issue1338_name_resolving(): - pytest.importorskip('requests') + pytest.importorskip("requests") monkeypatch = MonkeyPatch() try: - monkeypatch.delattr('requests.sessions.Session.request') + monkeypatch.delattr("requests.sessions.Session.request") finally: monkeypatch.undo() diff --git a/testing/test_nodes.py b/testing/test_nodes.py index 6f4540f99..eee3ac8e9 100644 --- a/testing/test_nodes.py +++ b/testing/test_nodes.py @@ -3,16 +3,19 @@ import pytest from _pytest import nodes -@pytest.mark.parametrize("baseid, nodeid, expected", ( - ('', '', True), - ('', 'foo', True), - ('', 'foo/bar', True), - ('', 'foo/bar::TestBaz::()', True), - ('foo', 'food', False), - ('foo/bar::TestBaz::()', 'foo/bar', False), - ('foo/bar::TestBaz::()', 'foo/bar::TestBop::()', False), - ('foo/bar', 'foo/bar::TestBop::()', True), -)) +@pytest.mark.parametrize( + "baseid, nodeid, expected", + ( + ("", "", True), + ("", "foo", True), + ("", "foo/bar", True), + ("", "foo/bar::TestBaz::()", True), + ("foo", "food", False), + ("foo/bar::TestBaz::()", "foo/bar", False), + ("foo/bar::TestBaz::()", "foo/bar::TestBop::()", False), + ("foo/bar", "foo/bar::TestBop::()", True), + ), +) def test_ischildnode(baseid, nodeid, expected): result = nodes.ischildnode(baseid, nodeid) assert result is expected diff --git a/testing/test_nose.py b/testing/test_nose.py index 1964b06c5..abe732375 100644 --- a/testing/test_nose.py +++ b/testing/test_nose.py @@ -7,7 +7,8 @@ def setup_module(mod): def test_nose_setup(testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ values = [] from nose.tools import with_setup @@ -20,16 +21,19 @@ def test_nose_setup(testdir): test_hello.setup = lambda: values.append(1) test_hello.teardown = lambda: values.append(2) - """) - result = testdir.runpytest(p, '-p', 'nose') + """ + ) + result = testdir.runpytest(p, "-p", "nose") result.assert_outcomes(passed=2) def test_setup_func_with_setup_decorator(): from _pytest.nose import call_optional + values = [] class A(object): + @pytest.fixture(autouse=True) def f(self): values.append(1) @@ -48,7 +52,8 @@ def test_setup_func_not_callable(): def test_nose_setup_func(testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ from nose.tools import with_setup values = [] @@ -70,13 +75,15 @@ def test_nose_setup_func(testdir): print (values) assert values == [1,2] - """) - result = testdir.runpytest(p, '-p', 'nose') + """ + ) + result = testdir.runpytest(p, "-p", "nose") result.assert_outcomes(passed=2) def test_nose_setup_func_failure(testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ from nose.tools import with_setup values = [] @@ -92,15 +99,15 @@ def test_nose_setup_func_failure(testdir): print (values) assert values == [1,2] - """) - 
result = testdir.runpytest(p, '-p', 'nose') - result.stdout.fnmatch_lines([ - "*TypeError: ()*" - ]) + """ + ) + result = testdir.runpytest(p, "-p", "nose") + result.stdout.fnmatch_lines(["*TypeError: ()*"]) def test_nose_setup_func_failure_2(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ values = [] my_setup = 1 @@ -111,14 +118,16 @@ def test_nose_setup_func_failure_2(testdir): test_hello.setup = my_setup test_hello.teardown = my_teardown - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_nose_setup_partial(testdir): pytest.importorskip("functools") - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ from functools import partial values = [] @@ -144,15 +153,15 @@ def test_nose_setup_partial(testdir): test_hello.setup = my_setup_partial test_hello.teardown = my_teardown_partial - """) - result = testdir.runpytest(p, '-p', 'nose') - result.stdout.fnmatch_lines([ - "*2 passed*" - ]) + """ + ) + result = testdir.runpytest(p, "-p", "nose") + result.stdout.fnmatch_lines(["*2 passed*"]) def test_nose_test_generator_fixtures(testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ # taken from nose-0.11.1 unit_tests/test_generator_fixtures.py from nose.tools import eq_ called = [] @@ -211,15 +220,15 @@ def test_nose_test_generator_fixtures(testdir): # expect.append('teardown') #expect.append('setup') eq_(self.called, expect) - """) - result = testdir.runpytest(p, '-p', 'nose') - result.stdout.fnmatch_lines([ - "*10 passed*" - ]) + """ + ) + result = testdir.runpytest(p, "-p", "nose") + result.stdout.fnmatch_lines(["*10 passed*"]) def test_module_level_setup(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ from nose.tools import with_setup items = {} @@ -242,15 +251,15 @@ def test_module_level_setup(testdir): def test_local_setup(): assert items[2] == 2 assert 1 not in items - """) - result = testdir.runpytest('-p', 'nose') - result.stdout.fnmatch_lines([ - "*2 passed*", - ]) + """ + ) + result = testdir.runpytest("-p", "nose") + result.stdout.fnmatch_lines(["*2 passed*"]) def test_nose_style_setup_teardown(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ values = [] def setup_module(): @@ -264,15 +273,15 @@ def test_nose_style_setup_teardown(testdir): def test_world(): assert values == [1] - """) - result = testdir.runpytest('-p', 'nose') - result.stdout.fnmatch_lines([ - "*2 passed*", - ]) + """ + ) + result = testdir.runpytest("-p", "nose") + result.stdout.fnmatch_lines(["*2 passed*"]) def test_nose_setup_ordering(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def setup_module(mod): mod.visited = True @@ -281,17 +290,17 @@ def test_nose_setup_ordering(testdir): assert visited def test_first(self): pass - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*1 passed*", - ]) + result.stdout.fnmatch_lines(["*1 passed*"]) def test_apiwrapper_problem_issue260(testdir): # this would end up trying a call an optional teardown on the class # for plain unittests we dont want nose behaviour - testdir.makepyfile(""" + testdir.makepyfile( + """ import unittest class TestCase(unittest.TestCase): def setup(self): @@ -306,7 +315,8 @@ def test_apiwrapper_problem_issue260(testdir): print('teardown') def test_fun(self): pass - """) + """ + ) result = testdir.runpytest() result.assert_outcomes(passed=1) @@ -314,7 +324,8 @@ def test_apiwrapper_problem_issue260(testdir): def test_setup_teardown_linking_issue265(testdir): # we accidentally didnt integrate nose setupstate 
with normal setupstate # this test ensures that won't happen again - testdir.makepyfile(''' + testdir.makepyfile( + ''' import pytest class TestGeneric(object): @@ -332,51 +343,60 @@ def test_setup_teardown_linking_issue265(testdir): def teardown(self): """Undoes the setup.""" raise Exception("should not call teardown for skipped tests") - ''') + ''' + ) reprec = testdir.runpytest() reprec.assert_outcomes(passed=1, skipped=1) def test_SkipTest_during_collection(testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import nose raise nose.SkipTest("during collection") def test_failing(): assert False - """) + """ + ) result = testdir.runpytest(p) result.assert_outcomes(skipped=1) def test_SkipTest_in_test(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import nose def test_skipping(): raise nose.SkipTest("in test") - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(skipped=1) def test_istest_function_decorator(testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import nose.tools @nose.tools.istest def not_test_prefix(): pass - """) + """ + ) result = testdir.runpytest(p) result.assert_outcomes(passed=1) def test_nottest_function_decorator(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import nose.tools @nose.tools.nottest def test_prefix(): pass - """) + """ + ) reprec = testdir.inline_run() assert not reprec.getfailedcollections() calls = reprec.getreports("pytest_runtest_logreport") @@ -384,25 +404,29 @@ def test_nottest_function_decorator(testdir): def test_istest_class_decorator(testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import nose.tools @nose.tools.istest class NotTestPrefix(object): def test_method(self): pass - """) + """ + ) result = testdir.runpytest(p) result.assert_outcomes(passed=1) def test_nottest_class_decorator(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import nose.tools @nose.tools.nottest class TestPrefix(object): def test_method(self): pass - """) + """ + ) reprec = testdir.inline_run() assert not reprec.getfailedcollections() calls = reprec.getreports("pytest_runtest_logreport") diff --git a/testing/test_parseopt.py b/testing/test_parseopt.py index 55983bbb1..a8fc6d724 100644 --- a/testing/test_parseopt.py +++ b/testing/test_parseopt.py @@ -13,6 +13,7 @@ def parser(): class TestParser(object): + def test_no_help_by_default(self, capsys): parser = parseopt.Parser(usage="xyz") pytest.raises(SystemExit, lambda: parser.parse(["-h"])) @@ -23,41 +24,43 @@ class TestParser(object): with pytest.raises(parseopt.ArgumentError): # need a short or long option argument = parseopt.Argument() - argument = parseopt.Argument('-t') - assert argument._short_opts == ['-t'] + argument = parseopt.Argument("-t") + assert argument._short_opts == ["-t"] assert argument._long_opts == [] - assert argument.dest == 't' - argument = parseopt.Argument('-t', '--test') - assert argument._short_opts == ['-t'] - assert argument._long_opts == ['--test'] - assert argument.dest == 'test' - argument = parseopt.Argument('-t', '--test', dest='abc') - assert argument.dest == 'abc' - assert str(argument) == ( - "Argument(_short_opts: ['-t'], _long_opts: ['--test'], dest: 'abc')" + assert argument.dest == "t" + argument = parseopt.Argument("-t", "--test") + assert argument._short_opts == ["-t"] + assert argument._long_opts == ["--test"] + assert argument.dest == "test" + argument = parseopt.Argument("-t", "--test", dest="abc") + assert argument.dest == "abc" + assert ( + str(argument) 
+ == ("Argument(_short_opts: ['-t'], _long_opts: ['--test'], dest: 'abc')") ) def test_argument_type(self): - argument = parseopt.Argument('-t', dest='abc', type=int) + argument = parseopt.Argument("-t", dest="abc", type=int) assert argument.type is int - argument = parseopt.Argument('-t', dest='abc', type=str) + argument = parseopt.Argument("-t", dest="abc", type=str) assert argument.type is str - argument = parseopt.Argument('-t', dest='abc', type=float) + argument = parseopt.Argument("-t", dest="abc", type=float) assert argument.type is float with pytest.warns(DeprecationWarning): with pytest.raises(KeyError): - argument = parseopt.Argument('-t', dest='abc', type='choice') - argument = parseopt.Argument('-t', dest='abc', type=str, - choices=['red', 'blue']) + argument = parseopt.Argument("-t", dest="abc", type="choice") + argument = parseopt.Argument( + "-t", dest="abc", type=str, choices=["red", "blue"] + ) assert argument.type is str def test_argument_processopt(self): - argument = parseopt.Argument('-t', type=int) + argument = parseopt.Argument("-t", type=int) argument.default = 42 - argument.dest = 'abc' + argument.dest = "abc" res = argument.attrs() - assert res['default'] == 42 - assert res['dest'] == 'abc' + assert res["default"] == 42 + assert res["dest"] == "abc" def test_group_add_and_get(self, parser): group = parser.getgroup("hello", description="desc") @@ -90,13 +93,16 @@ class TestParser(object): group.addoption("--option1", "--option-1", action="store_true") with pytest.raises(ValueError) as err: group.addoption("--option1", "--option-one", action="store_true") - assert str(set(["--option1"])) in str(err.value) + assert str({"--option1"}) in str(err.value) def test_group_shortopt_lowercase(self, parser): group = parser.getgroup("hello") - pytest.raises(ValueError, """ + pytest.raises( + ValueError, + """ group.addoption("-x", action="store_true") - """) + """, + ) assert len(group.options) == 0 group._addoption("-x", action="store_true") assert len(group.options) == 1 @@ -109,7 +115,7 @@ class TestParser(object): def test_parse(self, parser): parser.addoption("--hello", dest="hello", action="store") - args = parser.parse(['--hello', 'world']) + args = parser.parse(["--hello", "world"]) assert args.hello == "world" assert not getattr(args, parseopt.FILE_OR_DIR) @@ -122,15 +128,16 @@ class TestParser(object): parser.addoption("--hello", action="store_true") ns = parser.parse_known_args(["x", "--y", "--hello", "this"]) assert ns.hello - assert ns.file_or_dir == ['x'] + assert ns.file_or_dir == ["x"] def test_parse_known_and_unknown_args(self, parser): parser.addoption("--hello", action="store_true") - ns, unknown = parser.parse_known_and_unknown_args(["x", "--y", - "--hello", "this"]) + ns, unknown = parser.parse_known_and_unknown_args( + ["x", "--y", "--hello", "this"] + ) assert ns.hello - assert ns.file_or_dir == ['x'] - assert unknown == ['--y', 'this'] + assert ns.file_or_dir == ["x"] + assert unknown == ["--y", "this"] def test_parse_will_set_default(self, parser): parser.addoption("--hello", dest="hello", default="x", action="store") @@ -148,38 +155,40 @@ class TestParser(object): pass option = A() - args = parser.parse_setoption(['--hello', 'world'], option) + args = parser.parse_setoption(["--hello", "world"], option) assert option.hello == "world" assert option.world == 42 assert not args def test_parse_special_destination(self, parser): parser.addoption("--ultimate-answer", type=int) - args = parser.parse(['--ultimate-answer', '42']) + args = 
parser.parse(["--ultimate-answer", "42"]) assert args.ultimate_answer == 42 def test_parse_split_positional_arguments(self, parser): - parser.addoption("-R", action='store_true') - parser.addoption("-S", action='store_false') - args = parser.parse(['-R', '4', '2', '-S']) - assert getattr(args, parseopt.FILE_OR_DIR) == ['4', '2'] - args = parser.parse(['-R', '-S', '4', '2', '-R']) - assert getattr(args, parseopt.FILE_OR_DIR) == ['4', '2'] + parser.addoption("-R", action="store_true") + parser.addoption("-S", action="store_false") + args = parser.parse(["-R", "4", "2", "-S"]) + assert getattr(args, parseopt.FILE_OR_DIR) == ["4", "2"] + args = parser.parse(["-R", "-S", "4", "2", "-R"]) + assert getattr(args, parseopt.FILE_OR_DIR) == ["4", "2"] assert args.R is True assert args.S is False - args = parser.parse(['-R', '4', '-S', '2']) - assert getattr(args, parseopt.FILE_OR_DIR) == ['4', '2'] + args = parser.parse(["-R", "4", "-S", "2"]) + assert getattr(args, parseopt.FILE_OR_DIR) == ["4", "2"] assert args.R is True assert args.S is False def test_parse_defaultgetter(self): + def defaultget(option): - if not hasattr(option, 'type'): + if not hasattr(option, "type"): return if option.type is int: option.default = 42 elif option.type is str: option.default = "world" + parser = parseopt.Parser(processopt=defaultget) parser.addoption("--this", dest="this", type=int, action="store") parser.addoption("--hello", dest="hello", type=str, action="store") @@ -190,73 +199,88 @@ class TestParser(object): assert option.no is False def test_drop_short_helper(self): - parser = argparse.ArgumentParser(formatter_class=parseopt.DropShorterLongHelpFormatter) - parser.add_argument('-t', '--twoword', '--duo', '--two-word', '--two', - help='foo').map_long_option = {'two': 'two-word'} + parser = argparse.ArgumentParser( + formatter_class=parseopt.DropShorterLongHelpFormatter + ) + parser.add_argument( + "-t", "--twoword", "--duo", "--two-word", "--two", help="foo" + ).map_long_option = { + "two": "two-word" + } # throws error on --deux only! 
- parser.add_argument('-d', '--deuxmots', '--deux-mots', - action='store_true', help='foo').map_long_option = {'deux': 'deux-mots'} - parser.add_argument('-s', action='store_true', help='single short') - parser.add_argument('--abc', '-a', - action='store_true', help='bar') - parser.add_argument('--klm', '-k', '--kl-m', - action='store_true', help='bar') - parser.add_argument('-P', '--pq-r', '-p', '--pqr', - action='store_true', help='bar') - parser.add_argument('--zwei-wort', '--zweiwort', '--zweiwort', - action='store_true', help='bar') - parser.add_argument('-x', '--exit-on-first', '--exitfirst', - action='store_true', help='spam').map_long_option = {'exitfirst': 'exit-on-first'} - parser.add_argument('files_and_dirs', nargs='*') - args = parser.parse_args(['-k', '--duo', 'hallo', '--exitfirst']) - assert args.twoword == 'hallo' + parser.add_argument( + "-d", "--deuxmots", "--deux-mots", action="store_true", help="foo" + ).map_long_option = { + "deux": "deux-mots" + } + parser.add_argument("-s", action="store_true", help="single short") + parser.add_argument("--abc", "-a", action="store_true", help="bar") + parser.add_argument("--klm", "-k", "--kl-m", action="store_true", help="bar") + parser.add_argument( + "-P", "--pq-r", "-p", "--pqr", action="store_true", help="bar" + ) + parser.add_argument( + "--zwei-wort", "--zweiwort", "--zweiwort", action="store_true", help="bar" + ) + parser.add_argument( + "-x", "--exit-on-first", "--exitfirst", action="store_true", help="spam" + ).map_long_option = { + "exitfirst": "exit-on-first" + } + parser.add_argument("files_and_dirs", nargs="*") + args = parser.parse_args(["-k", "--duo", "hallo", "--exitfirst"]) + assert args.twoword == "hallo" assert args.klm is True assert args.zwei_wort is False assert args.exit_on_first is True assert args.s is False - args = parser.parse_args(['--deux-mots']) + args = parser.parse_args(["--deux-mots"]) with pytest.raises(AttributeError): assert args.deux_mots is True assert args.deuxmots is True - args = parser.parse_args(['file', 'dir']) - assert '|'.join(args.files_and_dirs) == 'file|dir' + args = parser.parse_args(["file", "dir"]) + assert "|".join(args.files_and_dirs) == "file|dir" def test_drop_short_0(self, parser): - parser.addoption('--funcarg', '--func-arg', action='store_true') - parser.addoption('--abc-def', '--abc-def', action='store_true') - parser.addoption('--klm-hij', action='store_true') - args = parser.parse(['--funcarg', '--k']) + parser.addoption("--funcarg", "--func-arg", action="store_true") + parser.addoption("--abc-def", "--abc-def", action="store_true") + parser.addoption("--klm-hij", action="store_true") + args = parser.parse(["--funcarg", "--k"]) assert args.funcarg is True assert args.abc_def is False assert args.klm_hij is True def test_drop_short_2(self, parser): - parser.addoption('--func-arg', '--doit', action='store_true') - args = parser.parse(['--doit']) + parser.addoption("--func-arg", "--doit", action="store_true") + args = parser.parse(["--doit"]) assert args.func_arg is True def test_drop_short_3(self, parser): - parser.addoption('--func-arg', '--funcarg', '--doit', action='store_true') - args = parser.parse(['abcd']) + parser.addoption("--func-arg", "--funcarg", "--doit", action="store_true") + args = parser.parse(["abcd"]) assert args.func_arg is False - assert args.file_or_dir == ['abcd'] + assert args.file_or_dir == ["abcd"] def test_drop_short_help0(self, parser, capsys): - parser.addoption('--func-args', '--doit', help='foo', - action='store_true') + 
parser.addoption("--func-args", "--doit", help="foo", action="store_true") parser.parse([]) help = parser.optparser.format_help() - assert '--func-args, --doit foo' in help + assert "--func-args, --doit foo" in help # testing would be more helpful with all help generated def test_drop_short_help1(self, parser, capsys): group = parser.getgroup("general") - group.addoption('--doit', '--func-args', action='store_true', help='foo') - group._addoption("-h", "--help", action="store_true", dest="help", - help="show help message and configuration info") - parser.parse(['-h']) + group.addoption("--doit", "--func-args", action="store_true", help="foo") + group._addoption( + "-h", + "--help", + action="store_true", + dest="help", + help="show help message and configuration info", + ) + parser.parse(["-h"]) help = parser.optparser.format_help() - assert '-doit, --func-args foo' in help + assert "-doit, --func-args foo" in help def test_multiple_metavar_help(self, parser): """ @@ -264,22 +288,24 @@ class TestParser(object): in the form "--preferences=value1 value2 value3" (#2004). """ group = parser.getgroup("general") - group.addoption('--preferences', metavar=('value1', 'value2', 'value3'), nargs=3) + group.addoption( + "--preferences", metavar=("value1", "value2", "value3"), nargs=3 + ) group._addoption("-h", "--help", action="store_true", dest="help") - parser.parse(['-h']) + parser.parse(["-h"]) help = parser.optparser.format_help() - assert '--preferences=value1 value2 value3' in help + assert "--preferences=value1 value2 value3" in help def test_argcomplete(testdir, monkeypatch): - if not py.path.local.sysfind('bash'): + if not py.path.local.sysfind("bash"): pytest.skip("bash not available") script = str(testdir.tmpdir.join("test_argcomplete")) pytest_bin = sys.argv[0] if "pytest" not in os.path.basename(pytest_bin): pytest.skip("need to be run with pytest executable, not %s" % (pytest_bin,)) - with open(str(script), 'w') as fp: + with open(str(script), "w") as fp: # redirect output from argcomplete to stdin and stderr is not trivial # http://stackoverflow.com/q/12589419/1307905 # so we use bash @@ -287,14 +313,14 @@ def test_argcomplete(testdir, monkeypatch): # alternative would be exteneded Testdir.{run(),_run(),popen()} to be able # to handle a keyword argument env that replaces os.environ in popen or # extends the copy, advantage: could not forget to restore - monkeypatch.setenv('_ARGCOMPLETE', "1") - monkeypatch.setenv('_ARGCOMPLETE_IFS', "\x0b") - monkeypatch.setenv('COMP_WORDBREAKS', ' \\t\\n"\\\'><=;|&(:') + monkeypatch.setenv("_ARGCOMPLETE", "1") + monkeypatch.setenv("_ARGCOMPLETE_IFS", "\x0b") + monkeypatch.setenv("COMP_WORDBREAKS", " \\t\\n\"\\'><=;|&(:") - arg = '--fu' - monkeypatch.setenv('COMP_LINE', "pytest " + arg) - monkeypatch.setenv('COMP_POINT', str(len("pytest " + arg))) - result = testdir.run('bash', str(script), arg) + arg = "--fu" + monkeypatch.setenv("COMP_LINE", "pytest " + arg) + monkeypatch.setenv("COMP_POINT", str(len("pytest " + arg))) + result = testdir.run("bash", str(script), arg) if result.ret == 255: # argcomplete not found pytest.skip("argcomplete not available") @@ -302,9 +328,9 @@ def test_argcomplete(testdir, monkeypatch): pytest.skip("bash provided no output, argcomplete not available?") else: result.stdout.fnmatch_lines(["--funcargs", "--fulltrace"]) - os.mkdir('test_argcomplete.d') - arg = 'test_argc' - monkeypatch.setenv('COMP_LINE', "pytest " + arg) - monkeypatch.setenv('COMP_POINT', str(len('pytest ' + arg))) - result = testdir.run('bash', 
str(script), arg) + os.mkdir("test_argcomplete.d") + arg = "test_argc" + monkeypatch.setenv("COMP_LINE", "pytest " + arg) + monkeypatch.setenv("COMP_POINT", str(len("pytest " + arg))) + result = testdir.run("bash", str(script), arg) result.stdout.fnmatch_lines(["test_argcomplete", "test_argcomplete.d/"]) diff --git a/testing/test_pastebin.py b/testing/test_pastebin.py index 6b1742d14..ad7c4d0c1 100644 --- a/testing/test_pastebin.py +++ b/testing/test_pastebin.py @@ -9,12 +9,13 @@ class TestPasteCapture(object): @pytest.fixture def pastebinlist(self, monkeypatch, request): pastebinlist = [] - plugin = request.config.pluginmanager.getplugin('pastebin') - monkeypatch.setattr(plugin, 'create_new_paste', pastebinlist.append) + plugin = request.config.pluginmanager.getplugin("pastebin") + monkeypatch.setattr(plugin, "create_new_paste", pastebinlist.append) return pastebinlist def test_failed(self, testdir, pastebinlist): - testpath = testdir.makepyfile(""" + testpath = testdir.makepyfile( + """ import pytest def test_pass(): pass @@ -22,7 +23,8 @@ class TestPasteCapture(object): assert 0 def test_skip(): pytest.skip("") - """) + """ + ) reprec = testdir.inline_run(testpath, "--paste=failed") assert len(pastebinlist) == 1 s = pastebinlist[0] @@ -31,7 +33,9 @@ class TestPasteCapture(object): def test_all(self, testdir, pastebinlist): from _pytest.pytester import LineMatcher - testpath = testdir.makepyfile(""" + + testpath = testdir.makepyfile( + """ import pytest def test_pass(): pass @@ -39,45 +43,52 @@ class TestPasteCapture(object): assert 0 def test_skip(): pytest.skip("") - """) - reprec = testdir.inline_run(testpath, "--pastebin=all", '-v') + """ + ) + reprec = testdir.inline_run(testpath, "--pastebin=all", "-v") assert reprec.countoutcomes() == [1, 1, 1] assert len(pastebinlist) == 1 - contents = pastebinlist[0].decode('utf-8') + contents = pastebinlist[0].decode("utf-8") matcher = LineMatcher(contents.splitlines()) - matcher.fnmatch_lines([ - '*test_pass PASSED*', - '*test_fail FAILED*', - '*test_skip SKIPPED*', - '*== 1 failed, 1 passed, 1 skipped in *' - ]) + matcher.fnmatch_lines( + [ + "*test_pass PASSED*", + "*test_fail FAILED*", + "*test_skip SKIPPED*", + "*== 1 failed, 1 passed, 1 skipped in *", + ] + ) def test_non_ascii_paste_text(self, testdir): """Make sure that text which contains non-ascii characters is pasted correctly. See #1219. """ - testdir.makepyfile(test_unicode=""" + testdir.makepyfile( + test_unicode=""" # encoding: utf-8 def test(): assert '☺' == 1 - """) - result = testdir.runpytest('--pastebin=all') + """ + ) + result = testdir.runpytest("--pastebin=all") if sys.version_info[0] == 3: expected_msg = "*assert '☺' == 1*" else: expected_msg = "*assert '\\xe2\\x98\\xba' == 1*" - result.stdout.fnmatch_lines([ - expected_msg, - "*== 1 failed in *", - '*Sending information to Paste Service*', - ]) + result.stdout.fnmatch_lines( + [ + expected_msg, + "*== 1 failed in *", + "*Sending information to Paste Service*", + ] + ) class TestPaste(object): @pytest.fixture def pastebin(self, request): - return request.config.pluginmanager.getplugin('pastebin') + return request.config.pluginmanager.getplugin("pastebin") @pytest.fixture def mocked_urlopen(self, monkeypatch): @@ -91,27 +102,31 @@ class TestPaste(object): calls.append((url, data)) class DummyFile(object): + def read(self): # part of html of a normal response return b'View <a href="/raw/3c0c6750bd">raw</a>.'
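The exact HTML in the dummy reply is what the test depends on: the pastebin plugin recovers the paste id from the href="/raw/<id>" fragment of the service's response and rebuilds the /show/ URL asserted below. A minimal sketch of that extraction, as an illustrative reconstruction rather than the plugin's actual code (the helper name is hypothetical):

    import re

    def paste_url_from_response(body, base="https://bpaste.net"):
        # the reply contains e.g. 'View <a href="/raw/3c0c6750bd">raw</a>.'
        m = re.search(r'href="/raw/(\w+)"', body)
        if m:
            return "%s/show/%s" % (base, m.group(1))
        return "bad response: " + body
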
+ return DummyFile() if sys.version_info < (3, 0): import urllib - monkeypatch.setattr(urllib, 'urlopen', mocked) + + monkeypatch.setattr(urllib, "urlopen", mocked) else: import urllib.request - monkeypatch.setattr(urllib.request, 'urlopen', mocked) + + monkeypatch.setattr(urllib.request, "urlopen", mocked) return calls def test_create_new_paste(self, pastebin, mocked_urlopen): - result = pastebin.create_new_paste(b'full-paste-contents') - assert result == 'https://bpaste.net/show/3c0c6750bd' + result = pastebin.create_new_paste(b"full-paste-contents") + assert result == "https://bpaste.net/show/3c0c6750bd" assert len(mocked_urlopen) == 1 url, data = mocked_urlopen[0] assert type(data) is bytes - lexer = 'python3' if sys.version_info[0] == 3 else 'python' - assert url == 'https://bpaste.net' - assert 'lexer=%s' % lexer in data.decode() - assert 'code=full-paste-contents' in data.decode() - assert 'expiry=1week' in data.decode() + lexer = "python3" if sys.version_info[0] == 3 else "python" + assert url == "https://bpaste.net" + assert "lexer=%s" % lexer in data.decode() + assert "code=full-paste-contents" in data.decode() + assert "expiry=1week" in data.decode() diff --git a/testing/test_pdb.py b/testing/test_pdb.py index 575c97418..615d52e83 100644 --- a/testing/test_pdb.py +++ b/testing/test_pdb.py @@ -8,7 +8,7 @@ from _pytest.debugging import SUPPORTS_BREAKPOINT_BUILTIN import pytest -_ENVIRON_PYTHONBREAKPOINT = os.environ.get('PYTHONBREAKPOINT', '') +_ENVIRON_PYTHONBREAKPOINT = os.environ.get("PYTHONBREAKPOINT", "") def runpdb_and_get_report(testdir, source): @@ -25,6 +25,7 @@ def custom_pdb_calls(): # install dummy debugger class and track which methods were called on it class _CustomPdb(object): + def __init__(self, *args, **kwargs): called.append("init") @@ -44,6 +45,7 @@ def custom_debugger_hook(): # install dummy debugger class and track which methods were called on it class _CustomDebugger(object): + def __init__(self, *args, **kwargs): called.append("init") @@ -72,62 +74,79 @@ class TestPDB(object): def mypdb(*args): pdblist.append(args) - plugin = request.config.pluginmanager.getplugin('debugging') - monkeypatch.setattr(plugin, 'post_mortem', mypdb) + plugin = request.config.pluginmanager.getplugin("debugging") + monkeypatch.setattr(plugin, "post_mortem", mypdb) return pdblist def test_pdb_on_fail(self, testdir, pdblist): - rep = runpdb_and_get_report(testdir, """ + rep = runpdb_and_get_report( + testdir, + """ def test_func(): assert 0 - """) + """, + ) assert rep.failed assert len(pdblist) == 1 tb = _pytest._code.Traceback(pdblist[0][0]) assert tb[-1].name == "test_func" def test_pdb_on_xfail(self, testdir, pdblist): - rep = runpdb_and_get_report(testdir, """ + rep = runpdb_and_get_report( + testdir, + """ import pytest @pytest.mark.xfail def test_func(): assert 0 - """) + """, + ) assert "xfail" in rep.keywords assert not pdblist def test_pdb_on_skip(self, testdir, pdblist): - rep = runpdb_and_get_report(testdir, """ + rep = runpdb_and_get_report( + testdir, + """ import pytest def test_func(): pytest.skip("hello") - """) + """, + ) assert rep.skipped assert len(pdblist) == 0 def test_pdb_on_BdbQuit(self, testdir, pdblist): - rep = runpdb_and_get_report(testdir, """ + rep = runpdb_and_get_report( + testdir, + """ import bdb def test_func(): raise bdb.BdbQuit - """) + """, + ) assert rep.failed assert len(pdblist) == 0 def test_pdb_on_KeyboardInterrupt(self, testdir, pdblist): - rep = runpdb_and_get_report(testdir, """ + rep = runpdb_and_get_report( + testdir, + """ def 
test_func(): raise KeyboardInterrupt - """) + """, + ) assert rep.failed assert len(pdblist) == 1 def test_pdb_interaction(self, testdir): - p1 = testdir.makepyfile(""" + p1 = testdir.makepyfile( + """ def test_1(): i = 0 assert i == 1 - """) + """ + ) child = testdir.spawn_pytest("--pdb %s" % p1) child.expect(".*def test_1") child.expect(".*i = 0") @@ -140,13 +159,14 @@ class TestPDB(object): @staticmethod def flush(child): - if platform.system() == 'Darwin': + if platform.system() == "Darwin": return if child.isalive(): child.wait() def test_pdb_unittest_postmortem(self, testdir): - p1 = testdir.makepyfile(""" + p1 = testdir.makepyfile( + """ import unittest class Blub(unittest.TestCase): def tearDown(self): @@ -154,36 +174,41 @@ class TestPDB(object): def test_false(self): self.filename = 'debug' + '.me' assert 0 - """) + """ + ) child = testdir.spawn_pytest("--pdb %s" % p1) - child.expect('(Pdb)') - child.sendline('p self.filename') + child.expect("(Pdb)") + child.sendline("p self.filename") child.sendeof() rest = child.read().decode("utf8") - assert 'debug.me' in rest + assert "debug.me" in rest self.flush(child) def test_pdb_unittest_skip(self, testdir): """Test for issue #2137""" - p1 = testdir.makepyfile(""" + p1 = testdir.makepyfile( + """ import unittest @unittest.skipIf(True, 'Skipping also with pdb active') class MyTestCase(unittest.TestCase): def test_one(self): assert 0 - """) + """ + ) child = testdir.spawn_pytest("-rs --pdb %s" % p1) - child.expect('Skipping also with pdb active') - child.expect('1 skipped in') + child.expect("Skipping also with pdb active") + child.expect("1 skipped in") child.sendeof() self.flush(child) def test_pdb_print_captured_stdout(self, testdir): - p1 = testdir.makepyfile(""" + p1 = testdir.makepyfile( + """ def test_1(): print("get\\x20rekt") assert False - """) + """ + ) child = testdir.spawn_pytest("--pdb %s" % p1) child.expect("captured stdout") child.expect("get rekt") @@ -195,12 +220,14 @@ class TestPDB(object): self.flush(child) def test_pdb_print_captured_stderr(self, testdir): - p1 = testdir.makepyfile(""" + p1 = testdir.makepyfile( + """ def test_1(): import sys sys.stderr.write("get\\x20rekt") assert False - """) + """ + ) child = testdir.spawn_pytest("--pdb %s" % p1) child.expect("captured stderr") child.expect("get rekt") @@ -212,10 +239,12 @@ class TestPDB(object): self.flush(child) def test_pdb_dont_print_empty_captured_stdout_and_stderr(self, testdir): - p1 = testdir.makepyfile(""" + p1 = testdir.makepyfile( + """ def test_1(): assert False - """) + """ + ) child = testdir.spawn_pytest("--pdb %s" % p1) child.expect("(Pdb)") output = child.before.decode("utf8") @@ -224,16 +253,18 @@ class TestPDB(object): assert "captured stderr" not in output self.flush(child) - @pytest.mark.parametrize('showcapture', ['all', 'no', 'log']) + @pytest.mark.parametrize("showcapture", ["all", "no", "log"]) def test_pdb_print_captured_logs(self, testdir, showcapture): - p1 = testdir.makepyfile(""" + p1 = testdir.makepyfile( + """ def test_1(): import logging logging.warn("get " + "rekt") assert False - """) + """ + ) child = testdir.spawn_pytest("--show-capture=%s --pdb %s" % (showcapture, p1)) - if showcapture in ('all', 'log'): + if showcapture in ("all", "log"): child.expect("captured log") child.expect("get rekt") child.expect("(Pdb)") @@ -243,14 +274,17 @@ class TestPDB(object): self.flush(child) def test_pdb_print_captured_logs_nologging(self, testdir): - p1 = testdir.makepyfile(""" + p1 = testdir.makepyfile( + """ def test_1(): import logging 
logging.warn("get " + "rekt") assert False - """) - child = testdir.spawn_pytest("--show-capture=all --pdb " - "-p no:logging %s" % p1) + """ + ) + child = testdir.spawn_pytest( + "--show-capture=all --pdb " "-p no:logging %s" % p1 + ) child.expect("get rekt") output = child.before.decode("utf8") assert "captured log" not in output @@ -261,13 +295,15 @@ class TestPDB(object): self.flush(child) def test_pdb_interaction_exception(self, testdir): - p1 = testdir.makepyfile(""" + p1 = testdir.makepyfile( + """ import pytest def globalfunc(): pass def test_1(): pytest.raises(ValueError, globalfunc) - """) + """ + ) child = testdir.spawn_pytest("--pdb %s" % p1) child.expect(".*def test_1") child.expect(".*pytest.raises.*globalfunc") @@ -279,10 +315,12 @@ class TestPDB(object): self.flush(child) def test_pdb_interaction_on_collection_issue181(self, testdir): - p1 = testdir.makepyfile(""" + p1 = testdir.makepyfile( + """ import pytest xxx - """) + """ + ) child = testdir.spawn_pytest("--pdb %s" % p1) # child.expect(".*import pytest.*") child.expect("(Pdb)") @@ -291,10 +329,12 @@ class TestPDB(object): self.flush(child) def test_pdb_interaction_on_internal_error(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_runtest_protocol(): 0/0 - """) + """ + ) p1 = testdir.makepyfile("def test_func(): pass") child = testdir.spawn_pytest("--pdb %s" % p1) # child.expect(".*import pytest.*") @@ -303,14 +343,16 @@ class TestPDB(object): self.flush(child) def test_pdb_interaction_capturing_simple(self, testdir): - p1 = testdir.makepyfile(""" + p1 = testdir.makepyfile( + """ import pytest def test_1(): i = 0 print ("hello17") pytest.set_trace() x = 3 - """) + """ + ) child = testdir.spawn_pytest(str(p1)) child.expect("test_1") child.expect("x = 3") @@ -323,11 +365,13 @@ class TestPDB(object): self.flush(child) def test_pdb_set_trace_interception(self, testdir): - p1 = testdir.makepyfile(""" + p1 = testdir.makepyfile( + """ import pdb def test_1(): pdb.set_trace() - """) + """ + ) child = testdir.spawn_pytest(str(p1)) child.expect("test_1") child.expect("(Pdb)") @@ -338,12 +382,14 @@ class TestPDB(object): self.flush(child) def test_pdb_and_capsys(self, testdir): - p1 = testdir.makepyfile(""" + p1 = testdir.makepyfile( + """ import pytest def test_1(capsys): print ("hello1") pytest.set_trace() - """) + """ + ) child = testdir.spawn_pytest(str(p1)) child.expect("test_1") child.send("capsys.readouterr()\n") @@ -353,14 +399,16 @@ class TestPDB(object): self.flush(child) def test_set_trace_capturing_afterwards(self, testdir): - p1 = testdir.makepyfile(""" + p1 = testdir.makepyfile( + """ import pdb def test_1(): pdb.set_trace() def test_2(): print ("hello") assert 0 - """) + """ + ) child = testdir.spawn_pytest(str(p1)) child.expect("test_1") child.send("c\n") @@ -372,17 +420,19 @@ class TestPDB(object): self.flush(child) def test_pdb_interaction_doctest(self, testdir): - p1 = testdir.makepyfile(""" + p1 = testdir.makepyfile( + """ import pytest def function_1(): ''' >>> i = 0 >>> assert i == 1 ''' - """) + """ + ) child = testdir.spawn_pytest("--doctest-modules --pdb %s" % p1) child.expect("(Pdb)") - child.sendline('i') + child.sendline("i") child.expect("0") child.expect("(Pdb)") child.sendeof() @@ -391,7 +441,8 @@ class TestPDB(object): self.flush(child) def test_pdb_interaction_capturing_twice(self, testdir): - p1 = testdir.makepyfile(""" + p1 = testdir.makepyfile( + """ import pytest def test_1(): i = 0 @@ -401,12 +452,13 @@ class TestPDB(object): print ("hello18") 
pytest.set_trace() x = 4 - """) + """ + ) child = testdir.spawn_pytest(str(p1)) child.expect("test_1") child.expect("x = 3") child.expect("(Pdb)") - child.sendline('c') + child.sendline("c") child.expect("x = 4") child.sendeof() rest = child.read().decode("utf8") @@ -417,25 +469,29 @@ class TestPDB(object): self.flush(child) def test_pdb_used_outside_test(self, testdir): - p1 = testdir.makepyfile(""" + p1 = testdir.makepyfile( + """ import pytest pytest.set_trace() x = 5 - """) + """ + ) child = testdir.spawn("%s %s" % (sys.executable, p1)) child.expect("x = 5") child.sendeof() self.flush(child) def test_pdb_used_in_generate_tests(self, testdir): - p1 = testdir.makepyfile(""" + p1 = testdir.makepyfile( + """ import pytest def pytest_generate_tests(metafunc): pytest.set_trace() x = 5 def test_foo(a): pass - """) + """ + ) child = testdir.spawn_pytest(str(p1)) child.expect("x = 5") child.sendeof() @@ -444,68 +500,65 @@ class TestPDB(object): def test_pdb_collection_failure_is_shown(self, testdir): p1 = testdir.makepyfile("xxx") result = testdir.runpytest_subprocess("--pdb", p1) - result.stdout.fnmatch_lines([ - "*NameError*xxx*", - "*1 error*", - ]) + result.stdout.fnmatch_lines(["*NameError*xxx*", "*1 error*"]) def test_enter_pdb_hook_is_called(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_enter_pdb(config): assert config.testing_verification == 'configured' print 'enter_pdb_hook' def pytest_configure(config): config.testing_verification = 'configured' - """) - p1 = testdir.makepyfile(""" + """ + ) + p1 = testdir.makepyfile( + """ import pytest def test_foo(): pytest.set_trace() - """) + """ + ) child = testdir.spawn_pytest(str(p1)) child.expect("enter_pdb_hook") - child.send('c\n') + child.send("c\n") child.sendeof() self.flush(child) def test_pdb_custom_cls(self, testdir, custom_pdb_calls): p1 = testdir.makepyfile("""xxx """) - result = testdir.runpytest_inprocess( - "--pdb", "--pdbcls=_pytest:_CustomPdb", p1) - result.stdout.fnmatch_lines([ - "*NameError*xxx*", - "*1 error*", - ]) + result = testdir.runpytest_inprocess("--pdb", "--pdbcls=_pytest:_CustomPdb", p1) + result.stdout.fnmatch_lines(["*NameError*xxx*", "*1 error*"]) assert custom_pdb_calls == ["init", "reset", "interaction"] def test_pdb_custom_cls_without_pdb(self, testdir, custom_pdb_calls): p1 = testdir.makepyfile("""xxx """) - result = testdir.runpytest_inprocess( - "--pdbcls=_pytest:_CustomPdb", p1) - result.stdout.fnmatch_lines([ - "*NameError*xxx*", - "*1 error*", - ]) + result = testdir.runpytest_inprocess("--pdbcls=_pytest:_CustomPdb", p1) + result.stdout.fnmatch_lines(["*NameError*xxx*", "*1 error*"]) assert custom_pdb_calls == [] def test_pdb_custom_cls_with_settrace(self, testdir, monkeypatch): - testdir.makepyfile(custom_pdb=""" + testdir.makepyfile( + custom_pdb=""" class CustomPdb(object): def set_trace(*args, **kwargs): print 'custom set_trace>' - """) - p1 = testdir.makepyfile(""" + """ + ) + p1 = testdir.makepyfile( + """ import pytest def test_foo(): pytest.set_trace() - """) - monkeypatch.setenv('PYTHONPATH', str(testdir.tmpdir)) + """ + ) + monkeypatch.setenv("PYTHONPATH", str(testdir.tmpdir)) child = testdir.spawn_pytest("--pdbcls=custom_pdb:CustomPdb %s" % str(p1)) - child.expect('custom set_trace>') + child.expect("custom set_trace>") self.flush(child) @@ -523,14 +576,17 @@ class TestDebuggingBreakpoints(object): if sys.version_info.major == 2 and sys.version_info.minor == 7: assert SUPPORTS_BREAKPOINT_BUILTIN is False - @pytest.mark.skipif(not 
SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin") - @pytest.mark.parametrize('arg', ['--pdb', '']) + @pytest.mark.skipif( + not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin" + ) + @pytest.mark.parametrize("arg", ["--pdb", ""]) def test_sys_breakpointhook_configure_and_unconfigure(self, testdir, arg): """ Test that sys.breakpointhook is set to the custom Pdb class once configured, test that hook is reset to system value once pytest has been unconfigured """ - testdir.makeconftest(""" + testdir.makeconftest( + """ import sys from pytest import hookimpl from _pytest.debugging import pytestPDB @@ -543,34 +599,40 @@ class TestDebuggingBreakpoints(object): def test_check(): assert sys.breakpointhook == pytestPDB.set_trace - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ def test_nothing(): pass - """) + """ + ) args = (arg,) if arg else () result = testdir.runpytest_subprocess(*args) - result.stdout.fnmatch_lines([ - '*1 passed in *', - ]) + result.stdout.fnmatch_lines(["*1 passed in *"]) - @pytest.mark.skipif(not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin") + @pytest.mark.skipif( + not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin" + ) def test_pdb_custom_cls(self, testdir, custom_debugger_hook): - p1 = testdir.makepyfile(""" + p1 = testdir.makepyfile( + """ def test_nothing(): breakpoint() - """) + """ + ) result = testdir.runpytest_inprocess( - "--pdb", "--pdbcls=_pytest:_CustomDebugger", p1) - result.stdout.fnmatch_lines([ - "*CustomDebugger*", - "*1 passed*", - ]) + "--pdb", "--pdbcls=_pytest:_CustomDebugger", p1 + ) + result.stdout.fnmatch_lines(["*CustomDebugger*", "*1 passed*"]) assert custom_debugger_hook == ["init", "set_trace"] - @pytest.mark.parametrize('arg', ['--pdb', '']) - @pytest.mark.skipif(not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin") + @pytest.mark.parametrize("arg", ["--pdb", ""]) + @pytest.mark.skipif( + not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin" + ) def test_environ_custom_class(self, testdir, custom_debugger_hook, arg): - testdir.makeconftest(""" + testdir.makeconftest( + """ import os import sys @@ -585,23 +647,31 @@ class TestDebuggingBreakpoints(object): def test_check(): import _pytest assert sys.breakpointhook is _pytest._CustomDebugger.set_trace - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ def test_nothing(): pass - """) + """ + ) args = (arg,) if arg else () result = testdir.runpytest_subprocess(*args) - result.stdout.fnmatch_lines([ - '*1 passed in *', - ]) + result.stdout.fnmatch_lines(["*1 passed in *"]) - @pytest.mark.skipif(not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin") - @pytest.mark.skipif(not _ENVIRON_PYTHONBREAKPOINT == '', reason="Requires breakpoint() default value") + @pytest.mark.skipif( + not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin" + ) + @pytest.mark.skipif( + not _ENVIRON_PYTHONBREAKPOINT == "", + reason="Requires breakpoint() default value", + ) def test_sys_breakpoint_interception(self, testdir): - p1 = testdir.makepyfile(""" + p1 = testdir.makepyfile( + """ def test_1(): breakpoint() - """) + """ + ) child = testdir.spawn_pytest(str(p1)) child.expect("test_1") child.expect("(Pdb)") @@ -611,13 +681,17 @@ class TestDebuggingBreakpoints(object): assert "reading from stdin while output" not in rest TestPDB.flush(child) - @pytest.mark.skipif(not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin") + 
@pytest.mark.skipif( + not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin" + ) def test_pdb_not_altered(self, testdir): - p1 = testdir.makepyfile(""" + p1 = testdir.makepyfile( + """ import pdb def test_1(): pdb.set_trace() - """) + """ + ) child = testdir.spawn_pytest(str(p1)) child.expect("test_1") child.expect("(Pdb)") diff --git a/testing/test_pluginmanager.py b/testing/test_pluginmanager.py index 503ba8454..22cea4207 100644 --- a/testing/test_pluginmanager.py +++ b/testing/test_pluginmanager.py @@ -16,72 +16,86 @@ def pytestpm(): class TestPytestPluginInteractions(object): + def test_addhooks_conftestplugin(self, testdir): - testdir.makepyfile(newhooks=""" + testdir.makepyfile( + newhooks=""" def pytest_myhook(xyz): "new hook" - """) - conf = testdir.makeconftest(""" + """ + ) + conf = testdir.makeconftest( + """ import sys ; sys.path.insert(0, '.') import newhooks def pytest_addhooks(pluginmanager): pluginmanager.addhooks(newhooks) def pytest_myhook(xyz): return xyz + 1 - """) + """ + ) config = get_config() pm = config.pluginmanager pm.hook.pytest_addhooks.call_historic( - kwargs=dict(pluginmanager=config.pluginmanager)) + kwargs=dict(pluginmanager=config.pluginmanager) + ) config.pluginmanager._importconftest(conf) # print(config.pluginmanager.get_plugins()) res = config.hook.pytest_myhook(xyz=10) assert res == [11] def test_addhooks_nohooks(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ import sys def pytest_addhooks(pluginmanager): pluginmanager.addhooks(sys) - """) + """ + ) res = testdir.runpytest() assert res.ret != 0 - res.stderr.fnmatch_lines([ - "*did not find*sys*" - ]) + res.stderr.fnmatch_lines(["*did not find*sys*"]) def test_namespace_early_from_import(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ from pytest import Item from pytest import Item as Item2 assert Item is Item2 - """) + """ + ) result = testdir.runpython(p) assert result.ret == 0 def test_do_ext_namespace(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_namespace(): return {'hello': 'world'} - """) - p = testdir.makepyfile(""" + """ + ) + p = testdir.makepyfile( + """ from pytest import hello import pytest def test_hello(): assert hello == "world" assert 'hello' in pytest.__all__ - """) + """ + ) reprec = testdir.inline_run(p) reprec.assertoutcome(passed=1) def test_do_option_postinitialize(self, testdir): config = testdir.parseconfigure() - assert not hasattr(config.option, 'test123') - p = testdir.makepyfile(""" + assert not hasattr(config.option, "test123") + p = testdir.makepyfile( + """ def pytest_addoption(parser): parser.addoption('--test123', action="store_true", default=True) - """) + """ + ) config.pluginmanager._importconftest(p) assert config.option.test123 @@ -90,6 +104,7 @@ class TestPytestPluginInteractions(object): values = [] class A(object): + def pytest_configure(self, config): values.append(self) @@ -110,10 +125,12 @@ class TestPytestPluginInteractions(object): saveindent = [] class api1(object): + def pytest_plugin_registered(self): saveindent.append(pytestpm.trace.root.indent) class api2(object): + def pytest_plugin_registered(self): saveindent.append(pytestpm.trace.root.indent) raise ValueError() @@ -127,8 +144,8 @@ class TestPytestPluginInteractions(object): pytestpm.register(p) assert pytestpm.trace.root.indent == indent assert len(values) >= 2 - assert 'pytest_plugin_registered' in values[0] - assert 'finish' in values[1] + assert "pytest_plugin_registered" in values[0] 
+ assert "finish" in values[1] values[:] = [] with pytest.raises(ValueError): @@ -142,29 +159,28 @@ class TestPytestPluginInteractions(object): """Test the gethookproxy function(#2016)""" config = testdir.parseconfig() session = Session(config) - testdir.makepyfile(**{ - 'tests/conftest.py': '', - 'tests/subdir/conftest.py': '', - }) + testdir.makepyfile(**{"tests/conftest.py": "", "tests/subdir/conftest.py": ""}) - conftest1 = testdir.tmpdir.join('tests/conftest.py') - conftest2 = testdir.tmpdir.join('tests/subdir/conftest.py') + conftest1 = testdir.tmpdir.join("tests/conftest.py") + conftest2 = testdir.tmpdir.join("tests/subdir/conftest.py") config.pluginmanager._importconftest(conftest1) - ihook_a = session.gethookproxy(testdir.tmpdir.join('tests')) + ihook_a = session.gethookproxy(testdir.tmpdir.join("tests")) assert ihook_a is not None config.pluginmanager._importconftest(conftest2) - ihook_b = session.gethookproxy(testdir.tmpdir.join('tests')) + ihook_b = session.gethookproxy(testdir.tmpdir.join("tests")) assert ihook_a is not ihook_b def test_warn_on_deprecated_addhooks(self, pytestpm): warnings = [] class get_warnings(object): + def pytest_logwarning(self, code, fslocation, message, nodeid): warnings.append(message) class Plugin(object): + def pytest_testhook(): pass @@ -176,20 +192,19 @@ class TestPytestPluginInteractions(object): def test_namespace_has_default_and_env_plugins(testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import pytest pytest.mark - """) + """ + ) result = testdir.runpython(p) assert result.ret == 0 def test_default_markers(testdir): result = testdir.runpytest("--markers") - result.stdout.fnmatch_lines([ - "*tryfirst*first*", - "*trylast*last*", - ]) + result.stdout.fnmatch_lines(["*tryfirst*first*", "*trylast*last*"]) def test_importplugin_error_message(testdir, pytestpm): @@ -199,12 +214,14 @@ def test_importplugin_error_message(testdir, pytestpm): See #375 and #1998. 
""" testdir.syspathinsert(testdir.tmpdir) - testdir.makepyfile(qwe=""" + testdir.makepyfile( + qwe=""" # encoding: UTF-8 def test_traceback(): raise ImportError(u'Not possible to import: ☺') test_traceback() - """) + """ + ) with pytest.raises(ImportError) as excinfo: pytestpm.import_plugin("qwe") @@ -215,6 +232,7 @@ def test_importplugin_error_message(testdir, pytestpm): class TestPytestPluginManager(object): + def test_register_imported_modules(self): pm = PytestPluginManager() mod = types.ModuleType("x.y.pytest_hello") @@ -229,10 +247,10 @@ class TestPytestPluginManager(object): def test_canonical_import(self, monkeypatch): mod = types.ModuleType("pytest_xyz") - monkeypatch.setitem(sys.modules, 'pytest_xyz', mod) + monkeypatch.setitem(sys.modules, "pytest_xyz", mod) pm = PytestPluginManager() - pm.import_plugin('pytest_xyz') - assert pm.get_plugin('pytest_xyz') == mod + pm.import_plugin("pytest_xyz") + assert pm.get_plugin("pytest_xyz") == mod assert pm.is_registered(mod) def test_consider_module(self, testdir, pytestpm): @@ -263,46 +281,49 @@ class TestPytestPluginManager(object): assert len(values) == 1 def test_consider_env_fails_to_import(self, monkeypatch, pytestpm): - monkeypatch.setenv('PYTEST_PLUGINS', 'nonexisting', prepend=",") + monkeypatch.setenv("PYTEST_PLUGINS", "nonexisting", prepend=",") with pytest.raises(ImportError): pytestpm.consider_env() def test_plugin_skip(self, testdir, monkeypatch): - p = testdir.makepyfile(skipping1=""" + p = testdir.makepyfile( + skipping1=""" import pytest pytest.skip("hello") - """) + """ + ) p.copy(p.dirpath("skipping2.py")) monkeypatch.setenv("PYTEST_PLUGINS", "skipping2") result = testdir.runpytest("-rw", "-p", "skipping1", syspathinsert=True) assert result.ret == EXIT_NOTESTSCOLLECTED - result.stdout.fnmatch_lines([ - "*skipped plugin*skipping1*hello*", - "*skipped plugin*skipping2*hello*", - ]) + result.stdout.fnmatch_lines( + ["*skipped plugin*skipping1*hello*", "*skipped plugin*skipping2*hello*"] + ) def test_consider_env_plugin_instantiation(self, testdir, monkeypatch, pytestpm): testdir.syspathinsert() testdir.makepyfile(xy123="#") - monkeypatch.setitem(os.environ, 'PYTEST_PLUGINS', 'xy123') + monkeypatch.setitem(os.environ, "PYTEST_PLUGINS", "xy123") l1 = len(pytestpm.get_plugins()) pytestpm.consider_env() l2 = len(pytestpm.get_plugins()) assert l2 == l1 + 1 - assert pytestpm.get_plugin('xy123') + assert pytestpm.get_plugin("xy123") pytestpm.consider_env() l3 = len(pytestpm.get_plugins()) assert l2 == l3 def test_pluginmanager_ENV_startup(self, testdir, monkeypatch): testdir.makepyfile(pytest_x500="#") - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import pytest def test_hello(pytestconfig): plugin = pytestconfig.pluginmanager.get_plugin('pytest_x500') assert plugin is not None - """) - monkeypatch.setenv('PYTEST_PLUGINS', 'pytest_x500', prepend=",") + """ + ) + monkeypatch.setenv("PYTEST_PLUGINS", "pytest_x500", prepend=",") result = testdir.runpytest(p, syspathinsert=True) assert result.ret == 0 result.stdout.fnmatch_lines(["*1 passed*"]) @@ -320,7 +341,7 @@ class TestPytestPluginManager(object): len2 = len(pytestpm.get_plugins()) assert len1 == len2 plugin1 = pytestpm.get_plugin("pytest_hello") - assert plugin1.__name__.endswith('pytest_hello') + assert plugin1.__name__.endswith("pytest_hello") plugin2 = pytestpm.get_plugin("pytest_hello") assert plugin2 is plugin1 @@ -342,9 +363,11 @@ class TestPytestPluginManager(object): class TestPytestPluginManagerBootstrapming(object): + def test_preparse_args(self, 
pytestpm): - pytest.raises(ImportError, lambda: - pytestpm.consider_preparse(["xyz", "-p", "hello123"])) + pytest.raises( + ImportError, lambda: pytestpm.consider_preparse(["xyz", "-p", "hello123"]) + ) def test_plugin_prevent_register(self, pytestpm): pytestpm.consider_preparse(["xyz", "-p", "no:abc"]) diff --git a/testing/test_pytester.py b/testing/test_pytester.py index 87063371a..b74c0b7f7 100644 --- a/testing/test_pytester.py +++ b/testing/test_pytester.py @@ -37,6 +37,7 @@ def test_make_hook_recorder(testdir): failed = False skipped = True when = "call" + rep.passed = False rep.skipped = True recorder.hook.pytest_runtest_logreport(report=rep) @@ -71,24 +72,28 @@ def test_parseconfig(testdir): def test_testdir_runs_with_plugin(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ pytest_plugins = "pytester" def test_hello(testdir): assert 1 - """) + """ + ) result = testdir.runpytest() result.assert_outcomes(passed=1) def make_holder(): + class apiclass(object): + def pytest_xyz(self, arg): "x" def pytest_xyz_noarg(self): "x" - apimod = type(os)('api') + apimod = type(os)("api") def pytest_xyz(arg): "x" @@ -130,12 +135,15 @@ def test_makepyfile_utf8(testdir): utf8_contents = u""" def setup_function(function): mixed_encoding = u'São Paulo' - """.encode('utf-8') + """.encode( + "utf-8" + ) p = testdir.makepyfile(utf8_contents) - assert u"mixed_encoding = u'São Paulo'".encode('utf-8') in p.read('rb') + assert u"mixed_encoding = u'São Paulo'".encode("utf-8") in p.read("rb") class TestInlineRunModulesCleanup(object): + def test_inline_run_test_module_not_cleaned_up(self, testdir): test_mod = testdir.makepyfile("def test_foo(): assert True") result = testdir.inline_run(str(test_mod)) @@ -146,6 +154,7 @@ class TestInlineRunModulesCleanup(object): assert result2.ret == EXIT_TESTSFAILED def spy_factory(self): + class SysModulesSnapshotSpy(object): instances = [] @@ -158,19 +167,23 @@ class TestInlineRunModulesCleanup(object): def restore(self): self._spy_restore_count += 1 return self.__snapshot.restore() + return SysModulesSnapshotSpy def test_inline_run_taking_and_restoring_a_sys_modules_snapshot( - self, testdir, monkeypatch): + self, testdir, monkeypatch + ): spy_factory = self.spy_factory() monkeypatch.setattr(pytester, "SysModulesSnapshot", spy_factory) original = dict(sys.modules) testdir.syspathinsert() testdir.makepyfile(import1="# you son of a silly person") testdir.makepyfile(import2="# my hovercraft is full of eels") - test_mod = testdir.makepyfile(""" + test_mod = testdir.makepyfile( + """ import import1 - def test_foo(): import import2""") + def test_foo(): import import2""" + ) testdir.inline_run(str(test_mod)) assert len(spy_factory.instances) == 1 spy = spy_factory.instances[0] @@ -179,7 +192,8 @@ class TestInlineRunModulesCleanup(object): assert all(sys.modules[x] is original[x] for x in sys.modules) def test_inline_run_sys_modules_snapshot_restore_preserving_modules( - self, testdir, monkeypatch): + self, testdir, monkeypatch + ): spy_factory = self.spy_factory() monkeypatch.setattr(pytester, "SysModulesSnapshot", spy_factory) test_mod = testdir.makepyfile("def test_foo(): pass") @@ -194,15 +208,19 @@ class TestInlineRunModulesCleanup(object): testdir.syspathinsert() testdir.makepyfile(imported="data = 'you son of a silly person'") import imported - test_mod = testdir.makepyfile(""" + + test_mod = testdir.makepyfile( + """ def test_foo(): import imported - imported.data = 42""") + imported.data = 42""" + ) testdir.inline_run(str(test_mod)) assert imported.data == 42 
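The SysModulesSnapshotSpy above only counts calls; the behaviour the surrounding tests pin down belongs to pytester's SysModulesSnapshot itself: copy sys.modules up front, put the copy back wholesale on restore, and keep the live value only for keys selected by an optional preserve predicate. A minimal sketch of that contract, as an illustrative reconstruction under those assumptions rather than pytester's actual code:

    import sys

    class SnapshotSketch(object):
        # Hypothetical stand-in for pytester's SysModulesSnapshot.

        def __init__(self, preserve=None):
            self._preserve = preserve
            self._saved = dict(sys.modules)  # shallow copy taken up front

        def restore(self):
            if self._preserve is not None:
                # preserved keys keep whatever module is live *now*
                self._saved.update(
                    (k, mod) for k, mod in sys.modules.items() if self._preserve(k)
                )
            sys.modules.clear()
            sys.modules.update(self._saved)

The preserve hook is what lets the snapshot tests that follow assert that modules modified while a snapshot is active survive a restore when (and only when) the predicate selects them.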
def test_inline_run_clean_sys_paths(testdir): + def test_sys_path_change_cleanup(self, testdir): test_path1 = testdir.tmpdir.join("boink1").strpath test_path2 = testdir.tmpdir.join("boink2").strpath @@ -211,18 +229,23 @@ def test_inline_run_clean_sys_paths(testdir): sys.meta_path.append(test_path1) original_path = list(sys.path) original_meta_path = list(sys.meta_path) - test_mod = testdir.makepyfile(""" + test_mod = testdir.makepyfile( + """ import sys sys.path.append({:test_path2}) sys.meta_path.append({:test_path2}) def test_foo(): sys.path.append({:test_path3}) - sys.meta_path.append({:test_path3})""".format(locals())) + sys.meta_path.append({:test_path3})""".format( + locals() + ) + ) testdir.inline_run(str(test_mod)) assert sys.path == original_path assert sys.meta_path == original_meta_path def spy_factory(self): + class SysPathsSnapshotSpy(object): instances = [] @@ -234,10 +257,12 @@ def test_inline_run_clean_sys_paths(testdir): def restore(self): self._spy_restore_count += 1 return self.__snapshot.restore() + return SysPathsSnapshotSpy def test_inline_run_taking_and_restoring_a_sys_paths_snapshot( - self, testdir, monkeypatch): + self, testdir, monkeypatch + ): spy_factory = self.spy_factory() monkeypatch.setattr(pytester, "SysPathsSnapshot", spy_factory) test_mod = testdir.makepyfile("def test_foo(): pass") @@ -250,14 +275,14 @@ def test_inline_run_clean_sys_paths(testdir): def test_assert_outcomes_after_pytest_error(testdir): testdir.makepyfile("def test_foo(): assert True") - result = testdir.runpytest('--unexpected-argument') + result = testdir.runpytest("--unexpected-argument") with pytest.raises(ValueError, message="Pytest terminal report not found"): result.assert_outcomes(passed=0) def test_cwd_snapshot(tmpdir): - foo = tmpdir.ensure('foo', dir=1) - bar = tmpdir.ensure('bar', dir=1) + foo = tmpdir.ensure("foo", dir=1) + bar = tmpdir.ensure("bar", dir=1) foo.chdir() snapshot = CwdSnapshot() bar.chdir() @@ -267,20 +292,20 @@ def test_cwd_snapshot(tmpdir): class TestSysModulesSnapshot(object): - key = 'my-test-module' + key = "my-test-module" def test_remove_added(self): original = dict(sys.modules) assert self.key not in sys.modules snapshot = SysModulesSnapshot() - sys.modules[self.key] = 'something' + sys.modules[self.key] = "something" assert self.key in sys.modules snapshot.restore() assert sys.modules == original def test_add_removed(self, monkeypatch): assert self.key not in sys.modules - monkeypatch.setitem(sys.modules, self.key, 'something') + monkeypatch.setitem(sys.modules, self.key, "something") assert self.key in sys.modules original = dict(sys.modules) snapshot = SysModulesSnapshot() @@ -291,11 +316,11 @@ class TestSysModulesSnapshot(object): def test_restore_reloaded(self, monkeypatch): assert self.key not in sys.modules - monkeypatch.setitem(sys.modules, self.key, 'something') + monkeypatch.setitem(sys.modules, self.key, "something") assert self.key in sys.modules original = dict(sys.modules) snapshot = SysModulesSnapshot() - sys.modules[self.key] = 'something else' + sys.modules[self.key] = "something else" snapshot.restore() assert sys.modules == original @@ -303,16 +328,16 @@ class TestSysModulesSnapshot(object): key = [self.key + str(i) for i in range(3)] assert not any(k in sys.modules for k in key) for i, k in enumerate(key): - monkeypatch.setitem(sys.modules, k, 'something' + str(i)) + monkeypatch.setitem(sys.modules, k, "something" + str(i)) original = dict(sys.modules) def preserve(name): - return name in (key[0], key[1], 'some-other-key') + return 
name in (key[0], key[1], "some-other-key") snapshot = SysModulesSnapshot(preserve=preserve) - sys.modules[key[0]] = original[key[0]] = 'something else0' - sys.modules[key[1]] = original[key[1]] = 'something else1' - sys.modules[key[2]] = 'something else2' + sys.modules[key[0]] = original[key[0]] = "something else0" + sys.modules[key[1]] = original[key[1]] = "something else1" + sys.modules[key[2]] = "something else2" snapshot.restore() assert sys.modules == original @@ -320,23 +345,21 @@ class TestSysModulesSnapshot(object): original = dict(sys.modules) assert self.key not in original replacement = dict(sys.modules) - replacement[self.key] = 'life of brian' + replacement[self.key] = "life of brian" snapshot = SysModulesSnapshot() - monkeypatch.setattr(sys, 'modules', replacement) + monkeypatch.setattr(sys, "modules", replacement) snapshot.restore() assert sys.modules is replacement assert sys.modules == original -@pytest.mark.parametrize('path_type', ('path', 'meta_path')) +@pytest.mark.parametrize("path_type", ("path", "meta_path")) class TestSysPathsSnapshot(object): - other_path = { - 'path': 'meta_path', - 'meta_path': 'path'} + other_path = {"path": "meta_path", "meta_path": "path"} @staticmethod def path(n): - return 'my-dirty-little-secret-' + str(n) + return "my-dirty-little-secret-" + str(n) def test_restore(self, monkeypatch, path_type): other_path_type = self.other_path[path_type] @@ -348,16 +371,16 @@ class TestSysPathsSnapshot(object): original_other = list(getattr(sys, other_path_type)) snapshot = SysPathsSnapshot() transformation = { - 'source': (0, 1, 2, 3, 4, 5), - 'target': ( 6, 2, 9, 7, 5, 8)} # noqa: E201 - assert sys_path == [self.path(x) for x in transformation['source']] + "source": (0, 1, 2, 3, 4, 5), "target": (6, 2, 9, 7, 5, 8) + } # noqa: E201 + assert sys_path == [self.path(x) for x in transformation["source"]] sys_path[1] = self.path(6) sys_path[3] = self.path(7) sys_path.append(self.path(8)) del sys_path[4] sys_path[3:3] = [self.path(9)] del sys_path[0] - assert sys_path == [self.path(x) for x in transformation['target']] + assert sys_path == [self.path(x) for x in transformation["target"]] snapshot.restore() assert getattr(sys, path_type) is sys_path assert getattr(sys, path_type) == original diff --git a/testing/test_recwarn.py b/testing/test_recwarn.py index 1d99a7656..a8e2fb803 100644 --- a/testing/test_recwarn.py +++ b/testing/test_recwarn.py @@ -7,18 +7,21 @@ from _pytest.recwarn import WarningsRecorder def test_recwarn_functional(testdir): - reprec = testdir.inline_runsource(""" + reprec = testdir.inline_runsource( + """ import warnings def test_method(recwarn): warnings.warn("hello") warn = recwarn.pop() assert isinstance(warn.message, UserWarning) - """) + """ + ) res = reprec.countoutcomes() assert tuple(res) == (1, 0, 0), res class TestWarningsRecorderChecker(object): + def test_recording(self): rec = WarningsRecorder() with rec: @@ -37,10 +40,11 @@ class TestWarningsRecorderChecker(object): def test_typechecking(self): from _pytest.recwarn import WarningsChecker + with pytest.raises(TypeError): WarningsChecker(5) with pytest.raises(TypeError): - WarningsChecker(('hi', RuntimeWarning)) + WarningsChecker(("hi", RuntimeWarning)) with pytest.raises(TypeError): WarningsChecker([DeprecationWarning, RuntimeWarning]) @@ -63,19 +67,19 @@ class TestDeprecatedCall(object): def dep(self, i, j=None): if i == 0: - warnings.warn("is deprecated", DeprecationWarning, - stacklevel=1) + warnings.warn("is deprecated", DeprecationWarning, stacklevel=1) return 42 def 
dep_explicit(self, i): if i == 0: - warnings.warn_explicit("dep_explicit", category=DeprecationWarning, - filename="hello", lineno=3) + warnings.warn_explicit( + "dep_explicit", category=DeprecationWarning, filename="hello", lineno=3 + ) def test_deprecated_call_raises(self): with pytest.raises(AssertionError) as excinfo: pytest.deprecated_call(self.dep, 3, 5) - assert 'Did not produce' in str(excinfo) + assert "Did not produce" in str(excinfo) def test_deprecated_call(self): pytest.deprecated_call(self.dep, 0, 5) @@ -104,30 +108,34 @@ class TestDeprecatedCall(object): pytest.deprecated_call(self.dep_explicit, 0) pytest.deprecated_call(self.dep_explicit, 0) - @pytest.mark.parametrize('mode', ['context_manager', 'call']) + @pytest.mark.parametrize("mode", ["context_manager", "call"]) def test_deprecated_call_no_warning(self, mode): """Ensure deprecated_call() raises the expected failure when its block/function does not raise a deprecation warning. """ + def f(): pass - msg = 'Did not produce DeprecationWarning or PendingDeprecationWarning' + msg = "Did not produce DeprecationWarning or PendingDeprecationWarning" with pytest.raises(AssertionError, match=msg): - if mode == 'call': + if mode == "call": pytest.deprecated_call(f) else: with pytest.deprecated_call(): f() - @pytest.mark.parametrize('warning_type', [PendingDeprecationWarning, DeprecationWarning]) - @pytest.mark.parametrize('mode', ['context_manager', 'call']) - @pytest.mark.parametrize('call_f_first', [True, False]) - @pytest.mark.filterwarnings('ignore') + @pytest.mark.parametrize( + "warning_type", [PendingDeprecationWarning, DeprecationWarning] + ) + @pytest.mark.parametrize("mode", ["context_manager", "call"]) + @pytest.mark.parametrize("call_f_first", [True, False]) + @pytest.mark.filterwarnings("ignore") def test_deprecated_call_modes(self, warning_type, mode, call_f_first): """Ensure deprecated_call() captures a deprecation warning as expected inside its block/function. """ + def f(): warnings.warn(warning_type("hi")) return 10 @@ -135,31 +143,40 @@ class TestDeprecatedCall(object): # ensure deprecated_call() can capture the warning even if it has already been triggered if call_f_first: assert f() == 10 - if mode == 'call': + if mode == "call": assert pytest.deprecated_call(f) == 10 else: with pytest.deprecated_call(): assert f() == 10 - @pytest.mark.parametrize('mode', ['context_manager', 'call']) + @pytest.mark.parametrize("mode", ["context_manager", "call"]) def test_deprecated_call_exception_is_raised(self, mode): """If the block of the code being tested by deprecated_call() raises an exception, it must raise the exception undisturbed. 
""" - def f(): - raise ValueError('some exception') - with pytest.raises(ValueError, match='some exception'): - if mode == 'call': + def f(): + raise ValueError("some exception") + + with pytest.raises(ValueError, match="some exception"): + if mode == "call": pytest.deprecated_call(f) else: with pytest.deprecated_call(): f() def test_deprecated_call_specificity(self): - other_warnings = [Warning, UserWarning, SyntaxWarning, RuntimeWarning, - FutureWarning, ImportWarning, UnicodeWarning] + other_warnings = [ + Warning, + UserWarning, + SyntaxWarning, + RuntimeWarning, + FutureWarning, + ImportWarning, + UnicodeWarning, + ] for warning in other_warnings: + def f(): warnings.warn(warning("hi")) @@ -171,29 +188,35 @@ class TestDeprecatedCall(object): class TestWarns(object): + def test_strings(self): # different messages, b/c Python suppresses multiple identical warnings source1 = "warnings.warn('w1', RuntimeWarning)" source2 = "warnings.warn('w2', RuntimeWarning)" source3 = "warnings.warn('w3', RuntimeWarning)" pytest.warns(RuntimeWarning, source1) - pytest.raises(pytest.fail.Exception, - lambda: pytest.warns(UserWarning, source2)) + pytest.raises(pytest.fail.Exception, lambda: pytest.warns(UserWarning, source2)) pytest.warns(RuntimeWarning, source3) def test_function(self): - pytest.warns(SyntaxWarning, - lambda msg: warnings.warn(msg, SyntaxWarning), "syntax") + pytest.warns( + SyntaxWarning, lambda msg: warnings.warn(msg, SyntaxWarning), "syntax" + ) def test_warning_tuple(self): - pytest.warns((RuntimeWarning, SyntaxWarning), - lambda: warnings.warn('w1', RuntimeWarning)) - pytest.warns((RuntimeWarning, SyntaxWarning), - lambda: warnings.warn('w2', SyntaxWarning)) - pytest.raises(pytest.fail.Exception, - lambda: pytest.warns( - (RuntimeWarning, SyntaxWarning), - lambda: warnings.warn('w3', UserWarning))) + pytest.warns( + (RuntimeWarning, SyntaxWarning), lambda: warnings.warn("w1", RuntimeWarning) + ) + pytest.warns( + (RuntimeWarning, SyntaxWarning), lambda: warnings.warn("w2", SyntaxWarning) + ) + pytest.raises( + pytest.fail.Exception, + lambda: pytest.warns( + (RuntimeWarning, SyntaxWarning), + lambda: warnings.warn("w3", UserWarning), + ), + ) def test_as_contextmanager(self): with pytest.warns(RuntimeWarning): @@ -205,20 +228,26 @@ class TestWarns(object): with pytest.raises(pytest.fail.Exception) as excinfo: with pytest.warns(RuntimeWarning): warnings.warn("user", UserWarning) - excinfo.match(r"DID NOT WARN. No warnings of type \(.+RuntimeWarning.+,\) was emitted. " - r"The list of emitted warnings is: \[UserWarning\('user',?\)\].") + excinfo.match( + r"DID NOT WARN. No warnings of type \(.+RuntimeWarning.+,\) was emitted. " + r"The list of emitted warnings is: \[UserWarning\('user',?\)\]." + ) with pytest.raises(pytest.fail.Exception) as excinfo: with pytest.warns(UserWarning): warnings.warn("runtime", RuntimeWarning) - excinfo.match(r"DID NOT WARN. No warnings of type \(.+UserWarning.+,\) was emitted. " - r"The list of emitted warnings is: \[RuntimeWarning\('runtime',?\)\].") + excinfo.match( + r"DID NOT WARN. No warnings of type \(.+UserWarning.+,\) was emitted. " + r"The list of emitted warnings is: \[RuntimeWarning\('runtime',?\)\]." + ) with pytest.raises(pytest.fail.Exception) as excinfo: with pytest.warns(UserWarning): pass - excinfo.match(r"DID NOT WARN. No warnings of type \(.+UserWarning.+,\) was emitted. " - r"The list of emitted warnings is: \[\].") + excinfo.match( + r"DID NOT WARN. No warnings of type \(.+UserWarning.+,\) was emitted. 
" + r"The list of emitted warnings is: \[\]." + ) warning_classes = (UserWarning, FutureWarning) with pytest.raises(pytest.fail.Exception) as excinfo: @@ -226,10 +255,17 @@ class TestWarns(object): warnings.warn("runtime", RuntimeWarning) warnings.warn("import", ImportWarning) - message_template = ("DID NOT WARN. No warnings of type {0} was emitted. " - "The list of emitted warnings is: {1}.") - excinfo.match(re.escape(message_template.format(warning_classes, - [each.message for each in warninfo]))) + message_template = ( + "DID NOT WARN. No warnings of type {0} was emitted. " + "The list of emitted warnings is: {1}." + ) + excinfo.match( + re.escape( + message_template.format( + warning_classes, [each.message for each in warninfo] + ) + ) + ) def test_record(self): with pytest.warns(UserWarning) as record: @@ -272,7 +308,8 @@ class TestWarns(object): def test_double_test(self, testdir): """If a test is run again, the warning should still be raised""" - testdir.makepyfile(''' + testdir.makepyfile( + """ import pytest import warnings @@ -280,30 +317,31 @@ class TestWarns(object): def test(run): with pytest.warns(RuntimeWarning): warnings.warn("runtime", RuntimeWarning) - ''') + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines(['*2 passed in*']) + result.stdout.fnmatch_lines(["*2 passed in*"]) def test_match_regex(self): - with pytest.warns(UserWarning, match=r'must be \d+$'): + with pytest.warns(UserWarning, match=r"must be \d+$"): warnings.warn("value must be 42", UserWarning) with pytest.raises(pytest.fail.Exception): - with pytest.warns(UserWarning, match=r'must be \d+$'): + with pytest.warns(UserWarning, match=r"must be \d+$"): warnings.warn("this is not here", UserWarning) with pytest.raises(pytest.fail.Exception): - with pytest.warns(FutureWarning, match=r'must be \d+$'): + with pytest.warns(FutureWarning, match=r"must be \d+$"): warnings.warn("value must be 42", UserWarning) def test_one_from_multiple_warns(self): - with pytest.warns(UserWarning, match=r'aaa'): + with pytest.warns(UserWarning, match=r"aaa"): warnings.warn("cccccccccc", UserWarning) warnings.warn("bbbbbbbbbb", UserWarning) warnings.warn("aaaaaaaaaa", UserWarning) def test_none_of_multiple_warns(self): with pytest.raises(pytest.fail.Exception): - with pytest.warns(UserWarning, match=r'aaa'): + with pytest.warns(UserWarning, match=r"aaa"): warnings.warn("bbbbbbbbbb", UserWarning) warnings.warn("cccccccccc", UserWarning) diff --git a/testing/test_resultlog.py b/testing/test_resultlog.py index b1760721c..173384ffb 100644 --- a/testing/test_resultlog.py +++ b/testing/test_resultlog.py @@ -5,71 +5,76 @@ import _pytest._code import py import pytest from _pytest.nodes import Node, Item, FSCollector -from _pytest.resultlog import generic_path, ResultLog, \ - pytest_configure, pytest_unconfigure +from _pytest.resultlog import ( + generic_path, + ResultLog, + pytest_configure, + pytest_unconfigure, +) def test_generic_path(testdir): from _pytest.main import Session + config = testdir.parseconfig() session = Session(config) - p1 = Node('a', config=config, session=session, nodeid='a') + p1 = Node("a", config=config, session=session, nodeid="a") # assert p1.fspath is None - p2 = Node('B', parent=p1) - p3 = Node('()', parent=p2) - item = Item('c', parent=p3) + p2 = Node("B", parent=p1) + p3 = Node("()", parent=p2) + item = Item("c", parent=p3) res = generic_path(item) - assert res == 'a.B().c' + assert res == "a.B().c" - p0 = FSCollector('proj/test', config=config, session=session) - p1 = FSCollector('proj/test/a', 
parent=p0) - p2 = Node('B', parent=p1) - p3 = Node('()', parent=p2) - p4 = Node('c', parent=p3) - item = Item('[1]', parent=p4) + p0 = FSCollector("proj/test", config=config, session=session) + p1 = FSCollector("proj/test/a", parent=p0) + p2 = Node("B", parent=p1) + p3 = Node("()", parent=p2) + p4 = Node("c", parent=p3) + item = Item("[1]", parent=p4) res = generic_path(item) - assert res == 'test/a:B().c[1]' + assert res == "test/a:B().c[1]" def test_write_log_entry(): reslog = ResultLog(None, None) reslog.logfile = py.io.TextIO() - reslog.write_log_entry('name', '.', '') + reslog.write_log_entry("name", ".", "") entry = reslog.logfile.getvalue() - assert entry[-1] == '\n' + assert entry[-1] == "\n" entry_lines = entry.splitlines() assert len(entry_lines) == 1 - assert entry_lines[0] == '. name' + assert entry_lines[0] == ". name" reslog.logfile = py.io.TextIO() - reslog.write_log_entry('name', 's', 'Skipped') + reslog.write_log_entry("name", "s", "Skipped") entry = reslog.logfile.getvalue() - assert entry[-1] == '\n' + assert entry[-1] == "\n" entry_lines = entry.splitlines() assert len(entry_lines) == 2 - assert entry_lines[0] == 's name' - assert entry_lines[1] == ' Skipped' + assert entry_lines[0] == "s name" + assert entry_lines[1] == " Skipped" reslog.logfile = py.io.TextIO() - reslog.write_log_entry('name', 's', 'Skipped\n') + reslog.write_log_entry("name", "s", "Skipped\n") entry = reslog.logfile.getvalue() - assert entry[-1] == '\n' + assert entry[-1] == "\n" entry_lines = entry.splitlines() assert len(entry_lines) == 2 - assert entry_lines[0] == 's name' - assert entry_lines[1] == ' Skipped' + assert entry_lines[0] == "s name" + assert entry_lines[1] == " Skipped" reslog.logfile = py.io.TextIO() - longrepr = ' tb1\n tb 2\nE tb3\nSome Error' - reslog.write_log_entry('name', 'F', longrepr) + longrepr = " tb1\n tb 2\nE tb3\nSome Error" + reslog.write_log_entry("name", "F", longrepr) entry = reslog.logfile.getvalue() - assert entry[-1] == '\n' + assert entry[-1] == "\n" entry_lines = entry.splitlines() assert len(entry_lines) == 5 - assert entry_lines[0] == 'F name' - assert entry_lines[1:] == [' ' + line for line in longrepr.splitlines()] + assert entry_lines[0] == "F name" + assert entry_lines[1:] == [" " + line for line in longrepr.splitlines()] class TestWithFunctionIntegration(object): @@ -98,7 +103,8 @@ class TestWithFunctionIntegration(object): assert "XXX" in "".join(lines[1:]) def test_log_test_outcomes(self, testdir): - mod = testdir.makepyfile(test_mod=""" + mod = testdir.makepyfile( + test_mod=""" import pytest def test_pass(): pass def test_skip(): pytest.skip("hello") @@ -109,7 +115,8 @@ class TestWithFunctionIntegration(object): @pytest.mark.xfail def test_xpass(): pass - """) + """ + ) lines = self.getresultlog(testdir, mod) assert len(lines) >= 3 assert lines[0].startswith(". ") @@ -123,11 +130,11 @@ class TestWithFunctionIntegration(object): tb = "".join(lines[4:8]) assert tb.find('raise ValueError("FAIL")') != -1 - assert lines[8].startswith('x ') + assert lines[8].startswith("x ") tb = "".join(lines[8:14]) assert tb.find('raise ValueError("XFAIL")') != -1 - assert lines[14].startswith('X ') + assert lines[14].startswith("X ") assert len(lines) == 15 @pytest.mark.parametrize("style", ("native", "long", "short")) @@ -143,16 +150,17 @@ class TestWithFunctionIntegration(object): entry = reslog.logfile.getvalue() entry_lines = entry.splitlines() - assert entry_lines[0].startswith('! ') + assert entry_lines[0].startswith("! 
") if style != "native": assert os.path.basename(__file__)[:-9] in entry_lines[0] # .pyc/class - assert entry_lines[-1][0] == ' ' - assert 'ValueError' in entry + assert entry_lines[-1][0] == " " + assert "ValueError" in entry def test_generic(testdir, LineMatcher): testdir.plugins.append("resultlog") - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest def test_pass(): pass @@ -166,51 +174,55 @@ def test_generic(testdir, LineMatcher): @pytest.mark.xfail(run=False) def test_xfail_norun(): assert 0 - """) + """ + ) testdir.runpytest("--resultlog=result.log") lines = testdir.tmpdir.join("result.log").readlines(cr=0) - LineMatcher(lines).fnmatch_lines([ - ". *:test_pass", - "F *:test_fail", - "s *:test_skip", - "x *:test_xfail", - "x *:test_xfail_norun", - ]) + LineMatcher(lines).fnmatch_lines( + [ + ". *:test_pass", + "F *:test_fail", + "s *:test_skip", + "x *:test_xfail", + "x *:test_xfail_norun", + ] + ) def test_makedir_for_resultlog(testdir, LineMatcher): """--resultlog should automatically create directories for the log file""" testdir.plugins.append("resultlog") - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest def test_pass(): pass - """) + """ + ) testdir.runpytest("--resultlog=path/to/result.log") lines = testdir.tmpdir.join("path/to/result.log").readlines(cr=0) - LineMatcher(lines).fnmatch_lines([ - ". *:test_pass", - ]) + LineMatcher(lines).fnmatch_lines([". *:test_pass"]) def test_no_resultlog_on_slaves(testdir): config = testdir.parseconfig("-p", "resultlog", "--resultlog=resultlog") - assert not hasattr(config, '_resultlog') + assert not hasattr(config, "_resultlog") pytest_configure(config) - assert hasattr(config, '_resultlog') + assert hasattr(config, "_resultlog") pytest_unconfigure(config) - assert not hasattr(config, '_resultlog') + assert not hasattr(config, "_resultlog") config.slaveinput = {} pytest_configure(config) - assert not hasattr(config, '_resultlog') + assert not hasattr(config, "_resultlog") pytest_unconfigure(config) - assert not hasattr(config, '_resultlog') + assert not hasattr(config, "_resultlog") def test_failure_issue380(testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest class MyCollector(pytest.File): def collect(self): @@ -219,10 +231,13 @@ def test_failure_issue380(testdir): return "somestring" def pytest_collect_file(path, parent): return MyCollector(parent=parent, fspath=path) - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ def test_func(): pass - """) + """ + ) result = testdir.runpytest("--resultlog=log") assert result.ret == 2 diff --git a/testing/test_runner.py b/testing/test_runner.py index 7c179b1f2..26493de6e 100644 --- a/testing/test_runner.py +++ b/testing/test_runner.py @@ -12,6 +12,7 @@ from _pytest import runner, main, outcomes class TestSetupState(object): + def test_setup(self, testdir): ss = runner.SetupState() item = testdir.getitem("def test_func(): pass") @@ -30,11 +31,13 @@ class TestSetupState(object): ss.teardown_exact(item, None) def test_setup_fails_and_failure_is_cached(self, testdir): - item = testdir.getitem(""" + item = testdir.getitem( + """ def setup_module(mod): raise ValueError(42) def test_func(): pass - """) + """ + ) ss = runner.SetupState() pytest.raises(ValueError, lambda: ss.prepare(item)) pytest.raises(ValueError, lambda: ss.prepare(item)) @@ -43,13 +46,13 @@ class TestSetupState(object): r = [] def fin1(): - r.append('fin1') + r.append("fin1") def fin2(): - raise Exception('oops') + raise Exception("oops") def fin3(): - 
r.append('fin3') + r.append("fin3") item = testdir.getitem("def test_func(): pass") ss = runner.SetupState() @@ -58,17 +61,17 @@ class TestSetupState(object): ss.addfinalizer(fin3, item) with pytest.raises(Exception) as err: ss._callfinalizers(item) - assert err.value.args == ('oops',) - assert r == ['fin3', 'fin1'] + assert err.value.args == ("oops",) + assert r == ["fin3", "fin1"] def test_teardown_multiple_fail(self, testdir): # Ensure the first exception is the one which is re-raised. # Ideally both would be reported however. def fin1(): - raise Exception('oops1') + raise Exception("oops1") def fin2(): - raise Exception('oops2') + raise Exception("oops2") item = testdir.getitem("def test_func(): pass") ss = runner.SetupState() @@ -76,15 +79,18 @@ class TestSetupState(object): ss.addfinalizer(fin2, item) with pytest.raises(Exception) as err: ss._callfinalizers(item) - assert err.value.args == ('oops2',) + assert err.value.args == ("oops2",) class BaseFunctionalTests(object): + def test_passfunction(self, testdir): - reports = testdir.runitem(""" + reports = testdir.runitem( + """ def test_func(): pass - """) + """ + ) rep = reports[1] assert rep.passed assert not rep.failed @@ -92,10 +98,12 @@ class BaseFunctionalTests(object): assert not rep.longrepr def test_failfunction(self, testdir): - reports = testdir.runitem(""" + reports = testdir.runitem( + """ def test_func(): assert 0 - """) + """ + ) rep = reports[1] assert not rep.passed assert not rep.skipped @@ -105,11 +113,13 @@ class BaseFunctionalTests(object): # assert isinstance(rep.longrepr, ReprExceptionInfo) def test_skipfunction(self, testdir): - reports = testdir.runitem(""" + reports = testdir.runitem( + """ import pytest def test_func(): pytest.skip("hello") - """) + """ + ) rep = reports[1] assert not rep.failed assert not rep.passed @@ -123,13 +133,15 @@ class BaseFunctionalTests(object): # assert not rep.skipped.failurerepr def test_skip_in_setup_function(self, testdir): - reports = testdir.runitem(""" + reports = testdir.runitem( + """ import pytest def setup_function(func): pytest.skip("hello") def test_func(): pass - """) + """ + ) print(reports) rep = reports[0] assert not rep.failed @@ -142,13 +154,15 @@ class BaseFunctionalTests(object): assert reports[1].passed # teardown def test_failure_in_setup_function(self, testdir): - reports = testdir.runitem(""" + reports = testdir.runitem( + """ import pytest def setup_function(func): raise ValueError(42) def test_func(): pass - """) + """ + ) rep = reports[0] assert not rep.skipped assert not rep.passed @@ -157,13 +171,15 @@ class BaseFunctionalTests(object): assert len(reports) == 2 def test_failure_in_teardown_function(self, testdir): - reports = testdir.runitem(""" + reports = testdir.runitem( + """ import pytest def teardown_function(func): raise ValueError(42) def test_func(): pass - """) + """ + ) print(reports) assert len(reports) == 3 rep = reports[2] @@ -175,17 +191,21 @@ class BaseFunctionalTests(object): # assert rep.longrepr.reprtraceback.reprentries def test_custom_failure_repr(self, testdir): - testdir.makepyfile(conftest=""" + testdir.makepyfile( + conftest=""" import pytest class Function(pytest.Function): def repr_failure(self, excinfo): return "hello" - """) - reports = testdir.runitem(""" + """ + ) + reports = testdir.runitem( + """ import pytest def test_func(): assert 0 - """) + """ + ) rep = reports[1] assert not rep.skipped assert not rep.passed @@ -196,28 +216,36 @@ class BaseFunctionalTests(object): # assert rep.failed.failurerepr == "hello" def 
test_teardown_final_returncode(self, testdir): - rec = testdir.inline_runsource(""" + rec = testdir.inline_runsource( + """ def test_func(): pass def teardown_function(func): raise ValueError(42) - """) + """ + ) assert rec.ret == 1 def test_logstart_logfinish_hooks(self, testdir): - rec = testdir.inline_runsource(""" + rec = testdir.inline_runsource( + """ import pytest def test_func(): pass - """) + """ + ) reps = rec.getcalls("pytest_runtest_logstart pytest_runtest_logfinish") - assert [x._name for x in reps] == ['pytest_runtest_logstart', 'pytest_runtest_logfinish'] + assert ( + [x._name for x in reps] + == ["pytest_runtest_logstart", "pytest_runtest_logfinish"] + ) for rep in reps: - assert rep.nodeid == 'test_logstart_logfinish_hooks.py::test_func' - assert rep.location == ('test_logstart_logfinish_hooks.py', 1, 'test_func') + assert rep.nodeid == "test_logstart_logfinish_hooks.py::test_func" + assert rep.location == ("test_logstart_logfinish_hooks.py", 1, "test_func") def test_exact_teardown_issue90(self, testdir): - rec = testdir.inline_runsource(""" + rec = testdir.inline_runsource( + """ import pytest class TestClass(object): @@ -238,7 +266,8 @@ class BaseFunctionalTests(object): traceback.format_exception(*excinfo) def teardown_function(func): raise ValueError(42) - """) + """ + ) reps = rec.getreports("pytest_runtest_logreport") print(reps) for i in range(2): @@ -256,7 +285,8 @@ class BaseFunctionalTests(object): def test_exact_teardown_issue1206(self, testdir): """issue shadowing error with wrong number of arguments on teardown_method.""" - rec = testdir.inline_runsource(""" + rec = testdir.inline_runsource( + """ import pytest class TestClass(object): @@ -265,18 +295,19 @@ class BaseFunctionalTests(object): def test_method(self): assert True - """) + """ + ) reps = rec.getreports("pytest_runtest_logreport") print(reps) assert len(reps) == 3 # assert reps[0].nodeid.endswith("test_method") assert reps[0].passed - assert reps[0].when == 'setup' + assert reps[0].when == "setup" # assert reps[1].nodeid.endswith("test_method") assert reps[1].passed - assert reps[1].when == 'call' + assert reps[1].when == "call" # assert reps[2].nodeid.endswith("test_method") assert reps[2].failed @@ -285,22 +316,26 @@ class BaseFunctionalTests(object): # python3 error "TypeError: teardown_method() missing 2 required positional arguments: 'y' and 'z'", # python2 error - 'TypeError: teardown_method() takes exactly 4 arguments (2 given)' + "TypeError: teardown_method() takes exactly 4 arguments (2 given)", ) def test_failure_in_setup_function_ignores_custom_repr(self, testdir): - testdir.makepyfile(conftest=""" + testdir.makepyfile( + conftest=""" import pytest class Function(pytest.Function): def repr_failure(self, excinfo): assert 0 - """) - reports = testdir.runitem(""" + """ + ) + reports = testdir.runitem( + """ def setup_function(func): raise ValueError(42) def test_func(): pass - """) + """ + ) assert len(reports) == 2 rep = reports[0] print(rep) @@ -314,10 +349,12 @@ class BaseFunctionalTests(object): def test_systemexit_does_not_bail_out(self, testdir): try: - reports = testdir.runitem(""" + reports = testdir.runitem( + """ def test_func(): raise SystemExit(42) - """) + """ + ) except SystemExit: pytest.fail("runner did not catch SystemExit") rep = reports[1] @@ -326,11 +363,13 @@ class BaseFunctionalTests(object): def test_exit_propagates(self, testdir): try: - testdir.runitem(""" + testdir.runitem( + """ import pytest def test_func(): raise pytest.exit.Exception() - """) + """ + ) except 
pytest.exit.Exception: pass else: @@ -338,17 +377,22 @@ class BaseFunctionalTests(object): class TestExecutionNonForked(BaseFunctionalTests): + def getrunner(self): + def f(item): return runner.runtestprotocol(item, log=False) + return f def test_keyboardinterrupt_propagates(self, testdir): try: - testdir.runitem(""" + testdir.runitem( + """ def test_func(): raise KeyboardInterrupt("fake") - """) + """ + ) except KeyboardInterrupt: pass else: @@ -364,24 +408,29 @@ class TestExecutionForked(BaseFunctionalTests): return boxed.forked_run_report def test_suicide(self, testdir): - reports = testdir.runitem(""" + reports = testdir.runitem( + """ def test_func(): import os os.kill(os.getpid(), 15) - """) + """ + ) rep = reports[0] assert rep.failed assert rep.when == "???" class TestSessionReports(object): + def test_collect_result(self, testdir): - col = testdir.getmodulecol(""" + col = testdir.getmodulecol( + """ def test_func1(): pass class TestClass(object): pass - """) + """ + ) rep = runner.collect_one_node(col) assert not rep.failed assert not rep.skipped @@ -404,9 +453,11 @@ reporttypes = [ ] -@pytest.mark.parametrize('reporttype', reporttypes, ids=[x.__name__ for x in reporttypes]) +@pytest.mark.parametrize( + "reporttype", reporttypes, ids=[x.__name__ for x in reporttypes] +) def test_report_extra_parameters(reporttype): - if hasattr(inspect, 'signature'): + if hasattr(inspect, "signature"): args = list(inspect.signature(reporttype.__init__).parameters.keys())[1:] else: args = inspect.getargspec(reporttype.__init__)[0][1:] @@ -416,23 +467,25 @@ def test_report_extra_parameters(reporttype): def test_callinfo(): - ci = runner.CallInfo(lambda: 0, '123') + ci = runner.CallInfo(lambda: 0, "123") assert ci.when == "123" assert ci.result == 0 assert "result" in repr(ci) - ci = runner.CallInfo(lambda: 0 / 0, '123') + ci = runner.CallInfo(lambda: 0 / 0, "123") assert ci.when == "123" - assert not hasattr(ci, 'result') + assert not hasattr(ci, "result") assert ci.excinfo assert "exc" in repr(ci) + # design question: do we want general hooks in python files? # then something like the following functional tests makes sense @pytest.mark.xfail def test_runtest_in_module_ordering(testdir): - p1 = testdir.makepyfile(""" + p1 = testdir.makepyfile( + """ import pytest def pytest_runtest_setup(item): # runs after class-level! 
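# (editor's aside, hedged: module-level pytest_runtest_setup hooks like this
# one are exactly the open design question noted above; the supported home
# for such a hook is a conftest.py, e.g.
#
#     # conftest.py
#     def pytest_runtest_setup(item):
#         print("setting up", item.nodeid)
#
# which pytest invokes for every item collected under that directory)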
item.function.mylist.append("module") @@ -455,22 +508,21 @@ def test_runtest_in_module_ordering(testdir): assert mylist == ['class', 'module'], mylist def pytest_runtest_teardown(item): del item.function.mylist - """) + """ + ) result = testdir.runpytest(p1) - result.stdout.fnmatch_lines([ - "*2 passed*" - ]) + result.stdout.fnmatch_lines(["*2 passed*"]) def test_outcomeexception_exceptionattributes(): - outcome = outcomes.OutcomeException('test') + outcome = outcomes.OutcomeException("test") assert outcome.args[0] == outcome.msg def test_outcomeexception_passes_except_Exception(): with pytest.raises(outcomes.OutcomeException): try: - raise outcomes.OutcomeException('test') + raise outcomes.OutcomeException("test") except Exception: pass @@ -493,72 +545,76 @@ def test_pytest_fail(): def test_pytest_exit_msg(testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest def pytest_configure(config): pytest.exit('oh noes') - """) + """ + ) result = testdir.runpytest() - result.stderr.fnmatch_lines([ - "Exit: oh noes", - ]) + result.stderr.fnmatch_lines(["Exit: oh noes"]) def test_pytest_fail_notrace(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest def test_hello(): pytest.fail("hello", pytrace=False) def teardown_function(function): pytest.fail("world", pytrace=False) - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "world", - "hello", - ]) - assert 'def teardown_function' not in result.stdout.str() + result.stdout.fnmatch_lines(["world", "hello"]) + assert "def teardown_function" not in result.stdout.str() -@pytest.mark.parametrize('str_prefix', ['u', '']) +@pytest.mark.parametrize("str_prefix", ["u", ""]) def test_pytest_fail_notrace_non_ascii(testdir, str_prefix): """Fix pytest.fail with pytrace=False with non-ascii characters (#1178). This tests with native and unicode strings containing non-ascii chars. 
""" - testdir.makepyfile(u""" + testdir.makepyfile( + u""" # coding: utf-8 import pytest def test_hello(): pytest.fail(%s'oh oh: ☺', pytrace=False) - """ % str_prefix) + """ + % str_prefix + ) result = testdir.runpytest() if sys.version_info[0] >= 3: - result.stdout.fnmatch_lines(['*test_hello*', "oh oh: ☺"]) + result.stdout.fnmatch_lines(["*test_hello*", "oh oh: ☺"]) else: - result.stdout.fnmatch_lines(['*test_hello*', "oh oh: *"]) - assert 'def test_hello' not in result.stdout.str() + result.stdout.fnmatch_lines(["*test_hello*", "oh oh: *"]) + assert "def test_hello" not in result.stdout.str() def test_pytest_no_tests_collected_exit_status(testdir): result = testdir.runpytest() - result.stdout.fnmatch_lines('*collected 0 items*') + result.stdout.fnmatch_lines("*collected 0 items*") assert result.ret == main.EXIT_NOTESTSCOLLECTED - testdir.makepyfile(test_foo=""" + testdir.makepyfile( + test_foo=""" def test_foo(): assert 1 - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines('*collected 1 item*') - result.stdout.fnmatch_lines('*1 passed*') + result.stdout.fnmatch_lines("*collected 1 item*") + result.stdout.fnmatch_lines("*1 passed*") assert result.ret == main.EXIT_OK - result = testdir.runpytest('-k nonmatch') - result.stdout.fnmatch_lines('*collected 1 item*') - result.stdout.fnmatch_lines('*1 deselected*') + result = testdir.runpytest("-k nonmatch") + result.stdout.fnmatch_lines("*collected 1 item*") + result.stdout.fnmatch_lines("*1 deselected*") assert result.ret == main.EXIT_NOTESTSCOLLECTED @@ -592,9 +648,12 @@ def test_importorskip(monkeypatch): mod = types.ModuleType("hello123") mod.__version__ = "1.3" monkeypatch.setitem(sys.modules, "hello123", mod) - pytest.raises(pytest.skip.Exception, """ + pytest.raises( + pytest.skip.Exception, + """ pytest.importorskip("hello123", minversion="1.3.1") - """) + """, + ) mod2 = pytest.importorskip("hello123", minversion="1.3") assert mod2 == mod except pytest.skip.Exception: @@ -610,12 +669,15 @@ def test_importorskip_imports_last_module_part(): def test_importorskip_dev_module(monkeypatch): try: mod = types.ModuleType("mockmodule") - mod.__version__ = '0.13.0.dev-43290' - monkeypatch.setitem(sys.modules, 'mockmodule', mod) - mod2 = pytest.importorskip('mockmodule', minversion='0.12.0') + mod.__version__ = "0.13.0.dev-43290" + monkeypatch.setitem(sys.modules, "mockmodule", mod) + mod2 = pytest.importorskip("mockmodule", minversion="0.12.0") assert mod2 == mod - pytest.raises(pytest.skip.Exception, """ - pytest.importorskip('mockmodule1', minversion='0.14.0')""") + pytest.raises( + pytest.skip.Exception, + """ + pytest.importorskip('mockmodule1', minversion='0.14.0')""", + ) except pytest.skip.Exception: print(_pytest._code.ExceptionInfo()) pytest.fail("spurious skip") @@ -623,26 +685,31 @@ def test_importorskip_dev_module(monkeypatch): def test_importorskip_module_level(testdir): """importorskip must be able to skip entire modules when used at module level""" - testdir.makepyfile(''' + testdir.makepyfile( + """ import pytest foobarbaz = pytest.importorskip("foobarbaz") def test_foo(): pass - ''') + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines(['*collected 0 items / 1 skipped*']) + result.stdout.fnmatch_lines(["*collected 0 items / 1 skipped*"]) def test_pytest_cmdline_main(testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import pytest def test_hello(): assert 1 if __name__ == '__main__': pytest.cmdline.main([__file__]) - """) + """ + ) import subprocess + popen = 
subprocess.Popen([sys.executable, str(p)], stdout=subprocess.PIPE) popen.communicate() ret = popen.wait() @@ -650,7 +717,8 @@ def test_pytest_cmdline_main(testdir): def test_unicode_in_longrepr(testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ # -*- coding: utf-8 -*- import pytest @pytest.hookimpl(hookwrapper=True) @@ -659,52 +727,61 @@ def test_unicode_in_longrepr(testdir): rep = outcome.get_result() if rep.when == "call": rep.longrepr = u'ä' - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ def test_out(): assert 0 - """) + """ + ) result = testdir.runpytest() assert result.ret == 1 assert "UnicodeEncodeError" not in result.stderr.str() def test_failure_in_setup(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def setup_module(): 0/0 def test_func(): pass - """) + """ + ) result = testdir.runpytest("--tb=line") assert "def setup_module" not in result.stdout.str() def test_makereport_getsource(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_foo(): if False: pass else: assert False - """) + """ + ) result = testdir.runpytest() - assert 'INTERNALERROR' not in result.stdout.str() - result.stdout.fnmatch_lines(['*else: assert False*']) + assert "INTERNALERROR" not in result.stdout.str() + result.stdout.fnmatch_lines(["*else: assert False*"]) def test_makereport_getsource_dynamic_code(testdir, monkeypatch): """Test that exception in dynamically generated code doesn't break getting the source line.""" import inspect + original_findsource = inspect.findsource def findsource(obj, *args, **kwargs): # Can be triggered by dynamically created functions - if obj.__name__ == 'foo': + if obj.__name__ == "foo": raise IndexError() return original_findsource(obj, *args, **kwargs) - monkeypatch.setattr(inspect, 'findsource', findsource) + monkeypatch.setattr(inspect, "findsource", findsource) - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture @@ -713,9 +790,10 @@ def test_makereport_getsource_dynamic_code(testdir, monkeypatch): def test_fix(foo): assert False - """) - result = testdir.runpytest('-vv') - assert 'INTERNALERROR' not in result.stdout.str() + """ + ) + result = testdir.runpytest("-vv") + assert "INTERNALERROR" not in result.stdout.str() result.stdout.fnmatch_lines(["*test_fix*", "*fixture*'missing'*not found*"]) @@ -725,19 +803,20 @@ def test_store_except_info_on_error(): """ # Simulate item that might raise a specific exception, depending on `raise_error` class var class ItemMightRaise(object): - nodeid = 'item_that_raises' + nodeid = "item_that_raises" raise_error = True def runtest(self): if self.raise_error: - raise IndexError('TEST') + raise IndexError("TEST") + try: runner.pytest_runtest_call(ItemMightRaise()) except IndexError: pass # Check that exception info is stored on sys assert sys.last_type is IndexError - assert sys.last_value.args[0] == 'TEST' + assert sys.last_value.args[0] == "TEST" assert sys.last_traceback # The next run should clear the exception info stored by the previous run @@ -750,8 +829,11 @@ def test_store_except_info_on_error(): def test_current_test_env_var(testdir, monkeypatch): pytest_current_test_vars = [] - monkeypatch.setattr(sys, 'pytest_current_test_vars', pytest_current_test_vars, raising=False) - testdir.makepyfile(''' + monkeypatch.setattr( + sys, "pytest_current_test_vars", pytest_current_test_vars, raising=False + ) + testdir.makepyfile( + """ import pytest import sys import os @@ -764,13 +846,20 @@ def test_current_test_env_var(testdir, 
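# Editor's aside (hedged, not part of this diff): PYTEST_CURRENT_TEST is what
# code under test can consult to learn which test phase is running, e.g.
#
#     import os
#     current = os.environ.get("PYTEST_CURRENT_TEST", "")
#     # something like "test_mod.py::test (call)" during the call phase
#
# and, as asserted at the end of this test, the variable is removed from
# os.environ again once pytest is done with the item.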
monkeypatch): def test(fix): sys.pytest_current_test_vars.append(('call', os.environ['PYTEST_CURRENT_TEST'])) - ''') + """ + ) result = testdir.runpytest_inprocess() assert result.ret == 0 - test_id = 'test_current_test_env_var.py::test' - assert pytest_current_test_vars == [ - ('setup', test_id + ' (setup)'), ('call', test_id + ' (call)'), ('teardown', test_id + ' (teardown)')] - assert 'PYTEST_CURRENT_TEST' not in os.environ + test_id = "test_current_test_env_var.py::test" + assert ( + pytest_current_test_vars + == [ + ("setup", test_id + " (setup)"), + ("call", test_id + " (call)"), + ("teardown", test_id + " (teardown)"), + ] + ) + assert "PYTEST_CURRENT_TEST" not in os.environ class TestReportContents(object): @@ -782,24 +871,29 @@ class TestReportContents(object): return lambda item: runner.runtestprotocol(item, log=False) def test_longreprtext_pass(self, testdir): - reports = testdir.runitem(""" + reports = testdir.runitem( + """ def test_func(): pass - """) + """ + ) rep = reports[1] - assert rep.longreprtext == '' + assert rep.longreprtext == "" def test_longreprtext_failure(self, testdir): - reports = testdir.runitem(""" + reports = testdir.runitem( + """ def test_func(): x = 1 assert x == 4 - """) + """ + ) rep = reports[1] - assert 'assert 1 == 4' in rep.longreprtext + assert "assert 1 == 4" in rep.longreprtext def test_captured_text(self, testdir): - reports = testdir.runitem(""" + reports = testdir.runitem( + """ import pytest import sys @@ -816,21 +910,24 @@ class TestReportContents(object): sys.stdout.write('call: stdout\\n') sys.stderr.write('call: stderr\\n') assert 0 - """) + """ + ) setup, call, teardown = reports - assert setup.capstdout == 'setup: stdout\n' - assert call.capstdout == 'setup: stdout\ncall: stdout\n' - assert teardown.capstdout == 'setup: stdout\ncall: stdout\nteardown: stdout\n' + assert setup.capstdout == "setup: stdout\n" + assert call.capstdout == "setup: stdout\ncall: stdout\n" + assert teardown.capstdout == "setup: stdout\ncall: stdout\nteardown: stdout\n" - assert setup.capstderr == 'setup: stderr\n' - assert call.capstderr == 'setup: stderr\ncall: stderr\n' - assert teardown.capstderr == 'setup: stderr\ncall: stderr\nteardown: stderr\n' + assert setup.capstderr == "setup: stderr\n" + assert call.capstderr == "setup: stderr\ncall: stderr\n" + assert teardown.capstderr == "setup: stderr\ncall: stderr\nteardown: stderr\n" def test_no_captured_text(self, testdir): - reports = testdir.runitem(""" + reports = testdir.runitem( + """ def test_func(): pass - """) + """ + ) rep = reports[1] - assert rep.capstdout == '' - assert rep.capstderr == '' + assert rep.capstdout == "" + assert rep.capstderr == "" diff --git a/testing/test_runner_xunit.py b/testing/test_runner_xunit.py index fc931f867..8316aafbf 100644 --- a/testing/test_runner_xunit.py +++ b/testing/test_runner_xunit.py @@ -7,7 +7,8 @@ import pytest def test_module_and_function_setup(testdir): - reprec = testdir.inline_runsource(""" + reprec = testdir.inline_runsource( + """ modlevel = [] def setup_module(module): assert not modlevel @@ -30,7 +31,8 @@ def test_module_and_function_setup(testdir): def test_module(self): assert modlevel[0] == 42 assert not hasattr(test_modlevel, 'answer') - """) + """ + ) rep = reprec.matchreport("test_modlevel") assert rep.passed rep = reprec.matchreport("test_module") @@ -38,7 +40,8 @@ def test_module_and_function_setup(testdir): def test_module_setup_failure_no_teardown(testdir): - reprec = testdir.inline_runsource(""" + reprec = testdir.inline_runsource( + """ 
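# editor's note (hedged, not part of this diff): the xunit contract exercised
# here is that a failing setup_module errors the tests and suppresses
# teardown_module entirely, i.e.
#
#     def setup_module(module):
#         raise ValueError(42)   # -> tests error, teardown never runs
#
# which is why the module below must end up with values == [1], not [1, 2]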
values = [] def setup_module(module): values.append(1) @@ -49,14 +52,16 @@ def test_module_setup_failure_no_teardown(testdir): def teardown_module(module): values.append(2) - """) + """ + ) reprec.assertoutcome(failed=1) calls = reprec.getcalls("pytest_runtest_setup") assert calls[0].item.module.values == [1] def test_setup_function_failure_no_teardown(testdir): - reprec = testdir.inline_runsource(""" + reprec = testdir.inline_runsource( + """ modlevel = [] def setup_function(function): modlevel.append(1) @@ -67,13 +72,15 @@ def test_setup_function_failure_no_teardown(testdir): def test_func(): pass - """) + """ + ) calls = reprec.getcalls("pytest_runtest_setup") assert calls[0].item.module.modlevel == [1] def test_class_setup(testdir): - reprec = testdir.inline_runsource(""" + reprec = testdir.inline_runsource( + """ class TestSimpleClassSetup(object): clslevel = [] def setup_class(cls): @@ -92,12 +99,14 @@ def test_class_setup(testdir): def test_cleanup(): assert not TestSimpleClassSetup.clslevel assert not TestInheritedClassSetupStillWorks.clslevel - """) + """ + ) reprec.assertoutcome(passed=1 + 2 + 1) def test_class_setup_failure_no_teardown(testdir): - reprec = testdir.inline_runsource(""" + reprec = testdir.inline_runsource( + """ class TestSimpleClassSetup(object): clslevel = [] def setup_class(cls): @@ -111,12 +120,14 @@ def test_class_setup_failure_no_teardown(testdir): def test_cleanup(): assert not TestSimpleClassSetup.clslevel - """) + """ + ) reprec.assertoutcome(failed=1, passed=1) def test_method_setup(testdir): - reprec = testdir.inline_runsource(""" + reprec = testdir.inline_runsource( + """ class TestSetupMethod(object): def setup_method(self, meth): self.methsetup = meth @@ -128,12 +139,14 @@ def test_method_setup(testdir): def test_other(self): assert self.methsetup == self.test_other - """) + """ + ) reprec.assertoutcome(passed=2) def test_method_setup_failure_no_teardown(testdir): - reprec = testdir.inline_runsource(""" + reprec = testdir.inline_runsource( + """ class TestMethodSetup(object): clslevel = [] def setup_method(self, method): @@ -148,12 +161,14 @@ def test_method_setup_failure_no_teardown(testdir): def test_cleanup(): assert TestMethodSetup.clslevel == [1] - """) + """ + ) reprec.assertoutcome(failed=1, passed=1) def test_method_generator_setup(testdir): - reprec = testdir.inline_runsource(""" + reprec = testdir.inline_runsource( + """ class TestSetupTeardownOnInstance(object): def setup_class(cls): cls.classsetup = True @@ -171,12 +186,14 @@ def test_method_generator_setup(testdir): assert self.classsetup assert self.methsetup == self.test_generate assert value == 5 - """) + """ + ) reprec.assertoutcome(passed=1, failed=1) def test_func_generator_setup(testdir): - reprec = testdir.inline_runsource(""" + reprec = testdir.inline_runsource( + """ import sys def setup_module(mod): @@ -199,13 +216,15 @@ def test_func_generator_setup(testdir): assert x == [1] yield check assert x == [1] - """) + """ + ) rep = reprec.matchreport("test_one", names="pytest_runtest_logreport") assert rep.passed def test_method_setup_uses_fresh_instances(testdir): - reprec = testdir.inline_runsource(""" + reprec = testdir.inline_runsource( + """ class TestSelfState1(object): memory = [] def test_hello(self): @@ -213,12 +232,14 @@ def test_method_setup_uses_fresh_instances(testdir): def test_afterhello(self): assert self != self.memory[0] - """) + """ + ) reprec.assertoutcome(passed=2, failed=0) def test_setup_that_skips_calledagain(testdir): - p = testdir.makepyfile(""" + p = 
testdir.makepyfile( + """ import pytest def setup_module(mod): pytest.skip("x") @@ -226,13 +247,15 @@ def test_setup_that_skips_calledagain(testdir): pass def test_function2(): pass - """) + """ + ) reprec = testdir.inline_run(p) reprec.assertoutcome(skipped=2) def test_setup_fails_again_on_all_tests(testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import pytest def setup_module(mod): raise ValueError(42) @@ -240,13 +263,15 @@ def test_setup_fails_again_on_all_tests(testdir): pass def test_function2(): pass - """) + """ + ) reprec = testdir.inline_run(p) reprec.assertoutcome(failed=2) def test_setup_funcarg_setup_when_outer_scope_fails(testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import pytest def setup_module(mod): raise ValueError(42) @@ -257,25 +282,34 @@ def test_setup_funcarg_setup_when_outer_scope_fails(testdir): pass def test_function2(hello): pass - """) + """ + ) result = testdir.runpytest(p) - result.stdout.fnmatch_lines([ - "*function1*", - "*ValueError*42*", - "*function2*", - "*ValueError*42*", - "*2 error*" - ]) + result.stdout.fnmatch_lines( + [ + "*function1*", + "*ValueError*42*", + "*function2*", + "*ValueError*42*", + "*2 error*", + ] + ) assert "xyz43" not in result.stdout.str() -@pytest.mark.parametrize('arg', ['', 'arg']) -def test_setup_teardown_function_level_with_optional_argument(testdir, monkeypatch, arg): +@pytest.mark.parametrize("arg", ["", "arg"]) +def test_setup_teardown_function_level_with_optional_argument( + testdir, monkeypatch, arg +): """parameter to setup/teardown xunit-style functions parameter is now optional (#1728).""" import sys + trace_setups_teardowns = [] - monkeypatch.setattr(sys, 'trace_setups_teardowns', trace_setups_teardowns, raising=False) - p = testdir.makepyfile(""" + monkeypatch.setattr( + sys, "trace_setups_teardowns", trace_setups_teardowns, raising=False + ) + p = testdir.makepyfile( + """ import pytest import sys @@ -296,24 +330,23 @@ def test_setup_teardown_function_level_with_optional_argument(testdir, monkeypat def test_method_1(self): pass def test_method_2(self): pass - """.format(arg=arg)) + """.format( + arg=arg + ) + ) result = testdir.inline_run(p) result.assertoutcome(passed=4) expected = [ - 'setup_module', - - 'setup_function', - 'teardown_function', - 'setup_function', - 'teardown_function', - - 'setup_method', - 'teardown_method', - - 'setup_method', - 'teardown_method', - - 'teardown_module', + "setup_module", + "setup_function", + "teardown_function", + "setup_function", + "teardown_function", + "setup_method", + "teardown_method", + "setup_method", + "teardown_method", + "teardown_module", ] assert trace_setups_teardowns == expected diff --git a/testing/test_session.py b/testing/test_session.py index 32d8ce689..4a594009b 100644 --- a/testing/test_session.py +++ b/testing/test_session.py @@ -6,8 +6,10 @@ from _pytest.main import EXIT_NOTESTSCOLLECTED class SessionTests(object): + def test_basic_testitem_events(self, testdir): - tfile = testdir.makepyfile(""" + tfile = testdir.makepyfile( + """ def test_one(): pass def test_one_one(): @@ -17,7 +19,8 @@ class SessionTests(object): class TestClass(object): def test_two(self, someargs): pass - """) + """ + ) reprec = testdir.inline_run(tfile) passed, skipped, failed = reprec.listoutcomes() assert len(skipped) == 0 @@ -37,26 +40,31 @@ class SessionTests(object): # assert colreports[1].report.failed def test_nested_import_error(self, testdir): - tfile = testdir.makepyfile(""" + tfile = testdir.makepyfile( + """ import 
import_fails def test_this(): assert import_fails.a == 1 - """, import_fails=""" + """, + import_fails=""" import does_not_work a = 1 - """) + """, + ) reprec = testdir.inline_run(tfile) values = reprec.getfailedcollections() assert len(values) == 1 out = str(values[0].longrepr) - assert out.find('does_not_work') != -1 + assert out.find("does_not_work") != -1 def test_raises_output(self, testdir): - reprec = testdir.inline_runsource(""" + reprec = testdir.inline_runsource( + """ import pytest def test_raises_doesnt(): pytest.raises(ValueError, int, "3") - """) + """ + ) passed, skipped, failed = reprec.listoutcomes() assert len(failed) == 1 out = failed[0].longrepr.reprcrash.message @@ -65,13 +73,15 @@ class SessionTests(object): pytest.fail("incorrect raises() output") def test_generator_yields_None(self, testdir): - reprec = testdir.inline_runsource(""" + reprec = testdir.inline_runsource( + """ def test_1(): yield None - """) + """ + ) failures = reprec.getfailedcollections() out = failures[0].longrepr.reprcrash.message - i = out.find('TypeError') + i = out.find("TypeError") assert i != -1 def test_syntax_error_module(self, testdir): @@ -79,29 +89,36 @@ class SessionTests(object): values = reprec.getfailedcollections() assert len(values) == 1 out = str(values[0].longrepr) - assert out.find(str('not python')) != -1 + assert out.find(str("not python")) != -1 def test_exit_first_problem(self, testdir): - reprec = testdir.inline_runsource(""" + reprec = testdir.inline_runsource( + """ def test_one(): assert 0 def test_two(): assert 0 - """, '--exitfirst') + """, + "--exitfirst", + ) passed, skipped, failed = reprec.countoutcomes() assert failed == 1 assert passed == skipped == 0 def test_maxfail(self, testdir): - reprec = testdir.inline_runsource(""" + reprec = testdir.inline_runsource( + """ def test_one(): assert 0 def test_two(): assert 0 def test_three(): assert 0 - """, '--maxfail=2') + """, + "--maxfail=2", + ) passed, skipped, failed = reprec.countoutcomes() assert failed == 2 assert passed == skipped == 0 def test_broken_repr(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import pytest class BrokenRepr1(object): foo=0 @@ -117,21 +134,30 @@ class SessionTests(object): t = BrokenRepr1() assert t.foo == 1 - """) + """ + ) reprec = testdir.inline_run(p) passed, skipped, failed = reprec.listoutcomes() assert len(failed) == 1 out = failed[0].longrepr.reprcrash.message - assert out.find("""[Exception("Ha Ha fooled you, I'm a broken repr().") raised in repr()]""") != -1 # ' + assert ( + out.find( + """[Exception("Ha Ha fooled you, I'm a broken repr().") raised in repr()]""" + ) + != -1 + ) # ' def test_skip_file_by_conftest(self, testdir): - testdir.makepyfile(conftest=""" + testdir.makepyfile( + conftest=""" import pytest def pytest_collect_file(): pytest.skip("intentional") - """, test_file=""" + """, + test_file=""" def test_one(): pass - """) + """, + ) try: reprec = testdir.inline_run(testdir.tmpdir) except pytest.skip.Exception: @@ -144,7 +170,8 @@ class SessionTests(object): class TestNewSession(SessionTests): def test_order_of_execution(self, testdir): - reprec = testdir.inline_runsource(""" + reprec = testdir.inline_runsource( + """ values = [] def test_1(): values.append(1) @@ -162,7 +189,8 @@ class TestNewSession(SessionTests): self.reslist.append(3) def test_4(self): assert self.reslist == [1,2,1,2,3] - """) + """ + ) passed, skipped, failed = reprec.countoutcomes() assert failed == skipped == 0 assert passed == 7 @@ -182,9 +210,9 @@ class 
TestNewSession(SessionTests): pass """, test_three="xxxdsadsadsadsa", - __init__="" + __init__="", ) - reprec = testdir.inline_run('--collect-only', p.dirpath()) + reprec = testdir.inline_run("--collect-only", p.dirpath()) itemstarted = reprec.getcalls("pytest_itemcollected") assert len(itemstarted) == 3 @@ -214,9 +242,12 @@ class TestNewSession(SessionTests): def test_plugin_specify(testdir): - pytest.raises(ImportError, """ + pytest.raises( + ImportError, + """ testdir.parseconfig("-p", "nqweotexistent") - """) + """, + ) # pytest.raises(ImportError, # "config.do_configure(config)" # ) @@ -224,7 +255,7 @@ def test_plugin_specify(testdir): def test_plugin_already_exists(testdir): config = testdir.parseconfig("-p", "terminal") - assert config.option.plugins == ['terminal'] + assert config.option.plugins == ["terminal"] config._do_configure() config._ensure_unconfigure() @@ -241,21 +272,26 @@ def test_exclude(testdir): def test_deselect(testdir): - testdir.makepyfile(test_a=""" + testdir.makepyfile( + test_a=""" import pytest def test_a1(): pass @pytest.mark.parametrize('b', range(3)) def test_a2(b): pass - """) - result = testdir.runpytest("-v", "--deselect=test_a.py::test_a2[1]", "--deselect=test_a.py::test_a2[2]") + """ + ) + result = testdir.runpytest( + "-v", "--deselect=test_a.py::test_a2[1]", "--deselect=test_a.py::test_a2[2]" + ) assert result.ret == 0 result.stdout.fnmatch_lines(["*2 passed, 2 deselected*"]) for line in result.stdout.lines: - assert not line.startswith(('test_a.py::test_a2[1]', 'test_a.py::test_a2[2]')) + assert not line.startswith(("test_a.py::test_a2[1]", "test_a.py::test_a2[2]")) def test_sessionfinish_with_start(testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ import os values = [] def pytest_sessionstart(): @@ -265,35 +301,43 @@ def test_sessionfinish_with_start(testdir): def pytest_sessionfinish(): assert values[0] == os.getcwd() - """) + """ + ) res = testdir.runpytest("--collect-only") assert res.ret == EXIT_NOTESTSCOLLECTED @pytest.mark.parametrize("path", ["root", "{relative}/root", "{environment}/root"]) def test_rootdir_option_arg(testdir, monkeypatch, path): - monkeypatch.setenv('PY_ROOTDIR_PATH', str(testdir.tmpdir)) - path = path.format(relative=str(testdir.tmpdir), - environment='$PY_ROOTDIR_PATH') + monkeypatch.setenv("PY_ROOTDIR_PATH", str(testdir.tmpdir)) + path = path.format(relative=str(testdir.tmpdir), environment="$PY_ROOTDIR_PATH") rootdir = testdir.mkdir("root") rootdir.mkdir("tests") - testdir.makepyfile(""" + testdir.makepyfile( + """ import os def test_one(): assert 1 - """) + """ + ) result = testdir.runpytest("--rootdir={}".format(path)) - result.stdout.fnmatch_lines(['*rootdir: {}/root, inifile:*'.format(testdir.tmpdir), "*1 passed*"]) + result.stdout.fnmatch_lines( + ["*rootdir: {}/root, inifile:*".format(testdir.tmpdir), "*1 passed*"] + ) def test_rootdir_wrong_option_arg(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import os def test_one(): assert 1 - """) + """ + ) result = testdir.runpytest("--rootdir=wrong_dir") - result.stderr.fnmatch_lines(["*Directory *wrong_dir* not found. Check your '--rootdir' option.*"]) + result.stderr.fnmatch_lines( + ["*Directory *wrong_dir* not found. 
Check your '--rootdir' option.*"] + ) diff --git a/testing/test_skipping.py b/testing/test_skipping.py index 90562c939..5d970e2fe 100644 --- a/testing/test_skipping.py +++ b/testing/test_skipping.py @@ -7,20 +7,23 @@ from _pytest.runner import runtestprotocol class TestEvaluator(object): + def test_no_marker(self, testdir): item = testdir.getitem("def test_func(): pass") - evalskipif = MarkEvaluator(item, 'skipif') + evalskipif = MarkEvaluator(item, "skipif") assert not evalskipif assert not evalskipif.istrue() def test_marked_no_args(self, testdir): - item = testdir.getitem(""" + item = testdir.getitem( + """ import pytest @pytest.mark.xyz def test_func(): pass - """) - ev = MarkEvaluator(item, 'xyz') + """ + ) + ev = MarkEvaluator(item, "xyz") assert ev assert ev.istrue() expl = ev.getexplanation() @@ -28,40 +31,46 @@ class TestEvaluator(object): assert not ev.get("run", False) def test_marked_one_arg(self, testdir): - item = testdir.getitem(""" + item = testdir.getitem( + """ import pytest @pytest.mark.xyz("hasattr(os, 'sep')") def test_func(): pass - """) - ev = MarkEvaluator(item, 'xyz') + """ + ) + ev = MarkEvaluator(item, "xyz") assert ev assert ev.istrue() expl = ev.getexplanation() assert expl == "condition: hasattr(os, 'sep')" - @pytest.mark.skipif('sys.version_info[0] >= 3') + @pytest.mark.skipif("sys.version_info[0] >= 3") def test_marked_one_arg_unicode(self, testdir): - item = testdir.getitem(""" + item = testdir.getitem( + """ import pytest @pytest.mark.xyz(u"hasattr(os, 'sep')") def test_func(): pass - """) - ev = MarkEvaluator(item, 'xyz') + """ + ) + ev = MarkEvaluator(item, "xyz") assert ev assert ev.istrue() expl = ev.getexplanation() assert expl == "condition: hasattr(os, 'sep')" def test_marked_one_arg_with_reason(self, testdir): - item = testdir.getitem(""" + item = testdir.getitem( + """ import pytest @pytest.mark.xyz("hasattr(os, 'sep')", attr=2, reason="hello world") def test_func(): pass - """) - ev = MarkEvaluator(item, 'xyz') + """ + ) + ev = MarkEvaluator(item, "xyz") assert ev assert ev.istrue() expl = ev.getexplanation() @@ -70,58 +79,67 @@ class TestEvaluator(object): def test_marked_one_arg_twice(self, testdir): lines = [ - '''@pytest.mark.skipif("not hasattr(os, 'murks')")''', - '''@pytest.mark.skipif("hasattr(os, 'murks')")''' + """@pytest.mark.skipif("not hasattr(os, 'murks')")""", + """@pytest.mark.skipif("hasattr(os, 'murks')")""", ] for i in range(0, 2): - item = testdir.getitem(""" + item = testdir.getitem( + """ import pytest %s %s def test_func(): pass - """ % (lines[i], lines[(i + 1) % 2])) - ev = MarkEvaluator(item, 'skipif') + """ + % (lines[i], lines[(i + 1) % 2]) + ) + ev = MarkEvaluator(item, "skipif") assert ev assert ev.istrue() expl = ev.getexplanation() assert expl == "condition: not hasattr(os, 'murks')" def test_marked_one_arg_twice2(self, testdir): - item = testdir.getitem(""" + item = testdir.getitem( + """ import pytest @pytest.mark.skipif("hasattr(os, 'murks')") @pytest.mark.skipif("not hasattr(os, 'murks')") def test_func(): pass - """) - ev = MarkEvaluator(item, 'skipif') + """ + ) + ev = MarkEvaluator(item, "skipif") assert ev assert ev.istrue() expl = ev.getexplanation() assert expl == "condition: not hasattr(os, 'murks')" def test_marked_skip_with_not_string(self, testdir): - item = testdir.getitem(""" + item = testdir.getitem( + """ import pytest @pytest.mark.skipif(False) def test_func(): pass - """) - ev = MarkEvaluator(item, 'skipif') + """ + ) + ev = MarkEvaluator(item, "skipif") exc = pytest.raises(pytest.fail.Exception, 
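# Editor's aside (a hedged sketch, not part of this diff): the user-facing
# form of the rule asserted here is that a boolean skipif condition must
# carry an explicit reason --
#
#     @pytest.mark.skipif(True, reason="not supported on this platform")
#     def test_something():
#         ...
#
# omitting reason= with a boolean makes pytest fail the test with the
# "you need to specify reason=STRING" message checked just below.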
ev.istrue) assert """Failed: you need to specify reason=STRING when using booleans as conditions.""" in exc.value.msg def test_skipif_class(self, testdir): - item, = testdir.getitems(""" + item, = testdir.getitems( + """ import pytest class TestClass(object): pytestmark = pytest.mark.skipif("config._hackxyz") def test_func(self): pass - """) + """ + ) item.config._hackxyz = 3 - ev = MarkEvaluator(item, 'skipif') + ev = MarkEvaluator(item, "skipif") assert ev.istrue() expl = ev.getexplanation() assert expl == "condition: config._hackxyz" @@ -129,14 +147,17 @@ class TestEvaluator(object): class TestXFail(object): - @pytest.mark.parametrize('strict', [True, False]) + @pytest.mark.parametrize("strict", [True, False]) def test_xfail_simple(self, testdir, strict): - item = testdir.getitem(""" + item = testdir.getitem( + """ import pytest @pytest.mark.xfail(strict=%s) def test_func(): assert 0 - """ % strict) + """ + % strict + ) reports = runtestprotocol(item, log=False) assert len(reports) == 3 callreport = reports[1] @@ -144,12 +165,14 @@ class TestXFail(object): assert callreport.wasxfail == "" def test_xfail_xpassed(self, testdir): - item = testdir.getitem(""" + item = testdir.getitem( + """ import pytest @pytest.mark.xfail(reason="this is an xfail") def test_func(): assert 1 - """) + """ + ) reports = runtestprotocol(item, log=False) assert len(reports) == 3 callreport = reports[1] @@ -160,24 +183,28 @@ class TestXFail(object): """ Verify that platform can be used with xfail statements. """ - item = testdir.getitem(""" + item = testdir.getitem( + """ import pytest @pytest.mark.xfail("platform.platform() == platform.platform()") def test_func(): assert 0 - """) + """ + ) reports = runtestprotocol(item, log=False) assert len(reports) == 3 callreport = reports[1] assert callreport.wasxfail def test_xfail_xpassed_strict(self, testdir): - item = testdir.getitem(""" + item = testdir.getitem( + """ import pytest @pytest.mark.xfail(strict=True, reason="nope") def test_func(): assert 1 - """) + """ + ) reports = runtestprotocol(item, log=False) assert len(reports) == 3 callreport = reports[1] @@ -186,48 +213,53 @@ class TestXFail(object): assert not hasattr(callreport, "wasxfail") def test_xfail_run_anyway(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.mark.xfail def test_func(): assert 0 def test_func2(): pytest.xfail("hello") - """) + """ + ) result = testdir.runpytest("--runxfail") - result.stdout.fnmatch_lines([ - "*def test_func():*", - "*assert 0*", - "*1 failed*1 pass*", - ]) + result.stdout.fnmatch_lines( + ["*def test_func():*", "*assert 0*", "*1 failed*1 pass*"] + ) def test_xfail_evalfalse_but_fails(self, testdir): - item = testdir.getitem(""" + item = testdir.getitem( + """ import pytest @pytest.mark.xfail('False') def test_func(): assert 0 - """) + """ + ) reports = runtestprotocol(item, log=False) callreport = reports[1] assert callreport.failed assert not hasattr(callreport, "wasxfail") - assert 'xfail' in callreport.keywords + assert "xfail" in callreport.keywords def test_xfail_not_report_default(self, testdir): - p = testdir.makepyfile(test_one=""" + p = testdir.makepyfile( + test_one=""" import pytest @pytest.mark.xfail def test_this(): assert 0 - """) - testdir.runpytest(p, '-v') + """ + ) + testdir.runpytest(p, "-v") # result.stdout.fnmatch_lines([ # "*HINT*use*-r*" # ]) def test_xfail_not_run_xfail_reporting(self, testdir): - p = testdir.makepyfile(test_one=""" + p = testdir.makepyfile( + test_one=""" import pytest 
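# editor's note (hedged, not part of this diff): xfail(run=False) reports the
# test as expected-to-fail without executing its body at all, e.g.
#
#     @pytest.mark.xfail(run=False, reason="would crash the interpreter")
#     def test_never_executed():
#         assert 0   # never reached
#
# with -rx the summary entry for such a test carries NOTRUN plus the reason,
# which is what the fnmatch patterns in this test assert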
@pytest.mark.xfail(run=False, reason="noway") def test_this(): @@ -238,90 +270,89 @@ class TestXFail(object): @pytest.mark.xfail("False", run=False, reason="huh") def test_this_false(): assert 1 - """) - result = testdir.runpytest(p, '-rx', ) - result.stdout.fnmatch_lines([ - "*test_one*test_this*", - "*NOTRUN*noway", - "*test_one*test_this_true*", - "*NOTRUN*condition:*True*", - "*1 passed*", - ]) + """ + ) + result = testdir.runpytest(p, "-rx") + result.stdout.fnmatch_lines( + [ + "*test_one*test_this*", + "*NOTRUN*noway", + "*test_one*test_this_true*", + "*NOTRUN*condition:*True*", + "*1 passed*", + ] + ) def test_xfail_not_run_no_setup_run(self, testdir): - p = testdir.makepyfile(test_one=""" + p = testdir.makepyfile( + test_one=""" import pytest @pytest.mark.xfail(run=False, reason="hello") def test_this(): assert 0 def setup_module(mod): raise ValueError(42) - """) - result = testdir.runpytest(p, '-rx', ) - result.stdout.fnmatch_lines([ - "*test_one*test_this*", - "*NOTRUN*hello", - "*1 xfailed*", - ]) + """ + ) + result = testdir.runpytest(p, "-rx") + result.stdout.fnmatch_lines( + ["*test_one*test_this*", "*NOTRUN*hello", "*1 xfailed*"] + ) def test_xfail_xpass(self, testdir): - p = testdir.makepyfile(test_one=""" + p = testdir.makepyfile( + test_one=""" import pytest @pytest.mark.xfail def test_that(): assert 1 - """) - result = testdir.runpytest(p, '-rX') - result.stdout.fnmatch_lines([ - "*XPASS*test_that*", - "*1 xpassed*" - ]) + """ + ) + result = testdir.runpytest(p, "-rX") + result.stdout.fnmatch_lines(["*XPASS*test_that*", "*1 xpassed*"]) assert result.ret == 0 def test_xfail_imperative(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import pytest def test_this(): pytest.xfail("hello") - """) + """ + ) result = testdir.runpytest(p) - result.stdout.fnmatch_lines([ - "*1 xfailed*", - ]) + result.stdout.fnmatch_lines(["*1 xfailed*"]) result = testdir.runpytest(p, "-rx") - result.stdout.fnmatch_lines([ - "*XFAIL*test_this*", - "*reason:*hello*", - ]) + result.stdout.fnmatch_lines(["*XFAIL*test_this*", "*reason:*hello*"]) result = testdir.runpytest(p, "--runxfail") result.stdout.fnmatch_lines("*1 pass*") def test_xfail_imperative_in_setup_function(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import pytest def setup_function(function): pytest.xfail("hello") def test_this(): assert 0 - """) + """ + ) result = testdir.runpytest(p) - result.stdout.fnmatch_lines([ - "*1 xfailed*", - ]) + result.stdout.fnmatch_lines(["*1 xfailed*"]) result = testdir.runpytest(p, "-rx") - result.stdout.fnmatch_lines([ - "*XFAIL*test_this*", - "*reason:*hello*", - ]) + result.stdout.fnmatch_lines(["*XFAIL*test_this*", "*reason:*hello*"]) result = testdir.runpytest(p, "--runxfail") - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ *def test_this* *1 fail* - """) + """ + ) def xtest_dynamic_xfail_set_during_setup(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import pytest def setup_function(function): pytest.mark.xfail(function) @@ -329,147 +360,164 @@ class TestXFail(object): assert 0 def test_that(): assert 1 - """) - result = testdir.runpytest(p, '-rxX') - result.stdout.fnmatch_lines([ - "*XFAIL*test_this*", - "*XPASS*test_that*", - ]) + """ + ) + result = testdir.runpytest(p, "-rxX") + result.stdout.fnmatch_lines(["*XFAIL*test_this*", "*XPASS*test_that*"]) def test_dynamic_xfail_no_run(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import pytest @pytest.fixture 
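# editor's note (hedged, not part of this diff): request.applymarker() adds a
# mark to the currently running test item during setup; the static equivalent
# of this fixture would be decorating the test directly, e.g.
#
#     @pytest.mark.xfail(run=False)
#     def test_this(arg):
#         assert 0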
def arg(request): request.applymarker(pytest.mark.xfail(run=False)) def test_this(arg): assert 0 - """) - result = testdir.runpytest(p, '-rxX') - result.stdout.fnmatch_lines([ - "*XFAIL*test_this*", - "*NOTRUN*", - ]) + """ + ) + result = testdir.runpytest(p, "-rxX") + result.stdout.fnmatch_lines(["*XFAIL*test_this*", "*NOTRUN*"]) def test_dynamic_xfail_set_during_funcarg_setup(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import pytest @pytest.fixture def arg(request): request.applymarker(pytest.mark.xfail) def test_this2(arg): assert 0 - """) + """ + ) result = testdir.runpytest(p) - result.stdout.fnmatch_lines([ - "*1 xfailed*", - ]) + result.stdout.fnmatch_lines(["*1 xfailed*"]) - @pytest.mark.parametrize('expected, actual, matchline', - [('TypeError', 'TypeError', "*1 xfailed*"), - ('(AttributeError, TypeError)', 'TypeError', "*1 xfailed*"), - ('TypeError', 'IndexError', "*1 failed*"), - ('(AttributeError, TypeError)', 'IndexError', "*1 failed*"), - ]) + @pytest.mark.parametrize( + "expected, actual, matchline", + [ + ("TypeError", "TypeError", "*1 xfailed*"), + ("(AttributeError, TypeError)", "TypeError", "*1 xfailed*"), + ("TypeError", "IndexError", "*1 failed*"), + ("(AttributeError, TypeError)", "IndexError", "*1 failed*"), + ], + ) def test_xfail_raises(self, expected, actual, matchline, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import pytest @pytest.mark.xfail(raises=%s) def test_raises(): raise %s() - """ % (expected, actual)) + """ + % (expected, actual) + ) result = testdir.runpytest(p) - result.stdout.fnmatch_lines([ - matchline, - ]) + result.stdout.fnmatch_lines([matchline]) def test_strict_sanity(self, testdir): """sanity check for xfail(strict=True): a failing test should behave exactly like a normal xfail. 
""" - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import pytest @pytest.mark.xfail(reason='unsupported feature', strict=True) def test_foo(): assert 0 - """) - result = testdir.runpytest(p, '-rxX') - result.stdout.fnmatch_lines([ - '*XFAIL*', - '*unsupported feature*', - ]) + """ + ) + result = testdir.runpytest(p, "-rxX") + result.stdout.fnmatch_lines(["*XFAIL*", "*unsupported feature*"]) assert result.ret == 0 - @pytest.mark.parametrize('strict', [True, False]) + @pytest.mark.parametrize("strict", [True, False]) def test_strict_xfail(self, testdir, strict): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import pytest @pytest.mark.xfail(reason='unsupported feature', strict=%s) def test_foo(): with open('foo_executed', 'w'): pass # make sure test executes - """ % strict) - result = testdir.runpytest(p, '-rxX') + """ + % strict + ) + result = testdir.runpytest(p, "-rxX") if strict: - result.stdout.fnmatch_lines([ - '*test_foo*', - '*XPASS(strict)*unsupported feature*', - ]) + result.stdout.fnmatch_lines( + ["*test_foo*", "*XPASS(strict)*unsupported feature*"] + ) else: - result.stdout.fnmatch_lines([ - '*test_strict_xfail*', - 'XPASS test_strict_xfail.py::test_foo unsupported feature', - ]) + result.stdout.fnmatch_lines( + [ + "*test_strict_xfail*", + "XPASS test_strict_xfail.py::test_foo unsupported feature", + ] + ) assert result.ret == (1 if strict else 0) - assert testdir.tmpdir.join('foo_executed').isfile() + assert testdir.tmpdir.join("foo_executed").isfile() - @pytest.mark.parametrize('strict', [True, False]) + @pytest.mark.parametrize("strict", [True, False]) def test_strict_xfail_condition(self, testdir, strict): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import pytest @pytest.mark.xfail(False, reason='unsupported feature', strict=%s) def test_foo(): pass - """ % strict) - result = testdir.runpytest(p, '-rxX') - result.stdout.fnmatch_lines('*1 passed*') + """ + % strict + ) + result = testdir.runpytest(p, "-rxX") + result.stdout.fnmatch_lines("*1 passed*") assert result.ret == 0 - @pytest.mark.parametrize('strict', [True, False]) + @pytest.mark.parametrize("strict", [True, False]) def test_xfail_condition_keyword(self, testdir, strict): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import pytest @pytest.mark.xfail(condition=False, reason='unsupported feature', strict=%s) def test_foo(): pass - """ % strict) - result = testdir.runpytest(p, '-rxX') - result.stdout.fnmatch_lines('*1 passed*') + """ + % strict + ) + result = testdir.runpytest(p, "-rxX") + result.stdout.fnmatch_lines("*1 passed*") assert result.ret == 0 - @pytest.mark.parametrize('strict_val', ['true', 'false']) + @pytest.mark.parametrize("strict_val", ["true", "false"]) def test_strict_xfail_default_from_file(self, testdir, strict_val): - testdir.makeini(''' + testdir.makeini( + """ [pytest] xfail_strict = %s - ''' % strict_val) - p = testdir.makepyfile(""" + """ + % strict_val + ) + p = testdir.makepyfile( + """ import pytest @pytest.mark.xfail(reason='unsupported feature') def test_foo(): pass - """) - result = testdir.runpytest(p, '-rxX') - strict = strict_val == 'true' - result.stdout.fnmatch_lines('*1 failed*' if strict else '*1 xpassed*') + """ + ) + result = testdir.runpytest(p, "-rxX") + strict = strict_val == "true" + result.stdout.fnmatch_lines("*1 failed*" if strict else "*1 xpassed*") assert result.ret == (1 if strict else 0) class TestXFailwithSetupTeardown(object): + def test_failing_setup_issue9(self, testdir): - testdir.makepyfile(""" + 
testdir.makepyfile( + """ import pytest def setup_function(func): assert 0 @@ -477,14 +525,14 @@ class TestXFailwithSetupTeardown(object): @pytest.mark.xfail def test_func(): pass - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*1 xfail*", - ]) + result.stdout.fnmatch_lines(["*1 xfail*"]) def test_failing_teardown_issue9(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest def teardown_function(func): assert 0 @@ -492,16 +540,17 @@ class TestXFailwithSetupTeardown(object): @pytest.mark.xfail def test_func(): pass - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*1 xfail*", - ]) + result.stdout.fnmatch_lines(["*1 xfail*"]) class TestSkip(object): + def test_skip_class(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.mark.skip class TestSomething(object): @@ -512,61 +561,62 @@ class TestSkip(object): def test_baz(): pass - """) + """ + ) rec = testdir.inline_run() rec.assertoutcome(skipped=2, passed=1) def test_skips_on_false_string(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.mark.skip('False') def test_foo(): pass - """) + """ + ) rec = testdir.inline_run() rec.assertoutcome(skipped=1) def test_arg_as_reason(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.mark.skip('testing stuff') def test_bar(): pass - """) - result = testdir.runpytest('-rs') - result.stdout.fnmatch_lines([ - "*testing stuff*", - "*1 skipped*", - ]) + """ + ) + result = testdir.runpytest("-rs") + result.stdout.fnmatch_lines(["*testing stuff*", "*1 skipped*"]) def test_skip_no_reason(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.mark.skip def test_foo(): pass - """) - result = testdir.runpytest('-rs') - result.stdout.fnmatch_lines([ - "*unconditional skip*", - "*1 skipped*", - ]) + """ + ) + result = testdir.runpytest("-rs") + result.stdout.fnmatch_lines(["*unconditional skip*", "*1 skipped*"]) def test_skip_with_reason(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.mark.skip(reason="for lolz") def test_bar(): pass - """) - result = testdir.runpytest('-rs') - result.stdout.fnmatch_lines([ - "*for lolz*", - "*1 skipped*", - ]) + """ + ) + result = testdir.runpytest("-rs") + result.stdout.fnmatch_lines(["*for lolz*", "*1 skipped*"]) def test_only_skips_marked_test(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.mark.skip def test_foo(): @@ -576,102 +626,112 @@ class TestSkip(object): pass def test_baz(): assert True - """) - result = testdir.runpytest('-rs') - result.stdout.fnmatch_lines([ - "*nothing in particular*", - "*1 passed*2 skipped*", - ]) + """ + ) + result = testdir.runpytest("-rs") + result.stdout.fnmatch_lines(["*nothing in particular*", "*1 passed*2 skipped*"]) def test_strict_and_skip(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.mark.skip def test_hello(): pass - """) + """ + ) result = testdir.runpytest("-rs") - result.stdout.fnmatch_lines([ - "*unconditional skip*", - "*1 skipped*", - ]) + result.stdout.fnmatch_lines(["*unconditional skip*", "*1 skipped*"]) class TestSkipif(object): + def test_skipif_conditional(self, testdir): - item = testdir.getitem(""" + item = testdir.getitem( + """ import pytest @pytest.mark.skipif("hasattr(os, 'sep')") def test_func(): pass - """) - x = 
pytest.raises(pytest.skip.Exception, lambda: - pytest_runtest_setup(item)) + """ + ) + x = pytest.raises(pytest.skip.Exception, lambda: pytest_runtest_setup(item)) assert x.value.msg == "condition: hasattr(os, 'sep')" - @pytest.mark.parametrize('params', [ - '"hasattr(sys, \'platform\')"', - 'True, reason="invalid platform"', - ]) + @pytest.mark.parametrize( + "params", ["\"hasattr(sys, 'platform')\"", 'True, reason="invalid platform"'] + ) def test_skipif_reporting(self, testdir, params): - p = testdir.makepyfile(test_foo=""" + p = testdir.makepyfile( + test_foo=""" import pytest @pytest.mark.skipif(%(params)s) def test_that(): assert 0 - """ % dict(params=params)) - result = testdir.runpytest(p, '-s', '-rs') - result.stdout.fnmatch_lines([ - "*SKIP*1*test_foo.py*platform*", - "*1 skipped*" - ]) + """ + % dict(params=params) + ) + result = testdir.runpytest(p, "-s", "-rs") + result.stdout.fnmatch_lines(["*SKIP*1*test_foo.py*platform*", "*1 skipped*"]) assert result.ret == 0 def test_skipif_using_platform(self, testdir): - item = testdir.getitem(""" + item = testdir.getitem( + """ import pytest @pytest.mark.skipif("platform.platform() == platform.platform()") def test_func(): pass - """) - pytest.raises(pytest.skip.Exception, lambda: - pytest_runtest_setup(item)) + """ + ) + pytest.raises(pytest.skip.Exception, lambda: pytest_runtest_setup(item)) - @pytest.mark.parametrize('marker, msg1, msg2', [ - ('skipif', 'SKIP', 'skipped'), - ('xfail', 'XPASS', 'xpassed'), - ]) + @pytest.mark.parametrize( + "marker, msg1, msg2", + [("skipif", "SKIP", "skipped"), ("xfail", "XPASS", "xpassed")], + ) def test_skipif_reporting_multiple(self, testdir, marker, msg1, msg2): - testdir.makepyfile(test_foo=""" + testdir.makepyfile( + test_foo=""" import pytest @pytest.mark.{marker}(False, reason='first_condition') @pytest.mark.{marker}(True, reason='second_condition') def test_foobar(): assert 1 - """.format(marker=marker)) - result = testdir.runpytest('-s', '-rsxX') - result.stdout.fnmatch_lines([ - "*{msg1}*test_foo.py*second_condition*".format(msg1=msg1), - "*1 {msg2}*".format(msg2=msg2), - ]) + """.format( + marker=marker + ) + ) + result = testdir.runpytest("-s", "-rsxX") + result.stdout.fnmatch_lines( + [ + "*{msg1}*test_foo.py*second_condition*".format(msg1=msg1), + "*1 {msg2}*".format(msg2=msg2), + ] + ) assert result.ret == 0 def test_skip_not_report_default(testdir): - p = testdir.makepyfile(test_one=""" + p = testdir.makepyfile( + test_one=""" import pytest def test_this(): pytest.skip("hello") - """) - result = testdir.runpytest(p, '-v') - result.stdout.fnmatch_lines([ - # "*HINT*use*-r*", - "*1 skipped*", - ]) + """ + ) + result = testdir.runpytest(p, "-v") + result.stdout.fnmatch_lines( + [ + # "*HINT*use*-r*", + "*1 skipped*" + ] + ) def test_skipif_class(testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import pytest class TestClass(object): @@ -680,11 +740,10 @@ def test_skipif_class(testdir): assert 0 def test_though(self): assert 0 - """) + """ + ) result = testdir.runpytest(p) - result.stdout.fnmatch_lines([ - "*2 skipped*" - ]) + result.stdout.fnmatch_lines(["*2 skipped*"]) def test_skip_reasons_folding(): @@ -695,6 +754,7 @@ def test_skip_reasons_folding(): class X(object): pass + ev1 = X() ev1.when = "execute" ev1.skipped = True @@ -735,12 +795,10 @@ def test_skipped_reasons_functional(testdir): import pytest def doskip(): pytest.skip('test') - """ + """, ) - result = testdir.runpytest('-rs') - result.stdout.fnmatch_lines([ - "*SKIP*2*conftest.py:4: test", - ]) + 
result = testdir.runpytest("-rs") + result.stdout.fnmatch_lines(["*SKIP*2*conftest.py:4: test"]) assert result.ret == 0 @@ -756,17 +814,16 @@ def test_skipped_folding(testdir): class TestClass(object): def test_method(self): pass - """, + """ ) - result = testdir.runpytest('-rs') - result.stdout.fnmatch_lines([ - "*SKIP*2*test_one.py: Folding" - ]) + result = testdir.runpytest("-rs") + result.stdout.fnmatch_lines(["*SKIP*2*test_one.py: Folding"]) assert result.ret == 0 def test_reportchars(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest def test_1(): assert 0 @@ -778,14 +835,12 @@ def test_reportchars(testdir): pass def test_4(): pytest.skip("four") - """) + """ + ) result = testdir.runpytest("-rfxXs") - result.stdout.fnmatch_lines([ - "FAIL*test_1*", - "XFAIL*test_2*", - "XPASS*test_3*", - "SKIP*four*", - ]) + result.stdout.fnmatch_lines( + ["FAIL*test_1*", "XFAIL*test_2*", "XPASS*test_3*", "SKIP*four*"] + ) def test_reportchars_error(testdir): @@ -797,15 +852,15 @@ def test_reportchars_error(testdir): test_simple=""" def test_foo(): pass - """) - result = testdir.runpytest('-rE') - result.stdout.fnmatch_lines([ - 'ERROR*test_foo*', - ]) + """, + ) + result = testdir.runpytest("-rE") + result.stdout.fnmatch_lines(["ERROR*test_foo*"]) def test_reportchars_all(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest def test_1(): assert 0 @@ -817,14 +872,12 @@ def test_reportchars_all(testdir): pass def test_4(): pytest.skip("four") - """) + """ + ) result = testdir.runpytest("-ra") - result.stdout.fnmatch_lines([ - "FAIL*test_1*", - "SKIP*four*", - "XFAIL*test_2*", - "XPASS*test_3*", - ]) + result.stdout.fnmatch_lines( + ["FAIL*test_1*", "SKIP*four*", "XFAIL*test_2*", "XPASS*test_3*"] + ) def test_reportchars_all_error(testdir): @@ -836,16 +889,16 @@ def test_reportchars_all_error(testdir): test_simple=""" def test_foo(): pass - """) - result = testdir.runpytest('-ra') - result.stdout.fnmatch_lines([ - 'ERROR*test_foo*', - ]) + """, + ) + result = testdir.runpytest("-ra") + result.stdout.fnmatch_lines(["ERROR*test_foo*"]) @pytest.mark.xfail("hasattr(sys, 'pypy_version_info')") def test_errors_in_xfail_skip_expressions(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.mark.skipif("asd") def test_nameerror(): @@ -856,27 +909,31 @@ def test_errors_in_xfail_skip_expressions(testdir): def test_func(): pass - """) + """ + ) result = testdir.runpytest() markline = " ^" if sys.platform.startswith("java"): # XXX report this to java markline = "*" + markline[8:] - result.stdout.fnmatch_lines([ - "*ERROR*test_nameerror*", - "*evaluating*skipif*expression*", - "*asd*", - "*ERROR*test_syntax*", - "*evaluating*xfail*expression*", - " syntax error", - markline, - "SyntaxError: invalid syntax", - "*1 pass*2 error*", - ]) + result.stdout.fnmatch_lines( + [ + "*ERROR*test_nameerror*", + "*evaluating*skipif*expression*", + "*asd*", + "*ERROR*test_syntax*", + "*evaluating*xfail*expression*", + " syntax error", + markline, + "SyntaxError: invalid syntax", + "*1 pass*2 error*", + ] + ) def test_xfail_skipif_with_globals(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest x = 3 @pytest.mark.skipif("x == 3") @@ -885,55 +942,59 @@ def test_xfail_skipif_with_globals(testdir): @pytest.mark.xfail("x == 3") def test_boolean(): assert 0 - """) + """ + ) result = testdir.runpytest("-rsx") - result.stdout.fnmatch_lines([ - "*SKIP*x == 3*", - "*XFAIL*test_boolean*", - "*x == 3*", - ]) + 
result.stdout.fnmatch_lines(["*SKIP*x == 3*", "*XFAIL*test_boolean*", "*x == 3*"]) def test_direct_gives_error(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.mark.skipif(True) def test_skip1(): pass - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*1 error*", - ]) + result.stdout.fnmatch_lines(["*1 error*"]) def test_default_markers(testdir): result = testdir.runpytest("--markers") - result.stdout.fnmatch_lines([ - "*skipif(*condition)*skip*", - "*xfail(*condition, reason=None, run=True, raises=None, strict=False)*expected failure*", - ]) + result.stdout.fnmatch_lines( + [ + "*skipif(*condition)*skip*", + "*xfail(*condition, reason=None, run=True, raises=None, strict=False)*expected failure*", + ] + ) def test_xfail_test_setup_exception(testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_runtest_setup(): 0 / 0 - """) - p = testdir.makepyfile(""" + """ + ) + p = testdir.makepyfile( + """ import pytest @pytest.mark.xfail def test_func(): assert 0 - """) + """ + ) result = testdir.runpytest(p) assert result.ret == 0 - assert 'xfailed' in result.stdout.str() - assert 'xpassed' not in result.stdout.str() + assert "xfailed" in result.stdout.str() + assert "xpassed" not in result.stdout.str() def test_imperativeskip_on_xfail_test(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.mark.xfail def test_that_fails(): @@ -942,23 +1003,30 @@ def test_imperativeskip_on_xfail_test(testdir): @pytest.mark.skipif("True") def test_hello(): pass - """) - testdir.makeconftest(""" + """ + ) + testdir.makeconftest( + """ import pytest def pytest_runtest_setup(item): pytest.skip("abc") - """) + """ + ) result = testdir.runpytest("-rsxX") - result.stdout.fnmatch_lines_random(""" + result.stdout.fnmatch_lines_random( + """ *SKIP*abc* *SKIP*condition: True* *2 skipped* - """) + """ + ) class TestBooleanCondition(object): + def test_skipif(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.mark.skipif(True, reason="True123") def test_func1(): @@ -966,42 +1034,54 @@ class TestBooleanCondition(object): @pytest.mark.skipif(False, reason="True123") def test_func2(): pass - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ *1 passed*1 skipped* - """) + """ + ) def test_skipif_noreason(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.mark.skipif(True) def test_func(): pass - """) + """ + ) result = testdir.runpytest("-rs") - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ *1 error* - """) + """ + ) def test_xfail(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.mark.xfail(True, reason="True123") def test_func(): assert 0 - """) + """ + ) result = testdir.runpytest("-rxs") - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ *XFAIL* *True123* *1 xfail* - """) + """ + ) def test_xfail_item(testdir): # Ensure pytest.xfail works with non-Python Item - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest class MyItem(pytest.Item): @@ -1011,11 +1091,12 @@ def test_xfail_item(testdir): def pytest_collect_file(path, parent): return MyItem("foo", parent) - """) + """ + ) result = testdir.inline_run() passed, skipped, failed = result.listoutcomes() assert not failed - xfailed = [r for r in skipped if hasattr(r, 'wasxfail')] + xfailed = [r for r in skipped if 
hasattr(r, "wasxfail")] assert xfailed @@ -1023,55 +1104,56 @@ def test_module_level_skip_error(testdir): """ Verify that using pytest.skip at module level causes a collection error """ - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.skip def test_func(): assert True - """) - result = testdir.runpytest() - result.stdout.fnmatch_lines( - "*Using pytest.skip outside of a test is not allowed*" + """ ) + result = testdir.runpytest() + result.stdout.fnmatch_lines("*Using pytest.skip outside of a test is not allowed*") def test_module_level_skip_with_allow_module_level(testdir): """ Verify that using pytest.skip(allow_module_level=True) is allowed """ - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest pytest.skip("skip_module_level", allow_module_level=True) def test_func(): assert 0 - """) - result = testdir.runpytest("-rxs") - result.stdout.fnmatch_lines( - "*SKIP*skip_module_level" + """ ) + result = testdir.runpytest("-rxs") + result.stdout.fnmatch_lines("*SKIP*skip_module_level") def test_invalid_skip_keyword_parameter(testdir): """ Verify that using pytest.skip() with unknown parameter raises an error """ - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest pytest.skip("skip_module_level", unknown=1) def test_func(): assert 0 - """) - result = testdir.runpytest() - result.stdout.fnmatch_lines( - "*TypeError:*['unknown']*" + """ ) + result = testdir.runpytest() + result.stdout.fnmatch_lines("*TypeError:*['unknown']*") def test_mark_xfail_item(testdir): # Ensure pytest.mark.xfail works with non-Python Item - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest class MyItem(pytest.Item): @@ -1084,24 +1166,29 @@ def test_mark_xfail_item(testdir): def pytest_collect_file(path, parent): return MyItem("foo", parent) - """) + """ + ) result = testdir.inline_run() passed, skipped, failed = result.listoutcomes() assert not failed - xfailed = [r for r in skipped if hasattr(r, 'wasxfail')] + xfailed = [r for r in skipped if hasattr(r, "wasxfail")] assert xfailed def test_summary_list_after_errors(testdir): """Ensure the list of errors/fails/xfails/skips appears after tracebacks in terminal reporting.""" - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest def test_fail(): assert 0 - """) - result = testdir.runpytest('-ra') - result.stdout.fnmatch_lines([ - '=* FAILURES *=', - '*= short test summary info =*', - 'FAIL test_summary_list_after_errors.py::test_fail', - ]) + """ + ) + result = testdir.runpytest("-ra") + result.stdout.fnmatch_lines( + [ + "=* FAILURES *=", + "*= short test summary info =*", + "FAIL test_summary_list_after_errors.py::test_fail", + ] + ) diff --git a/testing/test_terminal.py b/testing/test_terminal.py index 8ef25062e..7c9d8ccf7 100644 --- a/testing/test_terminal.py +++ b/testing/test_terminal.py @@ -14,10 +14,11 @@ from _pytest.terminal import TerminalReporter, repr_pythonversion, getreportopt from _pytest.terminal import build_summary_stats_line, _plugin_nameversions -DistInfo = collections.namedtuple('DistInfo', ['project_name', 'version']) +DistInfo = collections.namedtuple("DistInfo", ["project_name", "version"]) class Option(object): + def __init__(self, verbose=False, fulltrace=False): self.verbose = verbose self.fulltrace = fulltrace @@ -26,35 +27,40 @@ class Option(object): def args(self): values = [] if self.verbose: - values.append('-v') + values.append("-v") if self.fulltrace: - values.append('--fulltrace') + values.append("--fulltrace") return values -@pytest.fixture(params=[ - 
Option(verbose=False), - Option(verbose=True), - Option(verbose=-1), - Option(fulltrace=True), -], ids=[ - "default", - "verbose", - "quiet", - "fulltrace", -]) +@pytest.fixture( + params=[ + Option(verbose=False), + Option(verbose=True), + Option(verbose=-1), + Option(fulltrace=True), + ], + ids=["default", "verbose", "quiet", "fulltrace"], +) def option(request): return request.param -@pytest.mark.parametrize('input,expected', [ - ([DistInfo(project_name='test', version=1)], ['test-1']), - ([DistInfo(project_name='pytest-test', version=1)], ['test-1']), - ([ - DistInfo(project_name='test', version=1), - DistInfo(project_name='test', version=1) - ], ['test-1']), -], ids=['normal', 'prefix-strip', 'deduplicate']) +@pytest.mark.parametrize( + "input,expected", + [ + ([DistInfo(project_name="test", version=1)], ["test-1"]), + ([DistInfo(project_name="pytest-test", version=1)], ["test-1"]), + ( + [ + DistInfo(project_name="test", version=1), + DistInfo(project_name="test", version=1), + ], + ["test-1"], + ), + ], + ids=["normal", "prefix-strip", "deduplicate"], +) def test_plugin_nameversion(input, expected): pluginlist = [(None, x) for x in input] result = _plugin_nameversions(pluginlist) @@ -62,8 +68,10 @@ def test_plugin_nameversion(input, expected): class TestTerminal(object): + def test_pass_skip_fail(self, testdir, option): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest def test_ok(): pass @@ -71,39 +79,36 @@ class TestTerminal(object): pytest.skip("xx") def test_func(): assert 0 - """) + """ + ) result = testdir.runpytest(*option.args) if option.verbose: - result.stdout.fnmatch_lines([ - "*test_pass_skip_fail.py::test_ok PASS*", - "*test_pass_skip_fail.py::test_skip SKIP*", - "*test_pass_skip_fail.py::test_func FAIL*", - ]) + result.stdout.fnmatch_lines( + [ + "*test_pass_skip_fail.py::test_ok PASS*", + "*test_pass_skip_fail.py::test_skip SKIP*", + "*test_pass_skip_fail.py::test_func FAIL*", + ] + ) else: - result.stdout.fnmatch_lines([ - "*test_pass_skip_fail.py .sF*" - ]) - result.stdout.fnmatch_lines([ - " def test_func():", - "> assert 0", - "E assert 0", - ]) + result.stdout.fnmatch_lines(["*test_pass_skip_fail.py .sF*"]) + result.stdout.fnmatch_lines( + [" def test_func():", "> assert 0", "E assert 0"] + ) def test_internalerror(self, testdir, linecomp): modcol = testdir.getmodulecol("def test_one(): pass") rep = TerminalReporter(modcol.config, file=linecomp.stringio) excinfo = pytest.raises(ValueError, "raise ValueError('hello')") rep.pytest_internalerror(excinfo.getrepr()) - linecomp.assert_contains_lines([ - "INTERNALERROR> *ValueError*hello*" - ]) + linecomp.assert_contains_lines(["INTERNALERROR> *ValueError*hello*"]) def test_writeline(self, testdir, linecomp): modcol = testdir.getmodulecol("def test_one(): pass") rep = TerminalReporter(modcol.config, file=linecomp.stringio) rep.write_fspath_result(modcol.nodeid, ".") rep.write_line("hello world") - lines = linecomp.stringio.getvalue().split('\n') + lines = linecomp.stringio.getvalue().split("\n") assert not lines[0] assert lines[1].endswith(modcol.name + " .") assert lines[2] == "hello world" @@ -113,191 +118,212 @@ class TestTerminal(object): tr = TerminalReporter(item.config, file=linecomp.stringio) item.config.pluginmanager.register(tr) location = item.reportinfo() - tr.config.hook.pytest_runtest_logstart(nodeid=item.nodeid, - location=location, fspath=str(item.fspath)) - linecomp.assert_contains_lines([ - "*test_show_runtest_logstart.py*" - ]) + tr.config.hook.pytest_runtest_logstart( + 
nodeid=item.nodeid, location=location, fspath=str(item.fspath) + ) + linecomp.assert_contains_lines(["*test_show_runtest_logstart.py*"]) def test_runtest_location_shown_before_test_starts(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_1(): import time time.sleep(20) - """) + """ + ) child = testdir.spawn_pytest("") child.expect(".*test_runtest_location.*py") child.sendeof() child.kill(15) def test_itemreport_subclasses_show_subclassed_file(self, testdir): - testdir.makepyfile(test_p1=""" + testdir.makepyfile( + test_p1=""" class BaseTests(object): def test_p1(self): pass class TestClass(BaseTests): pass - """) - p2 = testdir.makepyfile(test_p2=""" + """ + ) + p2 = testdir.makepyfile( + test_p2=""" from test_p1 import BaseTests class TestMore(BaseTests): pass - """) + """ + ) result = testdir.runpytest(p2) - result.stdout.fnmatch_lines([ - "*test_p2.py .*", - "*1 passed*", - ]) + result.stdout.fnmatch_lines(["*test_p2.py .*", "*1 passed*"]) result = testdir.runpytest("-v", p2) - result.stdout.fnmatch_lines([ - "*test_p2.py::TestMore::test_p1* <- *test_p1.py*PASSED*", - ]) + result.stdout.fnmatch_lines( + ["*test_p2.py::TestMore::test_p1* <- *test_p1.py*PASSED*"] + ) def test_itemreport_directclasses_not_shown_as_subclasses(self, testdir): a = testdir.mkpydir("a123") - a.join("test_hello123.py").write(_pytest._code.Source(""" + a.join("test_hello123.py").write( + _pytest._code.Source( + """ class TestClass(object): def test_method(self): pass - """)) + """ + ) + ) result = testdir.runpytest("-v") assert result.ret == 0 - result.stdout.fnmatch_lines([ - "*a123/test_hello123.py*PASS*", - ]) + result.stdout.fnmatch_lines(["*a123/test_hello123.py*PASS*"]) assert " <- " not in result.stdout.str() def test_keyboard_interrupt(self, testdir, option): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_foobar(): assert 0 def test_spamegg(): import py; pytest.skip('skip me please!') def test_interrupt_me(): raise KeyboardInterrupt # simulating the user - """) + """ + ) result = testdir.runpytest(*option.args, no_reraise_ctrlc=True) - result.stdout.fnmatch_lines([ - " def test_foobar():", - "> assert 0", - "E assert 0", - "*_keyboard_interrupt.py:6: KeyboardInterrupt*", - ]) + result.stdout.fnmatch_lines( + [ + " def test_foobar():", + "> assert 0", + "E assert 0", + "*_keyboard_interrupt.py:6: KeyboardInterrupt*", + ] + ) if option.fulltrace: - result.stdout.fnmatch_lines([ - "*raise KeyboardInterrupt # simulating the user*", - ]) + result.stdout.fnmatch_lines( + ["*raise KeyboardInterrupt # simulating the user*"] + ) else: - result.stdout.fnmatch_lines([ - "to show a full traceback on KeyboardInterrupt use --fulltrace" - ]) - result.stdout.fnmatch_lines(['*KeyboardInterrupt*']) + result.stdout.fnmatch_lines( + ["to show a full traceback on KeyboardInterrupt use --fulltrace"] + ) + result.stdout.fnmatch_lines(["*KeyboardInterrupt*"]) def test_keyboard_in_sessionstart(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_sessionstart(): raise KeyboardInterrupt - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ def test_foobar(): pass - """) + """ + ) result = testdir.runpytest(no_reraise_ctrlc=True) assert result.ret == 2 - result.stdout.fnmatch_lines(['*KeyboardInterrupt*']) + result.stdout.fnmatch_lines(["*KeyboardInterrupt*"]) def test_collect_single_item(self, testdir): """Use singular 'item' when reporting a single test item""" - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_foobar(): pass - 
""") + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines(['collected 1 item']) + result.stdout.fnmatch_lines(["collected 1 item"]) def test_rewrite(self, testdir, monkeypatch): config = testdir.parseconfig() f = py.io.TextIO() - monkeypatch.setattr(f, 'isatty', lambda *args: True) + monkeypatch.setattr(f, "isatty", lambda *args: True) tr = TerminalReporter(config, f) tr._tw.fullwidth = 10 - tr.write('hello') - tr.rewrite('hey', erase=True) - assert f.getvalue() == 'hello' + '\r' + 'hey' + (6 * ' ') + tr.write("hello") + tr.rewrite("hey", erase=True) + assert f.getvalue() == "hello" + "\r" + "hey" + (6 * " ") class TestCollectonly(object): + def test_collectonly_basic(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_func(): pass - """) - result = testdir.runpytest("--collect-only",) - result.stdout.fnmatch_lines([ - "", - " ", - ]) + """ + ) + result = testdir.runpytest("--collect-only") + result.stdout.fnmatch_lines( + ["", " "] + ) def test_collectonly_skipped_module(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest pytest.skip("hello") - """) + """ + ) result = testdir.runpytest("--collect-only", "-rs") - result.stdout.fnmatch_lines([ - "*ERROR collecting*", - ]) + result.stdout.fnmatch_lines(["*ERROR collecting*"]) def test_collectonly_failed_module(self, testdir): testdir.makepyfile("""raise ValueError(0)""") result = testdir.runpytest("--collect-only") - result.stdout.fnmatch_lines([ - "*raise ValueError*", - "*1 error*", - ]) + result.stdout.fnmatch_lines(["*raise ValueError*", "*1 error*"]) def test_collectonly_fatal(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_collectstart(collector): assert 0, "urgs" - """) + """ + ) result = testdir.runpytest("--collect-only") - result.stdout.fnmatch_lines([ - "*INTERNAL*args*" - ]) + result.stdout.fnmatch_lines(["*INTERNAL*args*"]) assert result.ret == 3 def test_collectonly_simple(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ def test_func1(): pass class TestClass(object): def test_method(self): pass - """) + """ + ) result = testdir.runpytest("--collect-only", p) # assert stderr.startswith("inserting into sys.path") assert result.ret == 0 - result.stdout.fnmatch_lines([ - "*", - "* ", - "* ", - # "* ", - "* ", - ]) + result.stdout.fnmatch_lines( + [ + "*", + "* ", + "* ", + # "* ", + "* ", + ] + ) def test_collectonly_error(self, testdir): p = testdir.makepyfile("import Errlkjqweqwe") result = testdir.runpytest("--collect-only", p) assert result.ret == 2 - result.stdout.fnmatch_lines(_pytest._code.Source(""" + result.stdout.fnmatch_lines( + _pytest._code.Source( + """ *ERROR* *ImportError* *No module named *Errlk* *1 error* - """).strip()) + """ + ).strip() + ) def test_collectonly_missing_path(self, testdir): """this checks issue 115, @@ -306,28 +332,22 @@ class TestCollectonly(object): """ result = testdir.runpytest("--collect-only", "uhm_missing_path") assert result.ret == 4 - result.stderr.fnmatch_lines([ - '*ERROR: file not found*', - ]) + result.stderr.fnmatch_lines(["*ERROR: file not found*"]) def test_collectonly_quiet(self, testdir): testdir.makepyfile("def test_foo(): pass") result = testdir.runpytest("--collect-only", "-q") - result.stdout.fnmatch_lines([ - '*test_foo*', - ]) + result.stdout.fnmatch_lines(["*test_foo*"]) def test_collectonly_more_quiet(self, testdir): testdir.makepyfile(test_fun="def test_foo(): pass") result = testdir.runpytest("--collect-only", "-qq") - 
result.stdout.fnmatch_lines([ - '*test_fun.py: 1*', - ]) + result.stdout.fnmatch_lines(["*test_fun.py: 1*"]) def test_repr_python_version(monkeypatch): try: - monkeypatch.setattr(sys, 'version_info', (2, 5, 1, 'final', 0)) + monkeypatch.setattr(sys, "version_info", (2, 5, 1, "final", 0)) assert repr_pythonversion() == "2.5.1-final-0" sys.version_info = x = (2, 3) assert repr_pythonversion() == str(x) @@ -336,68 +356,81 @@ def test_repr_python_version(monkeypatch): class TestFixtureReporting(object): + def test_setup_fixture_error(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def setup_function(function): print ("setup func") assert 0 def test_nada(): pass - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*ERROR at setup of test_nada*", - "*setup_function(function):*", - "*setup func*", - "*assert 0*", - "*1 error*", - ]) + result.stdout.fnmatch_lines( + [ + "*ERROR at setup of test_nada*", + "*setup_function(function):*", + "*setup func*", + "*assert 0*", + "*1 error*", + ] + ) assert result.ret != 0 def test_teardown_fixture_error(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_nada(): pass def teardown_function(function): print ("teardown func") assert 0 - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*ERROR at teardown*", - "*teardown_function(function):*", - "*assert 0*", - "*Captured stdout*", - "*teardown func*", - "*1 passed*1 error*", - ]) + result.stdout.fnmatch_lines( + [ + "*ERROR at teardown*", + "*teardown_function(function):*", + "*assert 0*", + "*Captured stdout*", + "*teardown func*", + "*1 passed*1 error*", + ] + ) def test_teardown_fixture_error_and_test_failure(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_fail(): assert 0, "failingfunc" def teardown_function(function): print ("teardown func") assert False - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*ERROR at teardown of test_fail*", - "*teardown_function(function):*", - "*assert False*", - "*Captured stdout*", - "*teardown func*", - - "*test_fail*", - "*def test_fail():", - "*failingfunc*", - "*1 failed*1 error*", - ]) + result.stdout.fnmatch_lines( + [ + "*ERROR at teardown of test_fail*", + "*teardown_function(function):*", + "*assert False*", + "*Captured stdout*", + "*teardown func*", + "*test_fail*", + "*def test_fail():", + "*failingfunc*", + "*1 failed*1 error*", + ] + ) def test_setup_teardown_output_and_test_failure(self, testdir): """ Test for issue #442 """ - testdir.makepyfile(""" + testdir.makepyfile( + """ def setup_function(function): print ("setup func") @@ -406,24 +439,28 @@ class TestFixtureReporting(object): def teardown_function(function): print ("teardown func") - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*test_fail*", - "*def test_fail():", - "*failingfunc*", - "*Captured stdout setup*", - "*setup func*", - "*Captured stdout teardown*", - "*teardown func*", - - "*1 failed*", - ]) + result.stdout.fnmatch_lines( + [ + "*test_fail*", + "*def test_fail():", + "*failingfunc*", + "*Captured stdout setup*", + "*setup func*", + "*Captured stdout teardown*", + "*teardown func*", + "*1 failed*", + ] + ) class TestTerminalFunctional(object): + def test_deselected(self, testdir): - testpath = testdir.makepyfile(""" + testpath = testdir.makepyfile( + """ def test_one(): pass def test_two(): @@ -431,17 +468,16 @@ class TestTerminalFunctional(object): def test_three(): pass """ - ) + ) result = 
testdir.runpytest("-k", "test_two:", testpath) - result.stdout.fnmatch_lines([ - "collected 3 items / 1 deselected", - "*test_deselected.py ..*", - ]) + result.stdout.fnmatch_lines( + ["collected 3 items / 1 deselected", "*test_deselected.py ..*"] + ) assert result.ret == 0 - def test_show_deselected_items_using_markexpr_before_test_execution( - self, testdir): - testdir.makepyfile(""" + def test_show_deselected_items_using_markexpr_before_test_execution(self, testdir): + testdir.makepyfile( + """ import pytest @pytest.mark.foo @@ -454,18 +490,22 @@ class TestTerminalFunctional(object): def test_pass(): pass - """) - result = testdir.runpytest('-m', 'not foo') - result.stdout.fnmatch_lines([ - "collected 3 items / 1 deselected", - "*test_show_des*.py ..*", - "*= 2 passed, 1 deselected in * =*", - ]) + """ + ) + result = testdir.runpytest("-m", "not foo") + result.stdout.fnmatch_lines( + [ + "collected 3 items / 1 deselected", + "*test_show_des*.py ..*", + "*= 2 passed, 1 deselected in * =*", + ] + ) assert "= 1 deselected =" not in result.stdout.str() assert result.ret == 0 def test_no_skip_summary_if_failure(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest def test_ok(): pass @@ -473,66 +513,78 @@ class TestTerminalFunctional(object): assert 0 def test_skip(): pytest.skip("dontshow") - """) + """ + ) result = testdir.runpytest() assert result.stdout.str().find("skip test summary") == -1 assert result.ret == 1 def test_passes(self, testdir): - p1 = testdir.makepyfile(""" + p1 = testdir.makepyfile( + """ def test_passes(): pass class TestClass(object): def test_method(self): pass - """) + """ + ) old = p1.dirpath().chdir() try: result = testdir.runpytest() finally: old.chdir() - result.stdout.fnmatch_lines([ - "test_passes.py ..*", - "* 2 pass*", - ]) + result.stdout.fnmatch_lines(["test_passes.py ..*", "* 2 pass*"]) assert result.ret == 0 def test_header_trailer_info(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_passes(): pass - """) + """ + ) result = testdir.runpytest() verinfo = ".".join(map(str, sys.version_info[:3])) - result.stdout.fnmatch_lines([ - "*===== test session starts ====*", - "platform %s -- Python %s*pytest-%s*py-%s*pluggy-%s" % ( - sys.platform, verinfo, - pytest.__version__, py.__version__, pluggy.__version__), - "*test_header_trailer_info.py .*", - "=* 1 passed*in *.[0-9][0-9] seconds *=", - ]) + result.stdout.fnmatch_lines( + [ + "*===== test session starts ====*", + "platform %s -- Python %s*pytest-%s*py-%s*pluggy-%s" + % ( + sys.platform, + verinfo, + pytest.__version__, + py.__version__, + pluggy.__version__, + ), + "*test_header_trailer_info.py .*", + "=* 1 passed*in *.[0-9][0-9] seconds *=", + ] + ) if pytest.config.pluginmanager.list_plugin_distinfo(): - result.stdout.fnmatch_lines([ - "plugins: *", - ]) + result.stdout.fnmatch_lines(["plugins: *"]) def test_showlocals(self, testdir): - p1 = testdir.makepyfile(""" + p1 = testdir.makepyfile( + """ def test_showlocals(): x = 3 y = "x" * 5000 assert 0 - """) - result = testdir.runpytest(p1, '-l') - result.stdout.fnmatch_lines([ - # "_ _ * Locals *", - "x* = 3", - "y* = 'xxxxxx*" - ]) + """ + ) + result = testdir.runpytest(p1, "-l") + result.stdout.fnmatch_lines( + [ + # "_ _ * Locals *", + "x* = 3", + "y* = 'xxxxxx*", + ] + ) def test_verbose_reporting(self, testdir, pytestconfig): - p1 = testdir.makepyfile(""" + p1 = testdir.makepyfile( + """ import pytest def test_fail(): raise ValueError() @@ -545,151 +597,153 @@ class 
TestTerminalFunctional(object): def check(x): assert x == 1 yield check, 0 - """) - result = testdir.runpytest(p1, '-v') - result.stdout.fnmatch_lines([ - "*test_verbose_reporting.py::test_fail *FAIL*", - "*test_verbose_reporting.py::test_pass *PASS*", - "*test_verbose_reporting.py::TestClass::test_skip *SKIP*", - "*test_verbose_reporting.py::test_gen*0* *FAIL*", - ]) + """ + ) + result = testdir.runpytest(p1, "-v") + result.stdout.fnmatch_lines( + [ + "*test_verbose_reporting.py::test_fail *FAIL*", + "*test_verbose_reporting.py::test_pass *PASS*", + "*test_verbose_reporting.py::TestClass::test_skip *SKIP*", + "*test_verbose_reporting.py::test_gen*0* *FAIL*", + ] + ) assert result.ret == 1 if not pytestconfig.pluginmanager.get_plugin("xdist"): pytest.skip("xdist plugin not installed") - result = testdir.runpytest(p1, '-v', '-n 1') - result.stdout.fnmatch_lines([ - "*FAIL*test_verbose_reporting.py::test_fail*", - ]) + result = testdir.runpytest(p1, "-v", "-n 1") + result.stdout.fnmatch_lines(["*FAIL*test_verbose_reporting.py::test_fail*"]) assert result.ret == 1 def test_quiet_reporting(self, testdir): p1 = testdir.makepyfile("def test_pass(): pass") - result = testdir.runpytest(p1, '-q') + result = testdir.runpytest(p1, "-q") s = result.stdout.str() - assert 'test session starts' not in s + assert "test session starts" not in s assert p1.basename not in s assert "===" not in s assert "passed" in s def test_more_quiet_reporting(self, testdir): p1 = testdir.makepyfile("def test_pass(): pass") - result = testdir.runpytest(p1, '-qq') + result = testdir.runpytest(p1, "-qq") s = result.stdout.str() - assert 'test session starts' not in s + assert "test session starts" not in s assert p1.basename not in s assert "===" not in s assert "passed" not in s def test_report_collectionfinish_hook(self, testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_report_collectionfinish(config, startdir, items): return ['hello from hook: {0} items'.format(len(items))] - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ import pytest @pytest.mark.parametrize('i', range(3)) def test(i): pass - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "collected 3 items", - "hello from hook: 3 items", - ]) + result.stdout.fnmatch_lines(["collected 3 items", "hello from hook: 3 items"]) def test_fail_extra_reporting(testdir): testdir.makepyfile("def test_this(): assert 0") result = testdir.runpytest() - assert 'short test summary' not in result.stdout.str() - result = testdir.runpytest('-rf') - result.stdout.fnmatch_lines([ - "*test summary*", - "FAIL*test_fail_extra_reporting*", - ]) + assert "short test summary" not in result.stdout.str() + result = testdir.runpytest("-rf") + result.stdout.fnmatch_lines(["*test summary*", "FAIL*test_fail_extra_reporting*"]) def test_fail_reporting_on_pass(testdir): testdir.makepyfile("def test_this(): assert 1") - result = testdir.runpytest('-rf') - assert 'short test summary' not in result.stdout.str() + result = testdir.runpytest("-rf") + assert "short test summary" not in result.stdout.str() def test_pass_extra_reporting(testdir): testdir.makepyfile("def test_this(): assert 1") result = testdir.runpytest() - assert 'short test summary' not in result.stdout.str() - result = testdir.runpytest('-rp') - result.stdout.fnmatch_lines([ - "*test summary*", - "PASS*test_pass_extra_reporting*", - ]) + assert "short test summary" not in result.stdout.str() + result = testdir.runpytest("-rp") + 
result.stdout.fnmatch_lines(["*test summary*", "PASS*test_pass_extra_reporting*"]) def test_pass_reporting_on_fail(testdir): testdir.makepyfile("def test_this(): assert 0") - result = testdir.runpytest('-rp') - assert 'short test summary' not in result.stdout.str() + result = testdir.runpytest("-rp") + assert "short test summary" not in result.stdout.str() def test_pass_output_reporting(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_pass_output(): print("Four score and seven years ago...") - """) + """ + ) result = testdir.runpytest() - assert 'Four score and seven years ago...' not in result.stdout.str() - result = testdir.runpytest('-rP') - result.stdout.fnmatch_lines([ - "Four score and seven years ago...", - ]) + assert "Four score and seven years ago..." not in result.stdout.str() + result = testdir.runpytest("-rP") + result.stdout.fnmatch_lines(["Four score and seven years ago..."]) def test_color_yes(testdir): testdir.makepyfile("def test_this(): assert 1") - result = testdir.runpytest('--color=yes') - assert 'test session starts' in result.stdout.str() - assert '\x1b[1m' in result.stdout.str() + result = testdir.runpytest("--color=yes") + assert "test session starts" in result.stdout.str() + assert "\x1b[1m" in result.stdout.str() def test_color_no(testdir): testdir.makepyfile("def test_this(): assert 1") - result = testdir.runpytest('--color=no') - assert 'test session starts' in result.stdout.str() - assert '\x1b[1m' not in result.stdout.str() + result = testdir.runpytest("--color=no") + assert "test session starts" in result.stdout.str() + assert "\x1b[1m" not in result.stdout.str() -@pytest.mark.parametrize('verbose', [True, False]) +@pytest.mark.parametrize("verbose", [True, False]) def test_color_yes_collection_on_non_atty(testdir, verbose): """skip collect progress report when working on non-terminals. #1397 """ - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.mark.parametrize('i', range(10)) def test_this(i): assert 1 - """) - args = ['--color=yes'] + """ + ) + args = ["--color=yes"] if verbose: - args.append('-vv') + args.append("-vv") result = testdir.runpytest(*args) - assert 'test session starts' in result.stdout.str() - assert '\x1b[1m' in result.stdout.str() - assert 'collecting 10 items' not in result.stdout.str() + assert "test session starts" in result.stdout.str() + assert "\x1b[1m" in result.stdout.str() + assert "collecting 10 items" not in result.stdout.str() if verbose: - assert 'collecting ...' in result.stdout.str() - assert 'collected 10 items' in result.stdout.str() + assert "collecting ..." 
in result.stdout.str() + assert "collected 10 items" in result.stdout.str() def test_getreportopt(): + class Config(object): + class Option(object): reportchars = "" disable_warnings = True + option = Option() + config = Config() config.option.reportchars = "sf" @@ -709,7 +763,8 @@ def test_getreportopt(): def test_terminalreporter_reportopt_addopts(testdir): testdir.makeini("[pytest]\naddopts=-rs") - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture @@ -719,15 +774,15 @@ def test_terminalreporter_reportopt_addopts(testdir): def test_opt(tr): assert tr.hasopt('skipped') assert not tr.hasopt('qwe') - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*1 passed*" - ]) + result.stdout.fnmatch_lines(["*1 passed*"]) def test_tbstyle_short(testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import pytest @pytest.fixture @@ -736,27 +791,22 @@ def test_tbstyle_short(testdir): def test_opt(arg): x = 0 assert x - """) + """ + ) result = testdir.runpytest("--tb=short") s = result.stdout.str() - assert 'arg = 42' not in s - assert 'x = 0' not in s - result.stdout.fnmatch_lines([ - "*%s:8*" % p.basename, - " assert x", - "E assert*", - ]) + assert "arg = 42" not in s + assert "x = 0" not in s + result.stdout.fnmatch_lines(["*%s:8*" % p.basename, " assert x", "E assert*"]) result = testdir.runpytest() s = result.stdout.str() - assert 'x = 0' in s - assert 'assert x' in s + assert "x = 0" in s + assert "assert x" in s def test_traceconfig(testdir, monkeypatch): result = testdir.runpytest("--traceconfig") - result.stdout.fnmatch_lines([ - "*active plugins*" - ]) + result.stdout.fnmatch_lines(["*active plugins*"]) assert result.ret == EXIT_NOTESTSCOLLECTED @@ -768,55 +818,56 @@ class TestGenericReporting(object): def test_collect_fail(self, testdir, option): testdir.makepyfile("import xyz\n") result = testdir.runpytest(*option.args) - result.stdout.fnmatch_lines([ - "ImportError while importing*", - "*No module named *xyz*", - "*1 error*", - ]) + result.stdout.fnmatch_lines( + ["ImportError while importing*", "*No module named *xyz*", "*1 error*"] + ) def test_maxfailures(self, testdir, option): - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_1(): assert 0 def test_2(): assert 0 def test_3(): assert 0 - """) + """ + ) result = testdir.runpytest("--maxfail=2", *option.args) - result.stdout.fnmatch_lines([ - "*def test_1():*", - "*def test_2():*", - "*2 failed*", - ]) + result.stdout.fnmatch_lines( + ["*def test_1():*", "*def test_2():*", "*2 failed*"] + ) def test_tb_option(self, testdir, option): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest def g(): raise IndexError def test_func(): print (6*7) g() # --calling-- - """) + """ + ) for tbopt in ["long", "short", "no"]: - print('testing --tb=%s...' % tbopt) - result = testdir.runpytest('--tb=%s' % tbopt) + print("testing --tb=%s..." 
% tbopt) + result = testdir.runpytest("--tb=%s" % tbopt) s = result.stdout.str() if tbopt == "long": - assert 'print (6*7)' in s + assert "print (6*7)" in s else: - assert 'print (6*7)' not in s + assert "print (6*7)" not in s if tbopt != "no": - assert '--calling--' in s - assert 'IndexError' in s + assert "--calling--" in s + assert "IndexError" in s else: - assert 'FAILURES' not in s - assert '--calling--' not in s - assert 'IndexError' not in s + assert "FAILURES" not in s + assert "--calling--" not in s + assert "IndexError" not in s def test_tb_crashline(self, testdir, option): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import pytest def g(): raise IndexError @@ -825,36 +876,37 @@ class TestGenericReporting(object): g() # --calling-- def test_func2(): assert 0, "hello" - """) + """ + ) result = testdir.runpytest("--tb=line") bn = p.basename - result.stdout.fnmatch_lines([ - "*%s:3: IndexError*" % bn, - "*%s:8: AssertionError: hello*" % bn, - ]) + result.stdout.fnmatch_lines( + ["*%s:3: IndexError*" % bn, "*%s:8: AssertionError: hello*" % bn] + ) s = result.stdout.str() assert "def test_func2" not in s def test_pytest_report_header(self, testdir, option): - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_sessionstart(session): session.config._somevalue = 42 def pytest_report_header(config): return "hello: %s" % config._somevalue - """) - testdir.mkdir("a").join("conftest.py").write(""" + """ + ) + testdir.mkdir("a").join("conftest.py").write( + """ def pytest_report_header(config, startdir): return ["line1", str(startdir)] -""") +""" + ) result = testdir.runpytest("a") - result.stdout.fnmatch_lines([ - "*hello: 42*", - "line1", - str(testdir.tmpdir), - ]) + result.stdout.fnmatch_lines(["*hello: 42*", "line1", str(testdir.tmpdir)]) def test_show_capture(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import sys import logging def test_one(): @@ -862,38 +914,43 @@ def pytest_report_header(config, startdir): sys.stderr.write('!This is stderr!') logging.warning('!This is a warning log msg!') assert False, 'Something failed' - """) + """ + ) result = testdir.runpytest("--tb=short") - result.stdout.fnmatch_lines(["!This is stdout!", - "!This is stderr!", - "*WARNING*!This is a warning log msg!"]) + result.stdout.fnmatch_lines( + [ + "!This is stdout!", + "!This is stderr!", + "*WARNING*!This is a warning log msg!", + ] + ) result = testdir.runpytest("--show-capture=all", "--tb=short") - result.stdout.fnmatch_lines(["!This is stdout!", - "!This is stderr!", - "*WARNING*!This is a warning log msg!"]) + result.stdout.fnmatch_lines( + [ + "!This is stdout!", + "!This is stderr!", + "*WARNING*!This is a warning log msg!", + ] + ) - stdout = testdir.runpytest( - "--show-capture=stdout", "--tb=short").stdout.str() + stdout = testdir.runpytest("--show-capture=stdout", "--tb=short").stdout.str() assert "!This is stderr!" not in stdout assert "!This is stdout!" in stdout assert "!This is a warning log msg!" not in stdout - stdout = testdir.runpytest( - "--show-capture=stderr", "--tb=short").stdout.str() + stdout = testdir.runpytest("--show-capture=stderr", "--tb=short").stdout.str() assert "!This is stdout!" not in stdout assert "!This is stderr!" in stdout assert "!This is a warning log msg!" not in stdout - stdout = testdir.runpytest( - "--show-capture=log", "--tb=short").stdout.str() + stdout = testdir.runpytest("--show-capture=log", "--tb=short").stdout.str() assert "!This is stdout!" not in stdout assert "!This is stderr!" 
not in stdout assert "!This is a warning log msg!" in stdout - stdout = testdir.runpytest( - "--show-capture=no", "--tb=short").stdout.str() + stdout = testdir.runpytest("--show-capture=no", "--tb=short").stdout.str() assert "!This is stdout!" not in stdout assert "!This is stderr!" not in stdout assert "!This is a warning log msg!" not in stdout @@ -901,7 +958,8 @@ def pytest_report_header(config, startdir): @pytest.mark.xfail("not hasattr(os, 'dup')") def test_fdopen_kept_alive_issue124(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import os, sys k = [] def test_open_file_and_keep_alive(capfd): @@ -911,15 +969,15 @@ def test_fdopen_kept_alive_issue124(testdir): def test_close_kept_alive_file(): stdout = k.pop() stdout.close() - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*2 passed*" - ]) + result.stdout.fnmatch_lines(["*2 passed*"]) def test_tbstyle_native_setup_error(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture def setup_error_fixture(): @@ -927,118 +985,117 @@ def test_tbstyle_native_setup_error(testdir): def test_error_fixture(setup_error_fixture): pass - """) + """ + ) result = testdir.runpytest("--tb=native") - result.stdout.fnmatch_lines([ - '*File *test_tbstyle_native_setup_error.py", line *, in setup_error_fixture*' - ]) + result.stdout.fnmatch_lines( + ['*File *test_tbstyle_native_setup_error.py", line *, in setup_error_fixture*'] + ) def test_terminal_summary(testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_terminal_summary(terminalreporter, exitstatus): w = terminalreporter w.section("hello") w.line("world") w.line("exitstatus: {0}".format(exitstatus)) - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ *==== hello ====* world exitstatus: 5 - """) + """ + ) def test_terminal_summary_warnings_are_displayed(testdir): """Test that warnings emitted during pytest_terminal_summary are displayed. (#1305). """ - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_terminal_summary(terminalreporter): config = terminalreporter.config config.warn('C1', 'internal warning') - """) - result = testdir.runpytest('-rw') - result.stdout.fnmatch_lines([ - '*internal warning', - '*== 1 warnings in *', - ]) + """ + ) + result = testdir.runpytest("-rw") + result.stdout.fnmatch_lines(["*internal warning", "*== 1 warnings in *"]) -@pytest.mark.parametrize("exp_color, exp_line, stats_arg", [ - # The method under test only cares about the length of each - # dict value, not the actual contents, so tuples of anything - # suffice - - # Important statuses -- the highest priority of these always wins - ("red", "1 failed", {"failed": (1,)}), - ("red", "1 failed, 1 passed", {"failed": (1,), "passed": (1,)}), - - ("red", "1 error", {"error": (1,)}), - ("red", "1 passed, 1 error", {"error": (1,), "passed": (1,)}), - - # (a status that's not known to the code) - ("yellow", "1 weird", {"weird": (1,)}), - ("yellow", "1 passed, 1 weird", {"weird": (1,), "passed": (1,)}), - - ("yellow", "1 warnings", {"warnings": (1,)}), - ("yellow", "1 passed, 1 warnings", {"warnings": (1,), - "passed": (1,)}), - - ("green", "5 passed", {"passed": (1, 2, 3, 4, 5)}), - - - # "Boring" statuses. These have no effect on the color of the summary - # line. Thus, if *every* test has a boring status, the summary line stays - # at its default color, i.e. 
yellow, to warn the user that the test run - # produced no useful information - ("yellow", "1 skipped", {"skipped": (1,)}), - ("green", "1 passed, 1 skipped", {"skipped": (1,), "passed": (1,)}), - - ("yellow", "1 deselected", {"deselected": (1,)}), - ("green", "1 passed, 1 deselected", {"deselected": (1,), "passed": (1,)}), - - ("yellow", "1 xfailed", {"xfailed": (1,)}), - ("green", "1 passed, 1 xfailed", {"xfailed": (1,), "passed": (1,)}), - - ("yellow", "1 xpassed", {"xpassed": (1,)}), - ("green", "1 passed, 1 xpassed", {"xpassed": (1,), "passed": (1,)}), - - # Likewise if no tests were found at all - ("yellow", "no tests ran", {}), - - # Test the empty-key special case - ("yellow", "no tests ran", {"": (1,)}), - ("green", "1 passed", {"": (1,), "passed": (1,)}), - - - # A couple more complex combinations - ("red", "1 failed, 2 passed, 3 xfailed", - {"passed": (1, 2), "failed": (1,), "xfailed": (1, 2, 3)}), - - ("green", "1 passed, 2 skipped, 3 deselected, 2 xfailed", - {"passed": (1,), - "skipped": (1, 2), - "deselected": (1, 2, 3), - "xfailed": (1, 2)}), -]) +@pytest.mark.parametrize( + "exp_color, exp_line, stats_arg", + [ + # The method under test only cares about the length of each + # dict value, not the actual contents, so tuples of anything + # suffice + # Important statuses -- the highest priority of these always wins + ("red", "1 failed", {"failed": (1,)}), + ("red", "1 failed, 1 passed", {"failed": (1,), "passed": (1,)}), + ("red", "1 error", {"error": (1,)}), + ("red", "1 passed, 1 error", {"error": (1,), "passed": (1,)}), + # (a status that's not known to the code) + ("yellow", "1 weird", {"weird": (1,)}), + ("yellow", "1 passed, 1 weird", {"weird": (1,), "passed": (1,)}), + ("yellow", "1 warnings", {"warnings": (1,)}), + ("yellow", "1 passed, 1 warnings", {"warnings": (1,), "passed": (1,)}), + ("green", "5 passed", {"passed": (1, 2, 3, 4, 5)}), + # "Boring" statuses. These have no effect on the color of the summary + # line. Thus, if *every* test has a boring status, the summary line stays + # at its default color, i.e. 
yellow, to warn the user that the test run + # produced no useful information + ("yellow", "1 skipped", {"skipped": (1,)}), + ("green", "1 passed, 1 skipped", {"skipped": (1,), "passed": (1,)}), + ("yellow", "1 deselected", {"deselected": (1,)}), + ("green", "1 passed, 1 deselected", {"deselected": (1,), "passed": (1,)}), + ("yellow", "1 xfailed", {"xfailed": (1,)}), + ("green", "1 passed, 1 xfailed", {"xfailed": (1,), "passed": (1,)}), + ("yellow", "1 xpassed", {"xpassed": (1,)}), + ("green", "1 passed, 1 xpassed", {"xpassed": (1,), "passed": (1,)}), + # Likewise if no tests were found at all + ("yellow", "no tests ran", {}), + # Test the empty-key special case + ("yellow", "no tests ran", {"": (1,)}), + ("green", "1 passed", {"": (1,), "passed": (1,)}), + # A couple more complex combinations + ( + "red", + "1 failed, 2 passed, 3 xfailed", + {"passed": (1, 2), "failed": (1,), "xfailed": (1, 2, 3)}, + ), + ( + "green", + "1 passed, 2 skipped, 3 deselected, 2 xfailed", + { + "passed": (1,), + "skipped": (1, 2), + "deselected": (1, 2, 3), + "xfailed": (1, 2), + }, + ), + ], +) def test_summary_stats(exp_line, exp_color, stats_arg): print("Based on stats: %s" % stats_arg) - print("Expect summary: \"%s\"; with color \"%s\"" % (exp_line, exp_color)) + print('Expect summary: "%s"; with color "%s"' % (exp_line, exp_color)) (line, color) = build_summary_stats_line(stats_arg) - print("Actually got: \"%s\"; with color \"%s\"" % (line, color)) + print('Actually got: "%s"; with color "%s"' % (line, color)) assert line == exp_line assert color == exp_color def test_no_trailing_whitespace_after_inifile_word(testdir): - result = testdir.runpytest('') - assert 'inifile:\n' in result.stdout.str() + result = testdir.runpytest("") + assert "inifile:\n" in result.stdout.str() - testdir.makeini('[pytest]') - result = testdir.runpytest('') - assert 'inifile: tox.ini\n' in result.stdout.str() + testdir.makeini("[pytest]") + result = testdir.runpytest("") + assert "inifile: tox.ini\n" in result.stdout.str() class TestProgress(object): @@ -1066,7 +1123,8 @@ class TestProgress(object): def test_zero_tests_collected(self, testdir): """Some plugins (testmon for example) might issue pytest_runtest_logreport without any tests being actually collected (#2971).""" - testdir.makeconftest(""" + testdir.makeconftest( + """ def pytest_collection_modifyitems(items, config): from _pytest.runner import CollectReport for node_id in ('nodeid1', 'nodeid2'): @@ -1074,54 +1132,55 @@ class TestProgress(object): rep.when = 'passed' rep.duration = 0.1 config.hook.pytest_runtest_logreport(report=rep) - """) + """ + ) output = testdir.runpytest() - assert 'ZeroDivisionError' not in output.stdout.str() - output.stdout.fnmatch_lines([ - '=* 2 passed in *=', - ]) + assert "ZeroDivisionError" not in output.stdout.str() + output.stdout.fnmatch_lines(["=* 2 passed in *="]) def test_normal(self, many_tests_files, testdir): output = testdir.runpytest() - output.stdout.re_match_lines([ - r'test_bar.py \.{10} \s+ \[ 50%\]', - r'test_foo.py \.{5} \s+ \[ 75%\]', - r'test_foobar.py \.{5} \s+ \[100%\]', - ]) + output.stdout.re_match_lines( + [ + r"test_bar.py \.{10} \s+ \[ 50%\]", + r"test_foo.py \.{5} \s+ \[ 75%\]", + r"test_foobar.py \.{5} \s+ \[100%\]", + ] + ) def test_verbose(self, many_tests_files, testdir): - output = testdir.runpytest('-v') - output.stdout.re_match_lines([ - r'test_bar.py::test_bar\[0\] PASSED \s+ \[ 5%\]', - r'test_foo.py::test_foo\[4\] PASSED \s+ \[ 75%\]', - r'test_foobar.py::test_foobar\[4\] PASSED \s+ \[100%\]', - ]) + 
output = testdir.runpytest("-v") + output.stdout.re_match_lines( + [ + r"test_bar.py::test_bar\[0\] PASSED \s+ \[ 5%\]", + r"test_foo.py::test_foo\[4\] PASSED \s+ \[ 75%\]", + r"test_foobar.py::test_foobar\[4\] PASSED \s+ \[100%\]", + ] + ) def test_xdist_normal(self, many_tests_files, testdir): - pytest.importorskip('xdist') - output = testdir.runpytest('-n2') - output.stdout.re_match_lines([ - r'\.{20} \s+ \[100%\]', - ]) + pytest.importorskip("xdist") + output = testdir.runpytest("-n2") + output.stdout.re_match_lines([r"\.{20} \s+ \[100%\]"]) def test_xdist_verbose(self, many_tests_files, testdir): - pytest.importorskip('xdist') - output = testdir.runpytest('-n2', '-v') - output.stdout.re_match_lines_random([ - r'\[gw\d\] \[\s*\d+%\] PASSED test_bar.py::test_bar\[1\]', - r'\[gw\d\] \[\s*\d+%\] PASSED test_foo.py::test_foo\[1\]', - r'\[gw\d\] \[\s*\d+%\] PASSED test_foobar.py::test_foobar\[1\]', - ]) + pytest.importorskip("xdist") + output = testdir.runpytest("-n2", "-v") + output.stdout.re_match_lines_random( + [ + r"\[gw\d\] \[\s*\d+%\] PASSED test_bar.py::test_bar\[1\]", + r"\[gw\d\] \[\s*\d+%\] PASSED test_foo.py::test_foo\[1\]", + r"\[gw\d\] \[\s*\d+%\] PASSED test_foobar.py::test_foobar\[1\]", + ] + ) def test_capture_no(self, many_tests_files, testdir): - output = testdir.runpytest('-s') - output.stdout.re_match_lines([ - r'test_bar.py \.{10}', - r'test_foo.py \.{5}', - r'test_foobar.py \.{5}', - ]) + output = testdir.runpytest("-s") + output.stdout.re_match_lines( + [r"test_bar.py \.{10}", r"test_foo.py \.{5}", r"test_foobar.py \.{5}"] + ) - output = testdir.runpytest('--capture=no') + output = testdir.runpytest("--capture=no") assert "%]" not in output.stdout.str() @@ -1130,71 +1189,76 @@ class TestProgressWithTeardown(object): @pytest.fixture def contest_with_teardown_fixture(self, testdir): - testdir.makeconftest(''' + testdir.makeconftest( + """ import pytest @pytest.fixture def fail_teardown(): yield assert False - ''') + """ + ) @pytest.fixture def many_files(self, testdir, contest_with_teardown_fixture): testdir.makepyfile( - test_bar=''' + test_bar=""" import pytest @pytest.mark.parametrize('i', range(5)) def test_bar(fail_teardown, i): pass - ''', - test_foo=''' + """, + test_foo=""" import pytest @pytest.mark.parametrize('i', range(15)) def test_foo(fail_teardown, i): pass - ''', + """, ) def test_teardown_simple(self, testdir, contest_with_teardown_fixture): - testdir.makepyfile(''' + testdir.makepyfile( + """ def test_foo(fail_teardown): pass - ''') + """ + ) output = testdir.runpytest() - output.stdout.re_match_lines([ - r'test_teardown_simple.py \.E\s+\[100%\]', - ]) + output.stdout.re_match_lines([r"test_teardown_simple.py \.E\s+\[100%\]"]) - def test_teardown_with_test_also_failing(self, testdir, contest_with_teardown_fixture): - testdir.makepyfile(''' + def test_teardown_with_test_also_failing( + self, testdir, contest_with_teardown_fixture + ): + testdir.makepyfile( + """ def test_foo(fail_teardown): assert False - ''') + """ + ) output = testdir.runpytest() - output.stdout.re_match_lines([ - r'test_teardown_with_test_also_failing.py FE\s+\[100%\]', - ]) + output.stdout.re_match_lines( + [r"test_teardown_with_test_also_failing.py FE\s+\[100%\]"] + ) def test_teardown_many(self, testdir, many_files): output = testdir.runpytest() - output.stdout.re_match_lines([ - r'test_bar.py (\.E){5}\s+\[ 25%\]', - r'test_foo.py (\.E){15}\s+\[100%\]', - ]) + output.stdout.re_match_lines( + [r"test_bar.py (\.E){5}\s+\[ 25%\]", r"test_foo.py (\.E){15}\s+\[100%\]"] + ) def 
test_teardown_many_verbose(self, testdir, many_files): - output = testdir.runpytest('-v') - output.stdout.re_match_lines([ - r'test_bar.py::test_bar\[0\] PASSED\s+\[ 5%\]', - r'test_bar.py::test_bar\[0\] ERROR\s+\[ 5%\]', - r'test_bar.py::test_bar\[4\] PASSED\s+\[ 25%\]', - r'test_bar.py::test_bar\[4\] ERROR\s+\[ 25%\]', - ]) + output = testdir.runpytest("-v") + output.stdout.re_match_lines( + [ + r"test_bar.py::test_bar\[0\] PASSED\s+\[ 5%\]", + r"test_bar.py::test_bar\[0\] ERROR\s+\[ 5%\]", + r"test_bar.py::test_bar\[4\] PASSED\s+\[ 25%\]", + r"test_bar.py::test_bar\[4\] ERROR\s+\[ 25%\]", + ] + ) def test_xdist_normal(self, many_files, testdir): - pytest.importorskip('xdist') - output = testdir.runpytest('-n2') - output.stdout.re_match_lines([ - r'[\.E]{40} \s+ \[100%\]', - ]) + pytest.importorskip("xdist") + output = testdir.runpytest("-n2") + output.stdout.re_match_lines([r"[\.E]{40} \s+ \[100%\]"]) diff --git a/testing/test_tmpdir.py b/testing/test_tmpdir.py index 467e77252..336249094 100644 --- a/testing/test_tmpdir.py +++ b/testing/test_tmpdir.py @@ -7,13 +7,16 @@ from _pytest.tmpdir import tmpdir def test_funcarg(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ def pytest_generate_tests(metafunc): metafunc.addcall(id='a') metafunc.addcall(id='b') def test_func(tmpdir): pass - """) + """ + ) from _pytest.tmpdir import TempdirFactory + reprec = testdir.inline_run() calls = reprec.getcalls("pytest_runtest_setup") item = calls[0].item @@ -32,15 +35,17 @@ def test_funcarg(testdir): def test_ensuretemp(recwarn): - d1 = pytest.ensuretemp('hello') - d2 = pytest.ensuretemp('hello') + d1 = pytest.ensuretemp("hello") + d2 = pytest.ensuretemp("hello") assert d1 == d2 assert d1.check(dir=1) class TestTempdirHandler(object): + def test_mktemp(self, testdir): from _pytest.tmpdir import TempdirFactory + config = testdir.parseconfig() config.option.basetemp = testdir.mkdir("hello") t = TempdirFactory(config) @@ -54,35 +59,42 @@ class TestTempdirHandler(object): class TestConfigTmpdir(object): + def test_getbasetemp_custom_removes_old(self, testdir): mytemp = testdir.tmpdir.join("xyz") - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ def test_1(tmpdir): pass - """) - testdir.runpytest(p, '--basetemp=%s' % mytemp) + """ + ) + testdir.runpytest(p, "--basetemp=%s" % mytemp) mytemp.check() mytemp.ensure("hello") - testdir.runpytest(p, '--basetemp=%s' % mytemp) + testdir.runpytest(p, "--basetemp=%s" % mytemp) mytemp.check() assert not mytemp.join("hello").check() def test_basetemp(testdir): mytemp = testdir.tmpdir.mkdir("mytemp") - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ import pytest def test_1(): pytest.ensuretemp("hello") - """) - result = testdir.runpytest(p, '--basetemp=%s' % mytemp) + """ + ) + result = testdir.runpytest(p, "--basetemp=%s" % mytemp) assert result.ret == 0 - assert mytemp.join('hello').check() + assert mytemp.join("hello").check() -@pytest.mark.skipif(not hasattr(py.path.local, 'mksymlinkto'), - reason="symlink not available on this platform") +@pytest.mark.skipif( + not hasattr(py.path.local, "mksymlinkto"), + reason="symlink not available on this platform", +) def test_tmpdir_always_is_realpath(testdir): # the reason why tmpdir should be a realpath is that # when you cd to it and do "os.getcwd()" you will anyway @@ -93,35 +105,41 @@ def test_tmpdir_always_is_realpath(testdir): realtemp = testdir.tmpdir.mkdir("myrealtemp") linktemp = testdir.tmpdir.join("symlinktemp") linktemp.mksymlinkto(realtemp) - p = testdir.makepyfile(""" + p = 
testdir.makepyfile( + """ def test_1(tmpdir): import os assert os.path.realpath(str(tmpdir)) == str(tmpdir) - """) - result = testdir.runpytest("-s", p, '--basetemp=%s/bt' % linktemp) + """ + ) + result = testdir.runpytest("-s", p, "--basetemp=%s/bt" % linktemp) assert not result.ret def test_tmpdir_too_long_on_parametrization(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.mark.parametrize("arg", ["1"*1000]) def test_some(arg, tmpdir): tmpdir.ensure("hello") - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_tmpdir_factory(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest @pytest.fixture(scope='session') def session_dir(tmpdir_factory): return tmpdir_factory.mktemp('data', numbered=False) def test_some(session_dir): session_dir.isdir() - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) @@ -130,59 +148,65 @@ def test_tmpdir_fallback_tox_env(testdir, monkeypatch): """Test that tmpdir works even if environment variables required by getpass module are missing (#1010). """ - monkeypatch.delenv('USER', raising=False) - monkeypatch.delenv('USERNAME', raising=False) - testdir.makepyfile(""" + monkeypatch.delenv("USER", raising=False) + monkeypatch.delenv("USERNAME", raising=False) + testdir.makepyfile( + """ import pytest def test_some(tmpdir): assert tmpdir.isdir() - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) @pytest.fixture def break_getuser(monkeypatch): - monkeypatch.setattr('os.getuid', lambda: -1) + monkeypatch.setattr("os.getuid", lambda: -1) # taken from python 2.7/3.4 - for envvar in ('LOGNAME', 'USER', 'LNAME', 'USERNAME'): + for envvar in ("LOGNAME", "USER", "LNAME", "USERNAME"): monkeypatch.delenv(envvar, raising=False) @pytest.mark.usefixtures("break_getuser") -@pytest.mark.skipif(sys.platform.startswith('win'), reason='no os.getuid on windows') +@pytest.mark.skipif(sys.platform.startswith("win"), reason="no os.getuid on windows") def test_tmpdir_fallback_uid_not_found(testdir): """Test that tmpdir works even if the current process's user id does not correspond to a valid user. """ - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest def test_some(tmpdir): assert tmpdir.isdir() - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) @pytest.mark.usefixtures("break_getuser") -@pytest.mark.skipif(sys.platform.startswith('win'), reason='no os.getuid on windows') +@pytest.mark.skipif(sys.platform.startswith("win"), reason="no os.getuid on windows") def test_get_user_uid_not_found(): """Test that get_user() function works even if the current process's user id does not correspond to a valid user (e.g. running pytest in a Docker container with 'docker run -u'. """ from _pytest.tmpdir import get_user + assert get_user() is None -@pytest.mark.skipif(not sys.platform.startswith('win'), reason='win only') +@pytest.mark.skipif(not sys.platform.startswith("win"), reason="win only") def test_get_user(monkeypatch): """Test that get_user() function works even if environment variables required by getpass module are missing from the environment on Windows (#1010). 
""" from _pytest.tmpdir import get_user - monkeypatch.delenv('USER', raising=False) - monkeypatch.delenv('USERNAME', raising=False) + + monkeypatch.delenv("USER", raising=False) + monkeypatch.delenv("USERNAME", raising=False) assert get_user() is None diff --git a/testing/test_unittest.py b/testing/test_unittest.py index 202b20c19..65ffdb975 100644 --- a/testing/test_unittest.py +++ b/testing/test_unittest.py @@ -5,21 +5,24 @@ import gc def test_simple_unittest(testdir): - testpath = testdir.makepyfile(""" + testpath = testdir.makepyfile( + """ import unittest class MyTestCase(unittest.TestCase): def testpassing(self): self.assertEqual('foo', 'foo') def test_failing(self): self.assertEqual('foo', 'bar') - """) + """ + ) reprec = testdir.inline_run(testpath) assert reprec.matchreport("testpassing").passed assert reprec.matchreport("test_failing").failed def test_runTest_method(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import unittest class MyTestCaseWithRunTest(unittest.TestCase): def runTest(self): @@ -29,29 +32,35 @@ def test_runTest_method(testdir): self.assertEqual('foo', 'foo') def test_something(self): pass - """) + """ + ) result = testdir.runpytest("-v") - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines( + """ *MyTestCaseWithRunTest::runTest* *MyTestCaseWithoutRunTest::test_something* *2 passed* - """) + """ + ) def test_isclasscheck_issue53(testdir): - testpath = testdir.makepyfile(""" + testpath = testdir.makepyfile( + """ import unittest class _E(object): def __getattr__(self, tag): pass E = _E() - """) + """ + ) result = testdir.runpytest(testpath) assert result.ret == EXIT_NOTESTSCOLLECTED def test_setup(testdir): - testpath = testdir.makepyfile(""" + testpath = testdir.makepyfile( + """ import unittest class MyTestCase(unittest.TestCase): def setUp(self): @@ -64,15 +73,17 @@ def test_setup(testdir): def teardown_method(self, method): assert 0, "42" - """) + """ + ) reprec = testdir.inline_run("-s", testpath) assert reprec.matchreport("test_both", when="call").passed rep = reprec.matchreport("test_both", when="teardown") - assert rep.failed and '42' in str(rep.longrepr) + assert rep.failed and "42" in str(rep.longrepr) def test_setUpModule(testdir): - testpath = testdir.makepyfile(""" + testpath = testdir.makepyfile( + """ values = [] def setUpModule(): @@ -86,15 +97,15 @@ def test_setUpModule(testdir): def test_world(): assert values == [1] - """) + """ + ) result = testdir.runpytest(testpath) - result.stdout.fnmatch_lines([ - "*2 passed*", - ]) + result.stdout.fnmatch_lines(["*2 passed*"]) def test_setUpModule_failing_no_teardown(testdir): - testpath = testdir.makepyfile(""" + testpath = testdir.makepyfile( + """ values = [] def setUpModule(): @@ -105,7 +116,8 @@ def test_setUpModule_failing_no_teardown(testdir): def test_hello(): pass - """) + """ + ) reprec = testdir.inline_run(testpath) reprec.assertoutcome(passed=0, failed=1) call = reprec.getcalls("pytest_runtest_setup")[0] @@ -113,20 +125,23 @@ def test_setUpModule_failing_no_teardown(testdir): def test_new_instances(testdir): - testpath = testdir.makepyfile(""" + testpath = testdir.makepyfile( + """ import unittest class MyTestCase(unittest.TestCase): def test_func1(self): self.x = 2 def test_func2(self): assert not hasattr(self, 'x') - """) + """ + ) reprec = testdir.inline_run(testpath) reprec.assertoutcome(passed=2) def test_teardown(testdir): - testpath = testdir.makepyfile(""" + testpath = testdir.makepyfile( + """ import unittest class MyTestCase(unittest.TestCase): values = [] 
@@ -137,7 +152,8 @@ def test_teardown(testdir): class Second(unittest.TestCase): def test_check(self): self.assertEqual(MyTestCase.values, [None]) - """) + """ + ) reprec = testdir.inline_run(testpath) passed, skipped, failed = reprec.countoutcomes() assert failed == 0, failed @@ -153,7 +169,8 @@ def test_teardown_issue1649(testdir): The TestCase will not be cleaned up if the test fails, because it would then exist in the stackframe. """ - testpath = testdir.makepyfile(""" + testpath = testdir.makepyfile( + """ import unittest class TestCaseObjectsShouldBeCleanedUp(unittest.TestCase): def setUp(self): @@ -161,15 +178,17 @@ def test_teardown_issue1649(testdir): def test_demo(self): pass - """) + """ + ) testdir.inline_run("-s", testpath) gc.collect() for obj in gc.get_objects(): - assert type(obj).__name__ != 'TestCaseObjectsShouldBeCleanedUp' + assert type(obj).__name__ != "TestCaseObjectsShouldBeCleanedUp" def test_unittest_skip_issue148(testdir): - testpath = testdir.makepyfile(""" + testpath = testdir.makepyfile( + """ import unittest @unittest.skip("hello") @@ -182,33 +201,39 @@ def test_unittest_skip_issue148(testdir): @classmethod def tearDownClass(self): xxx - """) + """ + ) reprec = testdir.inline_run(testpath) reprec.assertoutcome(skipped=1) def test_method_and_teardown_failing_reporting(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import unittest, pytest class TC(unittest.TestCase): def tearDown(self): assert 0, "down1" def test_method(self): assert False, "down2" - """) + """ + ) result = testdir.runpytest("-s") assert result.ret == 1 - result.stdout.fnmatch_lines([ - "*tearDown*", - "*assert 0*", - "*test_method*", - "*assert False*", - "*1 failed*1 error*", - ]) + result.stdout.fnmatch_lines( + [ + "*tearDown*", + "*assert 0*", + "*test_method*", + "*assert False*", + "*1 failed*1 error*", + ] + ) def test_setup_failure_is_shown(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import unittest import pytest class TC(unittest.TestCase): @@ -217,19 +242,17 @@ def test_setup_failure_is_shown(testdir): def test_method(self): print ("never42") xyz - """) + """ + ) result = testdir.runpytest("-s") assert result.ret == 1 - result.stdout.fnmatch_lines([ - "*setUp*", - "*assert 0*down1*", - "*1 failed*", - ]) - assert 'never42' not in result.stdout.str() + result.stdout.fnmatch_lines(["*setUp*", "*assert 0*down1*", "*1 failed*"]) + assert "never42" not in result.stdout.str() def test_setup_setUpClass(testdir): - testpath = testdir.makepyfile(""" + testpath = testdir.makepyfile( + """ import unittest import pytest class MyTestCase(unittest.TestCase): @@ -246,13 +269,15 @@ def test_setup_setUpClass(testdir): cls.x -= 1 def test_teareddown(): assert MyTestCase.x == 0 - """) + """ + ) reprec = testdir.inline_run(testpath) reprec.assertoutcome(passed=3) def test_setup_class(testdir): - testpath = testdir.makepyfile(""" + testpath = testdir.makepyfile( + """ import unittest import pytest class MyTestCase(unittest.TestCase): @@ -267,14 +292,16 @@ def test_setup_class(testdir): cls.x -= 1 def test_teareddown(): assert MyTestCase.x == 0 - """) + """ + ) reprec = testdir.inline_run(testpath) reprec.assertoutcome(passed=3) -@pytest.mark.parametrize("type", ['Error', 'Failure']) +@pytest.mark.parametrize("type", ["Error", "Failure"]) def test_testcase_adderrorandfailure_defers(testdir, type): - testdir.makepyfile(""" + testdir.makepyfile( + """ from unittest import TestCase import pytest class MyTestCase(TestCase): @@ -288,14 +315,17 @@ def 
test_testcase_adderrorandfailure_defers(testdir, type): pytest.fail("add%s should not raise") def test_hello(self): pass - """ % (type, type)) + """ + % (type, type) + ) result = testdir.runpytest() - assert 'should not raise' not in result.stdout.str() + assert "should not raise" not in result.stdout.str() -@pytest.mark.parametrize("type", ['Error', 'Failure']) +@pytest.mark.parametrize("type", ["Error", "Failure"]) def test_testcase_custom_exception_info(testdir, type): - testdir.makepyfile(""" + testdir.makepyfile( + """ from unittest import TestCase import py, pytest import _pytest._code @@ -316,69 +346,83 @@ def test_testcase_custom_exception_info(testdir, type): mp.undo() def test_hello(self): pass - """ % locals()) + """ + % locals() + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "NOTE: Incompatible Exception Representation*", - "*ZeroDivisionError*", - "*1 failed*", - ]) + result.stdout.fnmatch_lines( + [ + "NOTE: Incompatible Exception Representation*", + "*ZeroDivisionError*", + "*1 failed*", + ] + ) def test_testcase_totally_incompatible_exception_info(testdir): - item, = testdir.getitems(""" + item, = testdir.getitems( + """ from unittest import TestCase class MyTestCase(TestCase): def test_hello(self): pass - """) + """ + ) item.addError(None, 42) excinfo = item._excinfo.pop(0) - assert 'ERROR: Unknown Incompatible' in str(excinfo.getrepr()) + assert "ERROR: Unknown Incompatible" in str(excinfo.getrepr()) def test_module_level_pytestmark(testdir): - testpath = testdir.makepyfile(""" + testpath = testdir.makepyfile( + """ import unittest import pytest pytestmark = pytest.mark.xfail class MyTestCase(unittest.TestCase): def test_func1(self): assert 0 - """) + """ + ) reprec = testdir.inline_run(testpath, "-s") reprec.assertoutcome(skipped=1) class TestTrialUnittest(object): + def setup_class(cls): cls.ut = pytest.importorskip("twisted.trial.unittest") # on windows trial uses a socket for a reactor and apparently doesn't close it properly # https://twistedmatrix.com/trac/ticket/9227 - cls.ignore_unclosed_socket_warning = ('-W', 'always') + cls.ignore_unclosed_socket_warning = ("-W", "always") def test_trial_testcase_runtest_not_collected(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ from twisted.trial.unittest import TestCase class TC(TestCase): def test_hello(self): pass - """) + """ + ) reprec = testdir.inline_run(*self.ignore_unclosed_socket_warning) reprec.assertoutcome(passed=1) - testdir.makepyfile(""" + testdir.makepyfile( + """ from twisted.trial.unittest import TestCase class TC(TestCase): def runTest(self): pass - """) + """ + ) reprec = testdir.inline_run(*self.ignore_unclosed_socket_warning) reprec.assertoutcome(passed=1) def test_trial_exceptions_with_skips(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ from twisted.trial import unittest import pytest class TC(unittest.TestCase): @@ -409,24 +453,31 @@ class TestTrialUnittest(object): pytest.skip("skip_in_setup_class") def test_method(self): pass - """) + """ + ) from _pytest.compat import _is_unittest_unexpected_success_a_failure + should_fail = _is_unittest_unexpected_success_a_failure() result = testdir.runpytest("-rxs", *self.ignore_unclosed_socket_warning) - result.stdout.fnmatch_lines_random([ - "*XFAIL*test_trial_todo*", - "*trialselfskip*", - "*skip_in_setup_class*", - "*iwanto*", - "*i2wanto*", - "*sys.version_info*", - "*skip_in_method*", - "*1 failed*4 skipped*3 xfailed*" if should_fail else "*4 skipped*3 xfail*1 xpass*", - ]) + 
result.stdout.fnmatch_lines_random( + [ + "*XFAIL*test_trial_todo*", + "*trialselfskip*", + "*skip_in_setup_class*", + "*iwanto*", + "*i2wanto*", + "*sys.version_info*", + "*skip_in_method*", + "*1 failed*4 skipped*3 xfailed*" + if should_fail + else "*4 skipped*3 xfail*1 xpass*", + ] + ) assert result.ret == (1 if should_fail else 0) def test_trial_error(self, testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ from twisted.trial.unittest import TestCase from twisted.internet.defer import Deferred from twisted.internet import reactor @@ -460,81 +511,97 @@ class TestTrialUnittest(object): reactor.callLater(0.3, d.callback, None) return d # will crash both at test time and at teardown - """) + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*ERRORS*", - "*DelayedCalls*", - "*test_four*", - "*NameError*crash*", - "*test_one*", - "*NameError*crash*", - "*test_three*", - "*DelayedCalls*", - "*test_two*", - "*crash*", - ]) + result.stdout.fnmatch_lines( + [ + "*ERRORS*", + "*DelayedCalls*", + "*test_four*", + "*NameError*crash*", + "*test_one*", + "*NameError*crash*", + "*test_three*", + "*DelayedCalls*", + "*test_two*", + "*crash*", + ] + ) def test_trial_pdb(self, testdir): - p = testdir.makepyfile(""" + p = testdir.makepyfile( + """ from twisted.trial import unittest import pytest class TC(unittest.TestCase): def test_hello(self): assert 0, "hellopdb" - """) + """ + ) child = testdir.spawn_pytest(p) child.expect("hellopdb") child.sendeof() def test_trial_testcase_skip_property(self, testdir): - testpath = testdir.makepyfile(""" + testpath = testdir.makepyfile( + """ from twisted.trial import unittest class MyTestCase(unittest.TestCase): skip = 'dont run' def test_func(self): pass - """) + """ + ) reprec = testdir.inline_run(testpath, "-s") reprec.assertoutcome(skipped=1) def test_trial_testfunction_skip_property(self, testdir): - testpath = testdir.makepyfile(""" + testpath = testdir.makepyfile( + """ from twisted.trial import unittest class MyTestCase(unittest.TestCase): def test_func(self): pass test_func.skip = 'dont run' - """) + """ + ) reprec = testdir.inline_run(testpath, "-s") reprec.assertoutcome(skipped=1) def test_trial_testcase_todo_property(self, testdir): - testpath = testdir.makepyfile(""" + testpath = testdir.makepyfile( + """ from twisted.trial import unittest class MyTestCase(unittest.TestCase): todo = 'dont run' def test_func(self): assert 0 - """) + """ + ) reprec = testdir.inline_run(testpath, "-s") reprec.assertoutcome(skipped=1) def test_trial_testfunction_todo_property(self, testdir): - testpath = testdir.makepyfile(""" + testpath = testdir.makepyfile( + """ from twisted.trial import unittest class MyTestCase(unittest.TestCase): def test_func(self): assert 0 test_func.todo = 'dont run' - """) - reprec = testdir.inline_run(testpath, "-s", *self.ignore_unclosed_socket_warning) + """ + ) + reprec = testdir.inline_run( + testpath, "-s", *self.ignore_unclosed_socket_warning + ) reprec.assertoutcome(skipped=1) def test_djangolike_testcase(testdir): # contributed from Morten Breekevold - testdir.makepyfile(""" + testdir.makepyfile( + """ from unittest import TestCase, main class DjangoLikeTestCase(TestCase): @@ -574,32 +641,38 @@ def test_djangolike_testcase(testdir): def _post_teardown(self): print ("_post_teardown()") - """) + """ + ) result = testdir.runpytest("-s") assert result.ret == 0 - result.stdout.fnmatch_lines([ - "*_pre_setup()*", - "*setUp()*", - "*test_thing()*", - "*tearDown()*", - "*_post_teardown()*", - ]) + 
result.stdout.fnmatch_lines( + [ + "*_pre_setup()*", + "*setUp()*", + "*test_thing()*", + "*tearDown()*", + "*_post_teardown()*", + ] + ) def test_unittest_not_shown_in_traceback(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import unittest class t(unittest.TestCase): def test_hello(self): x = 3 self.assertEqual(x, 4) - """) + """ + ) res = testdir.runpytest() assert "failUnlessEqual" not in res.stdout.str() def test_unorderable_types(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import unittest class TestJoinEmpty(unittest.TestCase): pass @@ -610,27 +683,31 @@ def test_unorderable_types(testdir): Test.__name__ = "TestFoo" return Test TestFoo = make_test() - """) + """ + ) result = testdir.runpytest() assert "TypeError" not in result.stdout.str() assert result.ret == EXIT_NOTESTSCOLLECTED def test_unittest_typerror_traceback(testdir): - testdir.makepyfile(""" + testdir.makepyfile( + """ import unittest class TestJoinEmpty(unittest.TestCase): def test_hello(self, arg1): pass - """) + """ + ) result = testdir.runpytest() assert "TypeError" in result.stdout.str() assert result.ret == 1 -@pytest.mark.parametrize('runner', ['pytest', 'unittest']) +@pytest.mark.parametrize("runner", ["pytest", "unittest"]) def test_unittest_expected_failure_for_failing_test_is_xfail(testdir, runner): - script = testdir.makepyfile(""" + script = testdir.makepyfile( + """ import unittest class MyTestCase(unittest.TestCase): @unittest.expectedFailure @@ -638,25 +715,23 @@ def test_unittest_expected_failure_for_failing_test_is_xfail(testdir, runner): assert False if __name__ == '__main__': unittest.main() - """) - if runner == 'pytest': + """ + ) + if runner == "pytest": result = testdir.runpytest("-rxX") - result.stdout.fnmatch_lines([ - "*XFAIL*MyTestCase*test_failing_test_is_xfail*", - "*1 xfailed*", - ]) + result.stdout.fnmatch_lines( + ["*XFAIL*MyTestCase*test_failing_test_is_xfail*", "*1 xfailed*"] + ) else: result = testdir.runpython(script) - result.stderr.fnmatch_lines([ - "*1 test in*", - "*OK*(expected failures=1)*", - ]) + result.stderr.fnmatch_lines(["*1 test in*", "*OK*(expected failures=1)*"]) assert result.ret == 0 -@pytest.mark.parametrize('runner', ['pytest', 'unittest']) +@pytest.mark.parametrize("runner", ["pytest", "unittest"]) def test_unittest_expected_failure_for_passing_test_is_fail(testdir, runner): - script = testdir.makepyfile(""" + script = testdir.makepyfile( + """ import unittest class MyTestCase(unittest.TestCase): @unittest.expectedFailure @@ -664,31 +739,32 @@ def test_unittest_expected_failure_for_passing_test_is_fail(testdir, runner): assert True if __name__ == '__main__': unittest.main() - """) + """ + ) from _pytest.compat import _is_unittest_unexpected_success_a_failure + should_fail = _is_unittest_unexpected_success_a_failure() - if runner == 'pytest': + if runner == "pytest": result = testdir.runpytest("-rxX") - result.stdout.fnmatch_lines([ - "*MyTestCase*test_passing_test_is_fail*", - "*1 failed*" if should_fail else "*1 xpassed*", - ]) + result.stdout.fnmatch_lines( + [ + "*MyTestCase*test_passing_test_is_fail*", + "*1 failed*" if should_fail else "*1 xpassed*", + ] + ) else: result = testdir.runpython(script) - result.stderr.fnmatch_lines([ - "*1 test in*", - "*(unexpected successes=1)*", - ]) + result.stderr.fnmatch_lines(["*1 test in*", "*(unexpected successes=1)*"]) assert result.ret == (1 if should_fail else 0) -@pytest.mark.parametrize('fix_type, stmt', [ - ('fixture', 'return'), - ('yield_fixture', 'yield'), -]) 
+@pytest.mark.parametrize( + "fix_type, stmt", [("fixture", "return"), ("yield_fixture", "yield")] +) def test_unittest_setup_interaction(testdir, fix_type, stmt): - testdir.makepyfile(""" + testdir.makepyfile( + """ import unittest import pytest class MyTestCase(unittest.TestCase): @@ -710,13 +786,17 @@ def test_unittest_setup_interaction(testdir, fix_type, stmt): def test_classattr(self): assert self.__class__.hello == "world" - """.format(fix_type=fix_type, stmt=stmt)) + """.format( + fix_type=fix_type, stmt=stmt + ) + ) result = testdir.runpytest() result.stdout.fnmatch_lines("*3 passed*") def test_non_unittest_no_setupclass_support(testdir): - testpath = testdir.makepyfile(""" + testpath = testdir.makepyfile( + """ class TestFoo(object): x = 0 @@ -734,13 +814,15 @@ def test_non_unittest_no_setupclass_support(testdir): def test_not_teareddown(): assert TestFoo.x == 0 - """) + """ + ) reprec = testdir.inline_run(testpath) reprec.assertoutcome(passed=2) def test_no_teardown_if_setupclass_failed(testdir): - testpath = testdir.makepyfile(""" + testpath = testdir.makepyfile( + """ import unittest class MyTestCase(unittest.TestCase): @@ -760,63 +842,77 @@ def test_no_teardown_if_setupclass_failed(testdir): def test_notTornDown(): assert MyTestCase.x == 1 - """) + """ + ) reprec = testdir.inline_run(testpath) reprec.assertoutcome(passed=1, failed=1) def test_issue333_result_clearing(testdir): - testdir.makeconftest(""" + testdir.makeconftest( + """ import pytest @pytest.hookimpl(hookwrapper=True) def pytest_runtest_call(item): yield assert 0 - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ import unittest class TestIt(unittest.TestCase): def test_func(self): 0/0 - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(failed=1) def test_unittest_raise_skip_issue748(testdir): - testdir.makepyfile(test_foo=""" + testdir.makepyfile( + test_foo=""" import unittest class MyTestCase(unittest.TestCase): def test_one(self): raise unittest.SkipTest('skipping due to reasons') - """) - result = testdir.runpytest("-v", '-rs') - result.stdout.fnmatch_lines(""" + """ + ) + result = testdir.runpytest("-v", "-rs") + result.stdout.fnmatch_lines( + """ *SKIP*[1]*test_foo.py*skipping due to reasons* *1 skipped* - """) + """ + ) def test_unittest_skip_issue1169(testdir): - testdir.makepyfile(test_foo=""" + testdir.makepyfile( + test_foo=""" import unittest class MyTestCase(unittest.TestCase): @unittest.skip("skipping due to reasons") def test_skip(self): self.fail() - """) - result = testdir.runpytest("-v", '-rs') - result.stdout.fnmatch_lines(""" + """ + ) + result = testdir.runpytest("-v", "-rs") + result.stdout.fnmatch_lines( + """ *SKIP*[1]*skipping due to reasons* *1 skipped* - """) + """ + ) def test_class_method_containing_test_issue1558(testdir): - testdir.makepyfile(test_foo=""" + testdir.makepyfile( + test_foo=""" import unittest class MyTestCase(unittest.TestCase): @@ -825,21 +921,21 @@ def test_class_method_containing_test_issue1558(testdir): def test_should_not_run(self): pass test_should_not_run.__test__ = False - """) + """ + ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) @pytest.mark.issue(3498) -@pytest.mark.parametrize("base", [ - 'six.moves.builtins.object', - 'unittest.TestCase', - 'unittest2.TestCase', -]) +@pytest.mark.parametrize( + "base", ["six.moves.builtins.object", "unittest.TestCase", "unittest2.TestCase"] +) def test_usefixtures_marker_on_unittest(base, testdir): - module = base.rsplit('.', 1)[0] + module = base.rsplit(".", 1)[0] 
pytest.importorskip(module) - testdir.makepyfile(conftest=""" + testdir.makepyfile( + conftest=""" import pytest @pytest.fixture(scope='function') @@ -864,9 +960,11 @@ def test_usefixtures_marker_on_unittest(base, testdir): for item in items: node_and_marks(item) - """) + """ + ) - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest import {module} @@ -885,7 +983,10 @@ def test_usefixtures_marker_on_unittest(base, testdir): assert self.fixture2 - """.format(module=module, base=base)) + """.format( + module=module, base=base + ) + ) - result = testdir.runpytest('-s') + result = testdir.runpytest("-s") result.assert_outcomes(passed=2) diff --git a/testing/test_warnings.py b/testing/test_warnings.py index 02400bd1d..c5bea052a 100644 --- a/testing/test_warnings.py +++ b/testing/test_warnings.py @@ -6,7 +6,7 @@ import sys import pytest -WARNINGS_SUMMARY_HEADER = 'warnings summary' +WARNINGS_SUMMARY_HEADER = "warnings summary" @pytest.fixture @@ -16,48 +16,52 @@ def pyfile_with_warnings(testdir, request): """ testdir.syspathinsert() test_name = request.function.__name__ - module_name = test_name.lstrip('test_') + '_module' - testdir.makepyfile(**{ - module_name: ''' + module_name = test_name.lstrip("test_") + "_module" + testdir.makepyfile( + **{ + module_name: """ import warnings def foo(): warnings.warn(UserWarning("user warning")) warnings.warn(RuntimeWarning("runtime warning")) return 1 - ''', - test_name: ''' + """, + test_name: """ import {module_name} def test_func(): assert {module_name}.foo() == 1 - '''.format(module_name=module_name) - }) + """.format( + module_name=module_name + ), + } + ) -@pytest.mark.filterwarnings('always') +@pytest.mark.filterwarnings("always") def test_normal_flow(testdir, pyfile_with_warnings): """ Check that the warnings section is displayed, containing test node ids followed by all warnings generated by that test node. 
""" result = testdir.runpytest() - result.stdout.fnmatch_lines([ - '*== %s ==*' % WARNINGS_SUMMARY_HEADER, - - '*test_normal_flow.py::test_func', - - '*normal_flow_module.py:3: UserWarning: user warning', - '* warnings.warn(UserWarning("user warning"))', - - '*normal_flow_module.py:4: RuntimeWarning: runtime warning', - '* warnings.warn(RuntimeWarning("runtime warning"))', - '* 1 passed, 2 warnings*', - ]) - assert result.stdout.str().count('test_normal_flow.py::test_func') == 1 + result.stdout.fnmatch_lines( + [ + "*== %s ==*" % WARNINGS_SUMMARY_HEADER, + "*test_normal_flow.py::test_func", + "*normal_flow_module.py:3: UserWarning: user warning", + '* warnings.warn(UserWarning("user warning"))', + "*normal_flow_module.py:4: RuntimeWarning: runtime warning", + '* warnings.warn(RuntimeWarning("runtime warning"))', + "* 1 passed, 2 warnings*", + ] + ) + assert result.stdout.str().count("test_normal_flow.py::test_func") == 1 -@pytest.mark.filterwarnings('always') +@pytest.mark.filterwarnings("always") def test_setup_teardown_warnings(testdir, pyfile_with_warnings): - testdir.makepyfile(''' + testdir.makepyfile( + """ import warnings import pytest @@ -69,57 +73,64 @@ def test_setup_teardown_warnings(testdir, pyfile_with_warnings): def test_func(fix): pass - ''') + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - '*== %s ==*' % WARNINGS_SUMMARY_HEADER, - - '*test_setup_teardown_warnings.py:6: UserWarning: warning during setup', - '*warnings.warn(UserWarning("warning during setup"))', - - '*test_setup_teardown_warnings.py:8: UserWarning: warning during teardown', - '*warnings.warn(UserWarning("warning during teardown"))', - '* 1 passed, 2 warnings*', - ]) + result.stdout.fnmatch_lines( + [ + "*== %s ==*" % WARNINGS_SUMMARY_HEADER, + "*test_setup_teardown_warnings.py:6: UserWarning: warning during setup", + '*warnings.warn(UserWarning("warning during setup"))', + "*test_setup_teardown_warnings.py:8: UserWarning: warning during teardown", + '*warnings.warn(UserWarning("warning during teardown"))', + "* 1 passed, 2 warnings*", + ] + ) -@pytest.mark.parametrize('method', ['cmdline', 'ini']) +@pytest.mark.parametrize("method", ["cmdline", "ini"]) def test_as_errors(testdir, pyfile_with_warnings, method): - args = ('-W', 'error') if method == 'cmdline' else () - if method == 'ini': - testdir.makeini(''' + args = ("-W", "error") if method == "cmdline" else () + if method == "ini": + testdir.makeini( + """ [pytest] filterwarnings= error - ''') + """ + ) result = testdir.runpytest(*args) - result.stdout.fnmatch_lines([ - 'E UserWarning: user warning', - 'as_errors_module.py:3: UserWarning', - '* 1 failed in *', - ]) + result.stdout.fnmatch_lines( + [ + "E UserWarning: user warning", + "as_errors_module.py:3: UserWarning", + "* 1 failed in *", + ] + ) -@pytest.mark.parametrize('method', ['cmdline', 'ini']) +@pytest.mark.parametrize("method", ["cmdline", "ini"]) def test_ignore(testdir, pyfile_with_warnings, method): - args = ('-W', 'ignore') if method == 'cmdline' else () - if method == 'ini': - testdir.makeini(''' + args = ("-W", "ignore") if method == "cmdline" else () + if method == "ini": + testdir.makeini( + """ [pytest] filterwarnings= ignore - ''') + """ + ) result = testdir.runpytest(*args) - result.stdout.fnmatch_lines([ - '* 1 passed in *', - ]) + result.stdout.fnmatch_lines(["* 1 passed in *"]) assert WARNINGS_SUMMARY_HEADER not in result.stdout.str() -@pytest.mark.skipif(sys.version_info < (3, 0), - reason='warnings message is unicode is ok in python3') 
-@pytest.mark.filterwarnings('always') +@pytest.mark.skipif( + sys.version_info < (3, 0), reason="warnings message is unicode is ok in python3" +) +@pytest.mark.filterwarnings("always") def test_unicode(testdir, pyfile_with_warnings): - testdir.makepyfile(''' + testdir.makepyfile( + """ # -*- coding: utf8 -*- import warnings import pytest @@ -132,21 +143,30 @@ def test_unicode(testdir, pyfile_with_warnings): def test_func(fix): pass - ''') + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - '*== %s ==*' % WARNINGS_SUMMARY_HEADER, - '*test_unicode.py:8: UserWarning: \u6d4b\u8bd5*', - '* 1 passed, 1 warnings*', - ]) + result.stdout.fnmatch_lines( + [ + "*== %s ==*" % WARNINGS_SUMMARY_HEADER, + "*test_unicode.py:8: UserWarning: \u6d4b\u8bd5*", + "* 1 passed, 1 warnings*", + ] + ) -@pytest.mark.skipif(sys.version_info >= (3, 0), - reason='warnings message is broken as it is not str instance') +@pytest.mark.skipif( + sys.version_info >= (3, 0), + reason="warnings message is broken as it is not str instance", +) def test_py2_unicode(testdir, pyfile_with_warnings): - if getattr(sys, "pypy_version_info", ())[:2] == (5, 9) and sys.platform.startswith('win'): + if ( + getattr(sys, "pypy_version_info", ())[:2] == (5, 9) + and sys.platform.startswith("win") + ): pytest.xfail("fails with unicode error on PyPy2 5.9 and Windows (#2905)") - testdir.makepyfile(''' + testdir.makepyfile( + """ # -*- coding: utf8 -*- import warnings import pytest @@ -160,41 +180,48 @@ def test_py2_unicode(testdir, pyfile_with_warnings): @pytest.mark.filterwarnings('always') def test_func(fix): pass - ''') + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - '*== %s ==*' % WARNINGS_SUMMARY_HEADER, - - '*test_py2_unicode.py:8: UserWarning: \\u6d4b\\u8bd5', - '*warnings.warn(u"\u6d4b\u8bd5")', - '*warnings.py:*: UnicodeWarning: Warning is using unicode non*', - '* 1 passed, 2 warnings*', - ]) + result.stdout.fnmatch_lines( + [ + "*== %s ==*" % WARNINGS_SUMMARY_HEADER, + "*test_py2_unicode.py:8: UserWarning: \\u6d4b\\u8bd5", + '*warnings.warn(u"\u6d4b\u8bd5")', + "*warnings.py:*: UnicodeWarning: Warning is using unicode non*", + "* 1 passed, 2 warnings*", + ] + ) def test_py2_unicode_ascii(testdir): """Ensure that our warning about 'unicode warnings containing non-ascii messages' does not trigger with ascii-convertible messages""" - testdir.makeini('[pytest]') - testdir.makepyfile(''' + testdir.makeini("[pytest]") + testdir.makepyfile( + """ import pytest import warnings @pytest.mark.filterwarnings('always') def test_func(): warnings.warn(u"hello") - ''') + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - '*== %s ==*' % WARNINGS_SUMMARY_HEADER, - '*warnings.warn(u"hello")', - '* 1 passed, 1 warnings in*' - ]) + result.stdout.fnmatch_lines( + [ + "*== %s ==*" % WARNINGS_SUMMARY_HEADER, + '*warnings.warn(u"hello")', + "* 1 passed, 1 warnings in*", + ] + ) def test_works_with_filterwarnings(testdir): """Ensure our warnings capture does not mess with pre-installed filters (#2430).""" - testdir.makepyfile(''' + testdir.makepyfile( + """ import warnings class MyWarning(Warning): @@ -209,24 +236,26 @@ def test_works_with_filterwarnings(testdir): assert False except MyWarning: assert True - ''') + """ + ) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - '*== 1 passed in *', - ]) + result.stdout.fnmatch_lines(["*== 1 passed in *"]) -@pytest.mark.parametrize('default_config', ['ini', 'cmdline']) +@pytest.mark.parametrize("default_config", ["ini", "cmdline"]) def 
test_filterwarnings_mark(testdir, default_config): """ Test ``filterwarnings`` mark works and takes precedence over command line and ini options. """ - if default_config == 'ini': - testdir.makeini(""" + if default_config == "ini": + testdir.makeini( + """ [pytest] filterwarnings = always - """) - testdir.makepyfile(""" + """ + ) + testdir.makepyfile( + """ import warnings import pytest @@ -240,19 +269,22 @@ def test_filterwarnings_mark(testdir, default_config): def test_show_warning(): warnings.warn(RuntimeWarning()) - """) - result = testdir.runpytest('-W always' if default_config == 'cmdline' else '') - result.stdout.fnmatch_lines(['*= 1 failed, 2 passed, 1 warnings in *']) + """ + ) + result = testdir.runpytest("-W always" if default_config == "cmdline" else "") + result.stdout.fnmatch_lines(["*= 1 failed, 2 passed, 1 warnings in *"]) def test_non_string_warning_argument(testdir): """Non-str argument passed to warning breaks pytest (#2956)""" - testdir.makepyfile(""" + testdir.makepyfile( + """ import warnings import pytest def test(): warnings.warn(UserWarning(1, u'foo')) - """) - result = testdir.runpytest('-W', 'always') - result.stdout.fnmatch_lines(['*= 1 passed, 1 warnings in *']) + """ + ) + result = testdir.runpytest("-W", "always") + result.stdout.fnmatch_lines(["*= 1 passed, 1 warnings in *"]) diff --git a/tox.ini b/tox.ini index 981945265..2d0fee99c 100644 --- a/tox.ini +++ b/tox.ini @@ -38,15 +38,9 @@ commands = [testenv:linting] skipsdist = True usedevelop = True -basepython = python2.7 -deps = - flake8 - # pygments required by rst-lint - pygments - restructuredtext_lint -commands = - flake8 pytest.py _pytest testing setup.py pytest.py - {envpython} scripts/check-rst.py +basepython = python3.6 +deps = pre-commit +commands = pre-commit run --all-files [testenv:py27-xdist] deps =
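
The `[testenv:linting]` hunk above is the practical entry point for contributors: the environment now installs only `pre-commit` and delegates to it. A minimal sketch of the resulting workflow (env name, interpreter, and command taken from the `[testenv:linting]` section above; `pip install tox` is assumed as the usual way to obtain tox):

    $ pip install tox
    $ tox -e linting    # builds a python3.6 env with pre-commit and runs "pre-commit run --all-files"

Running `pre-commit run --all-files` directly in an environment that already has pre-commit installed gives the same checks without the tox wrapper.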