Merge commit '56156bb119194014129ac08c4a2c370f0b893104' into merge-master
AUTHORS

@@ -73,6 +73,7 @@ Omar Kohl
 Pieter Mulder
 Piotr Banaszkiewicz
 Punyashloka Biswal
+Quentin Pradet
 Ralf Schmitt
 Raphael Pierzina
 Roman Bolshakov

@@ -77,7 +77,9 @@
 
 *
 
-*
+* Fix ``pytest.mark.skip`` mark when used in strict mode.
+  Thanks `@pquentin`_ for the PR and `@RonnyPfannschmidt`_ for
+  showing how to fix the bug.
 
 * Minor improvements and fixes to the documentation.
   Thanks `@omarkohl`_ for the PR.

@@ -237,6 +239,7 @@
 .. _@rabbbit: https://github.com/rabbbit
 .. _@hackebrot: https://github.com/hackebrot
 .. _@omarkohl: https://github.com/omarkohl
+.. _@pquentin: https://github.com/pquentin
 
 2.8.7
 =====

@@ -30,6 +30,11 @@ def pytest_configure(config):
     nop.Exception = XFailed
     setattr(pytest, "xfail", nop)
 
+    config.addinivalue_line("markers",
+        "skip(reason=None): skip the given test function with an optional reason. "
+        "Example: skip(reason=\"no way of currently testing this\") skips the "
+        "test."
+    )
     config.addinivalue_line("markers",
         "skipif(condition): skip the given test function if eval(condition) "
         "results in a True value. Evaluation happens within the "
@@ -38,13 +43,13 @@ def pytest_configure(config):
         "http://pytest.org/latest/skipping.html"
     )
     config.addinivalue_line("markers",
-        "xfail(condition, reason=None, run=True, raises=None): mark the the test function "
-        "as an expected failure if eval(condition) has a True value. "
-        "Optionally specify a reason for better reporting and run=False if "
-        "you don't even want to execute the test function. If only specific "
-        "exception(s) are expected, you can list them in raises, and if the test fails "
-        "in other ways, it will be reported as a true failure. "
-        "See http://pytest.org/latest/skipping.html"
+        "xfail(condition, reason=None, run=True, raises=None, strict=False): "
+        "mark the the test function as an expected failure if eval(condition) "
+        "has a True value. Optionally specify a reason for better reporting "
+        "and run=False if you don't even want to execute the test function. "
+        "If only specific exception(s) are expected, you can list them in "
+        "raises, and if the test fails in other ways, it will be reported as "
+        "a true failure. See http://pytest.org/latest/skipping.html"
     )
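
The hunk above registers the previously missing ``skip`` marker via ``config.addinivalue_line``, which is why ``pytest.mark.skip`` no longer trips ``--strict`` mode. The same API is how a ``conftest.py`` or plugin declares its own markers so strict mode accepts them; a minimal sketch (the ``slow`` marker name is illustrative, not part of this change)::

    # conftest.py
    def pytest_configure(config):
        # register the marker so that --strict mode accepts it
        config.addinivalue_line(
            "markers",
            "slow: mark a test as slow-running (illustrative example)"
        )
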
@@ -158,13 +158,22 @@ it in your setuptools-invocation:
             'name_of_plugin = myproject.pluginmodule',
         ]
     },
 
+    # custom PyPI classifier for pytest plugins
+    classifiers=[
+        "Framework :: Pytest",
+    ],
 )
 
 If a package is installed this way, ``pytest`` will load
 ``myproject.pluginmodule`` as a plugin which can define
 `well specified hooks`_.
 
+.. note::
+
+    Make sure to include ``Framework :: Pytest`` in your list of
+    `PyPI classifiers <http://python-packaging-user-guide.readthedocs.org/en/latest/distributing/#classifiers>`_
+    to make it easy for users to find your plugin.
 
 
 Requiring/Loading plugins in a test module or conftest file
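
For context, the complete ``setup.py`` that this documentation hunk is editing would plausibly look as follows once the classifier is added (package metadata beyond what the hunk shows is assumed)::

    from setuptools import setup

    setup(
        name="myproject",
        packages=["myproject"],
        # the pytest11 entry point is what makes the module load as a plugin
        entry_points={
            'pytest11': [
                'name_of_plugin = myproject.pluginmodule',
            ]
        },
        # custom PyPI classifier for pytest plugins
        classifiers=[
            "Framework :: Pytest",
        ],
    )
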
@@ -2,30 +2,34 @@ import json
 import py
 import textwrap
 
-issues_url = "http://bitbucket.org/api/1.0/repositories/pytest-dev/pytest/issues"
+issues_url = "https://api.github.com/repos/pytest-dev/pytest/issues"
 
 import requests
 
 
 def get_issues():
-    chunksize = 50
-    start = 0
     issues = []
+    url = issues_url
     while 1:
-        post_data = {"accountname": "pytest-dev",
-                     "repo_slug": "pytest",
-                     "start": start,
-                     "limit": chunksize}
-        print ("getting from", start)
-        r = requests.get(issues_url, params=post_data)
+        get_data = {"state": "all"}
+        r = requests.get(url, params=get_data)
         data = r.json()
-        issues.extend(data["issues"])
-        if start + chunksize >= data["count"]:
+        if r.status_code == 403:
+            # API request limit exceeded
+            print(data['message'])
+            exit(1)
+        issues.extend(data)
+
+        # Look for next page
+        links = requests.utils.parse_header_links(r.headers['Link'])
+        another_page = False
+        for link in links:
+            if link['rel'] == 'next':
+                url = link['url']
+                another_page = True
+        if not another_page:
             return issues
-        start += chunksize
 
-kind2num = "bug enhancement task proposal".split()
 
-status2num = "new open resolved duplicate invalid wontfix".split()
 
 def main(args):
     cachefile = py.path.local(args.cache)
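
The rewritten loop switches from Bitbucket's offset/limit paging to GitHub's RFC 5988 ``Link`` header, following ``rel="next"`` URLs until no further page is advertised. A self-contained sketch of that pagination pattern, independent of this script (the URL in the usage comment is illustrative)::

    import requests

    def fetch_all_pages(url, params=None):
        # Collect every page of a GitHub-style paginated listing by
        # following the Link: <...>; rel="next" header.
        results = []
        while url:
            r = requests.get(url, params=params)
            r.raise_for_status()
            results.extend(r.json())
            url = None
            params = None  # the "next" URL already embeds the query string
            if 'Link' in r.headers:
                for link in requests.utils.parse_header_links(r.headers['Link']):
                    if link.get('rel') == 'next':
                        url = link['url']
        return results

    # e.g. fetch_all_pages("https://api.github.com/repos/pytest-dev/pytest/issues",
    #                      {"state": "all"})
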
@@ -35,33 +39,38 @@ def main(args):
     else:
         issues = json.loads(cachefile.read())
 
-    open_issues = [x for x in issues
-                   if x["status"] in ("new", "open")]
+    open_issues = [x for x in issues if x["state"] == "open"]
 
-    def kind_and_id(x):
-        kind = x["metadata"]["kind"]
-        return kind2num.index(kind), len(issues)-int(x["local_id"])
-    open_issues.sort(key=kind_and_id)
+    open_issues.sort(key=lambda x: x["number"])
     report(open_issues)
 
 
+def _get_kind(issue):
+    labels = [l['name'] for l in issue['labels']]
+    for key in ('bug', 'enhancement', 'proposal'):
+        if key in labels:
+            return key
+    return 'issue'
+
+
 def report(issues):
     for issue in issues:
-        metadata = issue["metadata"]
-        priority = issue["priority"]
         title = issue["title"]
-        content = issue["content"]
-        kind = metadata["kind"]
-        status = issue["status"]
-        id = issue["local_id"]
-        link = "https://bitbucket.org/pytest-dev/pytest/issue/%s/" % id
+        body = issue["body"]
+        kind = _get_kind(issue)
+        status = issue["state"]
+        number = issue["number"]
+        link = "https://github.com/pytest-dev/pytest/issues/%s/" % number
         print("----")
         print(status, kind, link)
         print(title)
         #print()
-        #lines = content.split("\n")
+        #lines = body.split("\n")
         #print ("\n".join(lines[:3]))
-        #if len(lines) > 3 or len(content) > 240:
+        #if len(lines) > 3 or len(body) > 240:
         #    print ("...")
+    print("\n\nFound %s open issues" % len(issues))
 
 
 if __name__ == "__main__":
     import argparse

@@ -72,3 +81,4 @@ if __name__ == "__main__":
                         help="cache file")
     args = parser.parse_args()
     main(args)
+
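
The ``else:`` branch at the top of the previous hunk belongs to the script's cache handling, which the diff only shows in part. A hedged reconstruction of how ``main`` plausibly ties together (the ``--refresh`` flag and the exact condition are assumptions beyond what the hunks show)::

    import json
    import py

    def main(args):
        cachefile = py.path.local(args.cache)
        if not cachefile.check() or args.refresh:
            # hit the API and snapshot the result to disk (assumed flow)
            issues = get_issues()
            cachefile.write(json.dumps(issues))
        else:
            # reuse the cached JSON snapshot
            issues = json.loads(cachefile.read())

        open_issues = [x for x in issues if x["state"] == "open"]
        open_issues.sort(key=lambda x: x["number"])
        report(open_issues)
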
@@ -539,6 +539,19 @@ class TestSkip:
             "*1 passed*2 skipped*",
         ])
 
+    def test_strict_and_skip(self, testdir):
+        testdir.makepyfile("""
+            import pytest
+            @pytest.mark.skip
+            def test_hello():
+                pass
+        """)
+        result = testdir.runpytest("-rs --strict")
+        result.stdout.fnmatch_lines([
+            "*unconditional skip*",
+            "*1 skipped*",
+        ])
+
 class TestSkipif:
     def test_skipif_conditional(self, testdir):
         item = testdir.getitem("""

@@ -812,7 +825,7 @@ def test_default_markers(testdir):
     result = testdir.runpytest("--markers")
     result.stdout.fnmatch_lines([
         "*skipif(*condition)*skip*",
-        "*xfail(*condition, reason=None, run=True, raises=None)*expected failure*",
+        "*xfail(*condition, reason=None, run=True, raises=None, strict=False)*expected failure*",
     ])
 
 def test_xfail_test_setup_exception(testdir):
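
Outside of the ``testdir`` fixture, the scenario the new regression test covers can be reproduced by hand; a sketch (file name and invocation are illustrative)::

    # test_strict_skip.py
    import pytest

    @pytest.mark.skip       # before this fix, --strict rejected the bare skip marker
    def test_hello():
        pass

    # run with:  pytest -rs --strict test_strict_skip.py
    # expected:  1 skipped, reason "unconditional skip"
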