Merge pull request #8241 from bluetech/skip-improvements
Minor code improvements in nose, unittest, skipping
commit c9e9a599fe
--- a/src/_pytest/fixtures.py
+++ b/src/_pytest/fixtures.py
@@ -543,10 +543,8 @@ class FixtureRequest:
         self._addfinalizer(finalizer, scope=self.scope)

     def _addfinalizer(self, finalizer: Callable[[], object], scope) -> None:
-        colitem = self._getscopeitem(scope)
-        self._pyfuncitem.session._setupstate.addfinalizer(
-            finalizer=finalizer, colitem=colitem
-        )
+        item = self._getscopeitem(scope)
+        item.addfinalizer(finalizer)

     def applymarker(self, marker: Union[str, MarkDecorator]) -> None:
         """Apply a marker to a single test function invocation.
@@ -694,9 +692,7 @@ class FixtureRequest:
         self, fixturedef: "FixtureDef[object]", subrequest: "SubRequest"
     ) -> None:
         # If fixture function failed it might have registered finalizers.
-        self.session._setupstate.addfinalizer(
-            functools.partial(fixturedef.finish, request=subrequest), subrequest.node
-        )
+        subrequest.node.addfinalizer(lambda: fixturedef.finish(request=subrequest))

     def _check_scope(
         self,
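Both fixtures.py hunks replace an indirect call through the session's SetupState with a direct node.addfinalizer() call on the scope's item. A minimal sketch of the user-facing path into _addfinalizer(), assuming a made-up test and file name (when `request` is the function-scoped FixtureRequest, its addfinalizer() delegates to _addfinalizer()):

def test_writes(request, tmp_path):
    handle = (tmp_path / "data.txt").open("w")  # illustrative resource
    # FixtureRequest.addfinalizer() delegates to _addfinalizer(), which after
    # this change resolves the scope's node and calls node.addfinalizer() on
    # it instead of going through session._setupstate.
    request.addfinalizer(handle.close)
    handle.write("hello")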
--- a/src/_pytest/nose.py
+++ b/src/_pytest/nose.py
@@ -1,33 +1,35 @@
 """Run testsuites written for nose."""
-from _pytest import python
-from _pytest import unittest
 from _pytest.config import hookimpl
 from _pytest.fixtures import getfixturemarker
 from _pytest.nodes import Item
+from _pytest.python import Function
+from _pytest.unittest import TestCaseFunction


 @hookimpl(trylast=True)
-def pytest_runtest_setup(item) -> None:
-    if is_potential_nosetest(item):
-        if not call_optional(item.obj, "setup"):
-            # Call module level setup if there is no object level one.
-            call_optional(item.parent.obj, "setup")
-        # XXX This implies we only call teardown when setup worked.
-        item.session._setupstate.addfinalizer((lambda: teardown_nose(item)), item)
-
-
-def teardown_nose(item) -> None:
-    if is_potential_nosetest(item):
-        if not call_optional(item.obj, "teardown"):
-            call_optional(item.parent.obj, "teardown")
-
-
-def is_potential_nosetest(item: Item) -> bool:
-    # Extra check needed since we do not do nose style setup/teardown
-    # on direct unittest style classes.
-    return isinstance(item, python.Function) and not isinstance(
-        item, unittest.TestCaseFunction
-    )
+def pytest_runtest_setup(item: Item) -> None:
+    if not isinstance(item, Function):
+        return
+    # Don't do nose style setup/teardown on direct unittest style classes.
+    if isinstance(item, TestCaseFunction):
+        return
+
+    # Capture the narrowed type of item for the teardown closure,
+    # see https://github.com/python/mypy/issues/2608
+    func = item
+
+    if not call_optional(func.obj, "setup"):
+        # Call module level setup if there is no object level one.
+        assert func.parent is not None
+        call_optional(func.parent.obj, "setup")  # type: ignore[attr-defined]
+
+    def teardown_nose() -> None:
+        if not call_optional(func.obj, "teardown"):
+            assert func.parent is not None
+            call_optional(func.parent.obj, "teardown")  # type: ignore[attr-defined]
+
+    # XXX This implies we only call teardown when setup worked.
+    func.addfinalizer(teardown_nose)


 def call_optional(obj: object, name: str) -> bool:
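The rewritten hook narrows item to a Function up front and registers the teardown closure directly on it. A hedged sketch of the kind of nose-style module this hook targets (module, names, and state are illustrative):

# test_nose_style.py
state = {}


def setup():
    # Nose-style module-level setup: a plain test function has no object-level
    # setup, so call_optional(func.parent.obj, "setup") falls back to this.
    state["ready"] = True


def teardown():
    # Registered per item via func.addfinalizer(teardown_nose) above.
    state.clear()


def test_uses_state():
    assert state["ready"] is True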
--- a/src/_pytest/outcomes.py
+++ b/src/_pytest/outcomes.py
@@ -58,9 +58,14 @@ class Skipped(OutcomeException):
         msg: Optional[str] = None,
         pytrace: bool = True,
         allow_module_level: bool = False,
+        *,
+        _use_item_location: bool = False,
     ) -> None:
         OutcomeException.__init__(self, msg=msg, pytrace=pytrace)
         self.allow_module_level = allow_module_level
+        # If true, the skip location is reported as the item's location,
+        # instead of the place that raises the exception/calls skip().
+        self._use_item_location = _use_item_location


 class Failed(OutcomeException):
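The new keyword-only flag is consumed by TestReport (next hunk) and raised directly in the skipping and unittest hunks below. A hedged sketch of a conftest.py hook using it the same way (the marker name is invented):

import pytest


@pytest.hookimpl(tryfirst=True)
def pytest_runtest_setup(item):
    if item.get_closest_marker("needs_gpu"):  # hypothetical marker
        # Report the skip at the test's own location rather than at this
        # raise statement inside the plugin.
        raise pytest.skip.Exception("no GPU available", _use_item_location=True)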
--- a/src/_pytest/reports.py
+++ b/src/_pytest/reports.py
@@ -324,7 +324,12 @@ class TestReport(BaseReport):
             elif isinstance(excinfo.value, skip.Exception):
                 outcome = "skipped"
                 r = excinfo._getreprcrash()
-                longrepr = (str(r.path), r.lineno, r.message)
+                if excinfo.value._use_item_location:
+                    filename, line = item.reportinfo()[:2]
+                    assert line is not None
+                    longrepr = str(filename), line + 1, r.message
+                else:
+                    longrepr = (str(r.path), r.lineno, r.message)
             else:
                 outcome = "failed"
                 if call.when == "call":
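A hedged illustration of the two (path, lineno, message) tuples the branch above can produce; the paths and numbers are made up. Item.reportinfo() returns a 0-based line number, hence the line + 1:

# Without _use_item_location: the location where skip.Exception was raised.
longrepr = ("src/_pytest/skipping.py", 240, "Skipped: no GPU available")
# With _use_item_location: the test definition itself (reportinfo()[1] + 1).
longrepr = ("tests/test_gpu.py", 12, "Skipped: no GPU available")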
--- a/src/_pytest/skipping.py
+++ b/src/_pytest/skipping.py
@@ -230,8 +230,6 @@ def evaluate_xfail_marks(item: Item) -> Optional[Xfail]:
     return None


-# Whether skipped due to skip or skipif marks.
-skipped_by_mark_key = StoreKey[bool]()
 # Saves the xfail mark evaluation. Can be refreshed during call if None.
 xfailed_key = StoreKey[Optional[Xfail]]()

@@ -239,9 +237,8 @@ xfailed_key = StoreKey[Optional[Xfail]]()
 @hookimpl(tryfirst=True)
 def pytest_runtest_setup(item: Item) -> None:
     skipped = evaluate_skip_marks(item)
-    item._store[skipped_by_mark_key] = skipped is not None
     if skipped:
-        skip(skipped.reason)
+        raise skip.Exception(skipped.reason, _use_item_location=True)

     item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)
     if xfailed and not item.config.option.runxfail and not xfailed.run:
@@ -292,19 +289,6 @@ def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
                 rep.outcome = "passed"
                 rep.wasxfail = xfailed.reason

-    if (
-        item._store.get(skipped_by_mark_key, True)
-        and rep.skipped
-        and type(rep.longrepr) is tuple
-    ):
-        # Skipped by mark.skipif; change the location of the failure
-        # to point to the item definition, otherwise it will display
-        # the location of where the skip exception was raised within pytest.
-        _, _, reason = rep.longrepr
-        filename, line = item.reportinfo()[:2]
-        assert line is not None
-        rep.longrepr = str(filename), line + 1, reason
-

 def pytest_report_teststatus(report: BaseReport) -> Optional[Tuple[str, str, str]]:
     if hasattr(report, "wasxfail"):
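With the report-rewriting block removed, the location fix now happens when the skip exception is raised. A sketch of the user-visible effect, assuming a file like this (names are illustrative):

import pytest


@pytest.mark.skip(reason="not implemented yet")
def test_feature():
    assert False  # never runs


# `pytest -rs` should attribute the skip to this file and the line of
# test_feature() (via _use_item_location), not to the internal
# src/_pytest/skipping.py line that raises skip.Exception.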
--- a/src/_pytest/unittest.py
+++ b/src/_pytest/unittest.py
@@ -29,7 +29,6 @@ from _pytest.python import Class
 from _pytest.python import Function
 from _pytest.python import PyCollector
 from _pytest.runner import CallInfo
-from _pytest.skipping import skipped_by_mark_key

 if TYPE_CHECKING:
     import unittest
@@ -150,7 +149,7 @@ def _make_xunit_fixture(
     def fixture(self, request: FixtureRequest) -> Generator[None, None, None]:
         if _is_skipped(self):
             reason = self.__unittest_skip_why__
-            pytest.skip(reason)
+            raise pytest.skip.Exception(reason, _use_item_location=True)
         if setup is not None:
             try:
                 if pass_self:
@@ -256,9 +255,8 @@ class TestCaseFunction(Function):

     def addSkip(self, testcase: "unittest.TestCase", reason: str) -> None:
         try:
-            skip(reason)
+            raise pytest.skip.Exception(reason, _use_item_location=True)
         except skip.Exception:
-            self._store[skipped_by_mark_key] = True
             self._addexcinfo(sys.exc_info())

     def addExpectedFailure(
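addSkip() now raises the pytest skip exception directly with the item location instead of setting the removed store key. A hedged sketch of the unittest-style tests that reach it (names are illustrative):

import unittest


class TestLegacy(unittest.TestCase):
    @unittest.skip("covered elsewhere")
    def test_old_path(self):
        self.fail("never runs")

    def test_backend(self):
        # self.skipTest() also arrives at TestCaseFunction.addSkip().
        self.skipTest("backend not configured")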
@@ -343,6 +341,10 @@ def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> None:
             except AttributeError:
                 pass

+    # Convert unittest.SkipTest to pytest.skip.
+    # This is actually only needed for nose, which reuses unittest.SkipTest for
+    # its own nose.SkipTest. For unittest TestCases, SkipTest is already
+    # handled internally, and doesn't reach here.
    unittest = sys.modules.get("unittest")
    if (
        unittest
@@ -350,7 +352,6 @@ def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> None:
         and isinstance(call.excinfo.value, unittest.SkipTest)  # type: ignore[attr-defined]
     ):
         excinfo = call.excinfo
-        # Let's substitute the excinfo with a pytest.skip one.
         call2 = CallInfo[None].from_call(
             lambda: pytest.skip(str(excinfo.value)), call.when
         )
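The comment added above notes this conversion only matters for nose, whose SkipTest is an alias of unittest.SkipTest. A hedged sketch of a plain (non-TestCase) test that reaches this block (names are illustrative):

import unittest


def test_optional_backend():
    # nose.SkipTest is unittest.SkipTest; raised outside a TestCase it is not
    # handled by unittest itself, so the hook above converts it to pytest.skip.
    raise unittest.SkipTest("optional backend not installed")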