"""Support for skip/xfail functions and markers."""
import os
import platform
import sys
import traceback
from collections.abc import Mapping
from typing import Generator
from typing import Optional
from typing import Tuple
from typing import Type

import attr

from _pytest.config import Config
from _pytest.config import hookimpl
from _pytest.config.argparsing import Parser
from _pytest.mark.structures import Mark
from _pytest.nodes import Item
from _pytest.outcomes import fail
from _pytest.outcomes import skip
from _pytest.outcomes import xfail
from _pytest.reports import BaseReport
from _pytest.runner import CallInfo
from _pytest.store import StoreKey


def pytest_addoption(parser: Parser) -> None:
    group = parser.getgroup("general")
    group.addoption(
        "--runxfail",
        action="store_true",
        dest="runxfail",
        default=False,
        help="report the results of xfail tests as if they were not marked",
    )

    parser.addini(
        "xfail_strict",
        "default for the strict parameter of xfail "
        "markers when not given explicitly (default: False)",
        default=False,
        type="bool",
    )


def pytest_configure(config: Config) -> None:
    if config.option.runxfail:
        # yay a hack
        import pytest

        old = pytest.xfail
        config._cleanup.append(lambda: setattr(pytest, "xfail", old))

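        # Replacing pytest.xfail with a do-nothing function (while keeping its
        # Exception attribute intact) turns imperative pytest.xfail() calls in
        # test code into no-ops, so the test body keeps running under --runxfail.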
        def nop(*args, **kwargs):
            pass

        nop.Exception = xfail.Exception  # type: ignore[attr-defined]
        setattr(pytest, "xfail", nop)

    config.addinivalue_line(
        "markers",
        "skip(reason=None): skip the given test function with an optional reason. "
        'Example: skip(reason="no way of currently testing this") skips the '
        "test.",
    )
    config.addinivalue_line(
        "markers",
        "skipif(condition, ..., *, reason=...): "
        "skip the given test function if any of the conditions evaluate to True. "
        "Example: skipif(sys.platform == 'win32') skips the test if we are on the win32 platform. "
        "See https://docs.pytest.org/en/stable/reference.html#pytest-mark-skipif",
    )
    config.addinivalue_line(
        "markers",
        "xfail(condition, ..., *, reason=..., run=True, raises=None, strict=xfail_strict): "
        "mark the test function as an expected failure if any of the conditions "
        "evaluate to True. Optionally specify a reason for better reporting "
        "and run=False if you don't even want to execute the test function. "
        "If only specific exception(s) are expected, you can list them in "
        "raises, and if the test fails in other ways, it will be reported as "
        "a true failure. See https://docs.pytest.org/en/stable/reference.html#pytest-mark-xfail",
    )


def evaluate_condition(item: Item, mark: Mark, condition: object) -> Tuple[bool, str]:
    """Evaluate a single skipif/xfail condition.

    If an old-style string condition is given, it is eval()'d, otherwise the
    condition is bool()'d. If this fails, an appropriately formatted pytest.fail
    is raised.

    Returns (result, reason). The reason is only relevant if the result is True.
    """
    # String condition.
    if isinstance(condition, str):
        globals_ = {
            "os": os,
            "sys": sys,
            "platform": platform,
            "config": item.config,
        }
        for dictionary in reversed(
            item.ihook.pytest_markeval_namespace(config=item.config)
        ):
            if not isinstance(dictionary, Mapping):
                raise ValueError(
                    "pytest_markeval_namespace() needs to return a dict, got {!r}".format(
                        dictionary
                    )
                )
            globals_.update(dictionary)
        if hasattr(item, "obj"):
            globals_.update(item.obj.__globals__)  # type: ignore[attr-defined]
        try:
            filename = f"<{mark.name} condition>"
            condition_code = compile(condition, filename, "eval")
            result = eval(condition_code, globals_)
        except SyntaxError as exc:
            msglines = [
                "Error evaluating %r condition" % mark.name,
                "    " + condition,
                "    " + " " * (exc.offset or 0) + "^",
                "SyntaxError: invalid syntax",
            ]
            fail("\n".join(msglines), pytrace=False)
        except Exception as exc:
            msglines = [
                "Error evaluating %r condition" % mark.name,
                "    " + condition,
                *traceback.format_exception_only(type(exc), exc),
            ]
            fail("\n".join(msglines), pytrace=False)

    # Boolean condition.
    else:
        try:
            result = bool(condition)
        except Exception as exc:
            msglines = [
                "Error evaluating %r condition as a boolean" % mark.name,
                *traceback.format_exception_only(type(exc), exc),
            ]
            fail("\n".join(msglines), pytrace=False)

    reason = mark.kwargs.get("reason", None)
    if reason is None:
        if isinstance(condition, str):
            reason = "condition: " + condition
        else:
            # XXX better be checked at collection time
            msg = (
                "Error evaluating %r: " % mark.name
                + "you need to specify reason=STRING when using booleans as conditions."
            )
            fail(msg, pytrace=False)

    return result, reason


@attr.s(slots=True, frozen=True)
class Skip:
    """The result of evaluate_skip_marks()."""

    reason = attr.ib(type=str)


def evaluate_skip_marks(item: Item) -> Optional[Skip]:
    """Evaluate skip and skipif marks on item, returning Skip if triggered."""
    for mark in item.iter_markers(name="skipif"):
        if "condition" not in mark.kwargs:
            conditions = mark.args
        else:
            conditions = (mark.kwargs["condition"],)

        # Unconditional.
        if not conditions:
            reason = mark.kwargs.get("reason", "")
            return Skip(reason)

        # If any of the conditions are true.
        for condition in conditions:
            result, reason = evaluate_condition(item, mark, condition)
            if result:
                return Skip(reason)

    for mark in item.iter_markers(name="skip"):
        if "reason" in mark.kwargs:
            reason = mark.kwargs["reason"]
        elif mark.args:
            reason = mark.args[0]
        else:
            reason = "unconditional skip"
        return Skip(reason)

    return None


@attr.s(slots=True, frozen=True)
class Xfail:
    """The result of evaluate_xfail_marks()."""

    reason = attr.ib(type=str)
    run = attr.ib(type=bool)
    strict = attr.ib(type=bool)
    raises = attr.ib(type=Optional[Tuple[Type[BaseException], ...]])


def evaluate_xfail_marks(item: Item) -> Optional[Xfail]:
    """Evaluate xfail marks on item, returning Xfail if triggered."""
    for mark in item.iter_markers(name="xfail"):
        run = mark.kwargs.get("run", True)
        strict = mark.kwargs.get("strict", item.config.getini("xfail_strict"))
        raises = mark.kwargs.get("raises", None)
        if "condition" not in mark.kwargs:
            conditions = mark.args
        else:
            conditions = (mark.kwargs["condition"],)

        # Unconditional.
        if not conditions:
            reason = mark.kwargs.get("reason", "")
            return Xfail(reason, run, strict, raises)

        # If any of the conditions are true.
        for condition in conditions:
            result, reason = evaluate_condition(item, mark, condition)
            if result:
                return Xfail(reason, run, strict, raises)

    return None


# Whether skipped due to skip or skipif marks.
skipped_by_mark_key = StoreKey[bool]()
# Saves the xfail mark evaluation. Can be refreshed during call if None.
xfailed_key = StoreKey[Optional[Xfail]]()
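# Set by pytest's unittest integration when a test decorated with
# unittest.expectedFailure passes unexpectedly; holds the (possibly empty)
# reason string used by the "unittest special case" in pytest_runtest_makereport.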
unexpectedsuccess_key = StoreKey[str]()


@hookimpl(tryfirst=True)
def pytest_runtest_setup(item: Item) -> None:
    skipped = evaluate_skip_marks(item)
    item._store[skipped_by_mark_key] = skipped is not None
    if skipped:
        skip(skipped.reason)

    item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)
    if xfailed and not item.config.option.runxfail and not xfailed.run:
        xfail("[NOTRUN] " + xfailed.reason)


@hookimpl(hookwrapper=True)
def pytest_runtest_call(item: Item) -> Generator[None, None, None]:
    xfailed = item._store.get(xfailed_key, None)
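    # Setup stored the xfail evaluation, but it may be None; re-evaluate here in
    # case an xfail marker was added dynamically since then (e.g. by a fixture).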
    if xfailed is None:
        item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)

    if xfailed and not item.config.option.runxfail and not xfailed.run:
        xfail("[NOTRUN] " + xfailed.reason)

    yield

    # The test run may have added an xfail mark dynamically.
    xfailed = item._store.get(xfailed_key, None)
    if xfailed is None:
        item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)


@hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
    outcome = yield
    rep = outcome.get_result()
    xfailed = item._store.get(xfailed_key, None)
    # unittest special case, see setting of unexpectedsuccess_key
    if unexpectedsuccess_key in item._store and rep.when == "call":
        reason = item._store[unexpectedsuccess_key]
        if reason:
            rep.longrepr = f"Unexpected success: {reason}"
        else:
            rep.longrepr = "Unexpected success"
        rep.outcome = "failed"
    elif item.config.option.runxfail:
        pass  # don't interfere
    elif call.excinfo and isinstance(call.excinfo.value, xfail.Exception):
        assert call.excinfo.value.msg is not None
        rep.wasxfail = "reason: " + call.excinfo.value.msg
        rep.outcome = "skipped"
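    # An xfail mark applies and the test did not skip: an exception matching
    # `raises` (or any exception when raises is None) is reported as XFAIL,
    # while a passing call becomes XPASS, which fails the test when strict.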
    elif not rep.skipped and xfailed:
        if call.excinfo:
            raises = xfailed.raises
            if raises is not None and not isinstance(call.excinfo.value, raises):
                rep.outcome = "failed"
            else:
                rep.outcome = "skipped"
                rep.wasxfail = xfailed.reason
        elif call.when == "call":
            if xfailed.strict:
                rep.outcome = "failed"
                rep.longrepr = "[XPASS(strict)] " + xfailed.reason
            else:
                rep.outcome = "passed"
                rep.wasxfail = xfailed.reason

    if (
        item._store.get(skipped_by_mark_key, True)
        and rep.skipped
        and type(rep.longrepr) is tuple
    ):
        # Skipped by mark.skipif; change the location of the failure
        # to point to the item definition, otherwise it will display
        # the location of where the skip exception was raised within pytest.
        _, _, reason = rep.longrepr
        filename, line = item.reportinfo()[:2]
        assert line is not None
        rep.longrepr = str(filename), line + 1, reason


def pytest_report_teststatus(report: BaseReport) -> Optional[Tuple[str, str, str]]:
    if hasattr(report, "wasxfail"):
        if report.skipped:
            return "xfailed", "x", "XFAIL"
        elif report.passed:
            return "xpassed", "X", "XPASS"
    return None