You cannot select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

325 lines
11KB

  1. """Support for skip/xfail functions and markers."""
  2. import os
  3. import platform
  4. import sys
  5. import traceback
  6. from collections.abc import Mapping
  7. from typing import Generator
  8. from typing import Optional
  9. from typing import Tuple
  10. from typing import Type
  11. import attr
  12. from _pytest.config import Config
  13. from _pytest.config import hookimpl
  14. from _pytest.config.argparsing import Parser
  15. from _pytest.mark.structures import Mark
  16. from _pytest.nodes import Item
  17. from _pytest.outcomes import fail
  18. from _pytest.outcomes import skip
  19. from _pytest.outcomes import xfail
  20. from _pytest.reports import BaseReport
  21. from _pytest.runner import CallInfo
  22. from _pytest.store import StoreKey
  23. def pytest_addoption(parser: Parser) -> None:
  24. group = parser.getgroup("general")
  25. group.addoption(
  26. "--runxfail",
  27. action="store_true",
  28. dest="runxfail",
  29. default=False,
  30. help="report the results of xfail tests as if they were not marked",
  31. )
  32. parser.addini(
  33. "xfail_strict",
  34. "default for the strict parameter of xfail "
  35. "markers when not given explicitly (default: False)",
  36. default=False,
  37. type="bool",
  38. )
  39. def pytest_configure(config: Config) -> None:
  40. if config.option.runxfail:
  41. # yay a hack
  42. import pytest
  43. old = pytest.xfail
  44. config._cleanup.append(lambda: setattr(pytest, "xfail", old))
  45. def nop(*args, **kwargs):
  46. pass
  47. nop.Exception = xfail.Exception # type: ignore[attr-defined]
  48. setattr(pytest, "xfail", nop)
  49. config.addinivalue_line(
  50. "markers",
  51. "skip(reason=None): skip the given test function with an optional reason. "
  52. 'Example: skip(reason="no way of currently testing this") skips the '
  53. "test.",
  54. )
  55. config.addinivalue_line(
  56. "markers",
  57. "skipif(condition, ..., *, reason=...): "
  58. "skip the given test function if any of the conditions evaluate to True. "
  59. "Example: skipif(sys.platform == 'win32') skips the test if we are on the win32 platform. "
  60. "See https://docs.pytest.org/en/stable/reference.html#pytest-mark-skipif",
  61. )
  62. config.addinivalue_line(
  63. "markers",
  64. "xfail(condition, ..., *, reason=..., run=True, raises=None, strict=xfail_strict): "
  65. "mark the test function as an expected failure if any of the conditions "
  66. "evaluate to True. Optionally specify a reason for better reporting "
  67. "and run=False if you don't even want to execute the test function. "
  68. "If only specific exception(s) are expected, you can list them in "
  69. "raises, and if the test fails in other ways, it will be reported as "
  70. "a true failure. See https://docs.pytest.org/en/stable/reference.html#pytest-mark-xfail",
  71. )
def evaluate_condition(item: Item, mark: Mark, condition: object) -> Tuple[bool, str]:
    """Evaluate a single skipif/xfail condition.

    If an old-style string condition is given, it is eval()'d, otherwise the
    condition is bool()'d. If this fails, an appropriately formatted pytest.fail
    is raised.

    Returns (result, reason). The reason is only relevant if the result is True.
    """
    # String condition.
    if isinstance(condition, str):
        # Base namespace available to eval()'d string conditions.
        globals_ = {
            "os": os,
            "sys": sys,
            "platform": platform,
            "config": item.config,
        }
        # Plugins can extend the namespace via the pytest_markeval_namespace
        # hook; iterate in reverse so later updates win over earlier ones.
        for dictionary in reversed(
            item.ihook.pytest_markeval_namespace(config=item.config)
        ):
            if not isinstance(dictionary, Mapping):
                raise ValueError(
                    "pytest_markeval_namespace() needs to return a dict, got {!r}".format(
                        dictionary
                    )
                )
            globals_.update(dictionary)
        # The test module's own globals take highest precedence, when present.
        if hasattr(item, "obj"):
            globals_.update(item.obj.__globals__)  # type: ignore[attr-defined]
        try:
            filename = f"<{mark.name} condition>"
            condition_code = compile(condition, filename, "eval")
            result = eval(condition_code, globals_)
        except SyntaxError as exc:
            # Show the condition with a caret pointing at the bad column
            # (exc.offset may be None, hence the `or 0`).
            msglines = [
                "Error evaluating %r condition" % mark.name,
                "    " + condition,
                "    " + " " * (exc.offset or 0) + "^",
                "SyntaxError: invalid syntax",
            ]
            fail("\n".join(msglines), pytrace=False)
        except Exception as exc:
            msglines = [
                "Error evaluating %r condition" % mark.name,
                "    " + condition,
                *traceback.format_exception_only(type(exc), exc),
            ]
            fail("\n".join(msglines), pytrace=False)

    # Boolean condition.
    else:
        try:
            result = bool(condition)
        except Exception as exc:
            msglines = [
                "Error evaluating %r condition as a boolean" % mark.name,
                *traceback.format_exception_only(type(exc), exc),
            ]
            fail("\n".join(msglines), pytrace=False)

    reason = mark.kwargs.get("reason", None)
    if reason is None:
        if isinstance(condition, str):
            reason = "condition: " + condition
        else:
            # XXX better be checked at collection time
            msg = (
                "Error evaluating %r: " % mark.name
                + "you need to specify reason=STRING when using booleans as conditions."
            )
            fail(msg, pytrace=False)

    return result, reason
@attr.s(slots=True, frozen=True)
class Skip:
    """The result of evaluate_skip_marks()."""

    # Human-readable reason for the skip (may be the empty string).
    reason = attr.ib(type=str)
  144. def evaluate_skip_marks(item: Item) -> Optional[Skip]:
  145. """Evaluate skip and skipif marks on item, returning Skip if triggered."""
  146. for mark in item.iter_markers(name="skipif"):
  147. if "condition" not in mark.kwargs:
  148. conditions = mark.args
  149. else:
  150. conditions = (mark.kwargs["condition"],)
  151. # Unconditional.
  152. if not conditions:
  153. reason = mark.kwargs.get("reason", "")
  154. return Skip(reason)
  155. # If any of the conditions are true.
  156. for condition in conditions:
  157. result, reason = evaluate_condition(item, mark, condition)
  158. if result:
  159. return Skip(reason)
  160. for mark in item.iter_markers(name="skip"):
  161. if "reason" in mark.kwargs:
  162. reason = mark.kwargs["reason"]
  163. elif mark.args:
  164. reason = mark.args[0]
  165. else:
  166. reason = "unconditional skip"
  167. return Skip(reason)
  168. return None
@attr.s(slots=True, frozen=True)
class Xfail:
    """The result of evaluate_xfail_marks()."""

    # Human-readable reason for the expected failure (may be empty).
    reason = attr.ib(type=str)
    # Whether the test function should be executed at all (run=False -> NOTRUN).
    run = attr.ib(type=bool)
    # Whether an unexpected pass is reported as a failure (strict xfail).
    strict = attr.ib(type=bool)
    # If given, only these exception types count as the expected failure.
    raises = attr.ib(type=Optional[Tuple[Type[BaseException], ...]])
  176. def evaluate_xfail_marks(item: Item) -> Optional[Xfail]:
  177. """Evaluate xfail marks on item, returning Xfail if triggered."""
  178. for mark in item.iter_markers(name="xfail"):
  179. run = mark.kwargs.get("run", True)
  180. strict = mark.kwargs.get("strict", item.config.getini("xfail_strict"))
  181. raises = mark.kwargs.get("raises", None)
  182. if "condition" not in mark.kwargs:
  183. conditions = mark.args
  184. else:
  185. conditions = (mark.kwargs["condition"],)
  186. # Unconditional.
  187. if not conditions:
  188. reason = mark.kwargs.get("reason", "")
  189. return Xfail(reason, run, strict, raises)
  190. # If any of the conditions are true.
  191. for condition in conditions:
  192. result, reason = evaluate_condition(item, mark, condition)
  193. if result:
  194. return Xfail(reason, run, strict, raises)
  195. return None
# Whether skipped due to skip or skipif marks.
skipped_by_mark_key = StoreKey[bool]()
# Saves the xfail mark evaluation. Can be refreshed during call if None.
xfailed_key = StoreKey[Optional[Xfail]]()
# Reason for an unexpected success (used by the unittest integration's
# expectedFailure handling; consumed in pytest_runtest_makereport below).
# NOTE(review): the setter lives outside this file — confirm in the
# unittest plugin.
unexpectedsuccess_key = StoreKey[str]()
  201. @hookimpl(tryfirst=True)
  202. def pytest_runtest_setup(item: Item) -> None:
  203. skipped = evaluate_skip_marks(item)
  204. item._store[skipped_by_mark_key] = skipped is not None
  205. if skipped:
  206. skip(skipped.reason)
  207. item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)
  208. if xfailed and not item.config.option.runxfail and not xfailed.run:
  209. xfail("[NOTRUN] " + xfailed.reason)
  210. @hookimpl(hookwrapper=True)
  211. def pytest_runtest_call(item: Item) -> Generator[None, None, None]:
  212. xfailed = item._store.get(xfailed_key, None)
  213. if xfailed is None:
  214. item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)
  215. if xfailed and not item.config.option.runxfail and not xfailed.run:
  216. xfail("[NOTRUN] " + xfailed.reason)
  217. yield
  218. # The test run may have added an xfail mark dynamically.
  219. xfailed = item._store.get(xfailed_key, None)
  220. if xfailed is None:
  221. item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)
@hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
    """Rewrite the report's outcome/longrepr to reflect skip and xfail marks."""
    outcome = yield
    rep = outcome.get_result()
    xfailed = item._store.get(xfailed_key, None)
    # unittest special case, see setting of unexpectedsuccess_key
    if unexpectedsuccess_key in item._store and rep.when == "call":
        reason = item._store[unexpectedsuccess_key]
        if reason:
            rep.longrepr = f"Unexpected success: {reason}"
        else:
            rep.longrepr = "Unexpected success"
        rep.outcome = "failed"
    elif item.config.option.runxfail:
        pass  # don't interfere
    elif call.excinfo and isinstance(call.excinfo.value, xfail.Exception):
        # The test called pytest.xfail() imperatively.
        assert call.excinfo.value.msg is not None
        rep.wasxfail = "reason: " + call.excinfo.value.msg
        rep.outcome = "skipped"
    elif not rep.skipped and xfailed:
        if call.excinfo:
            raises = xfailed.raises
            if raises is not None and not isinstance(call.excinfo.value, raises):
                # Failed, but not with one of the raises= exceptions: a true failure.
                rep.outcome = "failed"
            else:
                rep.outcome = "skipped"
                rep.wasxfail = xfailed.reason
        elif call.when == "call":
            if xfailed.strict:
                # Passed although marked strict xfail -> report as a failure.
                rep.outcome = "failed"
                rep.longrepr = "[XPASS(strict)] " + xfailed.reason
            else:
                rep.outcome = "passed"
                rep.wasxfail = xfailed.reason

    if (
        item._store.get(skipped_by_mark_key, True)
        and rep.skipped
        and type(rep.longrepr) is tuple
    ):
        # Skipped by mark.skipif; change the location of the failure
        # to point to the item definition, otherwise it will display
        # the location of where the skip exception was raised within pytest.
        _, _, reason = rep.longrepr
        filename, line = item.reportinfo()[:2]
        assert line is not None
        rep.longrepr = str(filename), line + 1, reason
  268. def pytest_report_teststatus(report: BaseReport) -> Optional[Tuple[str, str, str]]:
  269. if hasattr(report, "wasxfail"):
  270. if report.skipped:
  271. return "xfailed", "x", "XFAIL"
  272. elif report.passed:
  273. return "xpassed", "X", "XPASS"
  274. return None