pytest-isolated 0.4.0-py3-none-any.whl → 0.4.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pytest_isolated/__init__.py +1 -1
- pytest_isolated/config.py +81 -0
- pytest_isolated/execution.py +504 -0
- pytest_isolated/grouping.py +87 -0
- pytest_isolated/plugin.py +16 -675
- pytest_isolated/reporting.py +173 -0
- {pytest_isolated-0.4.0.dist-info → pytest_isolated-0.4.1.dist-info}/METADATA +22 -8
- pytest_isolated-0.4.1.dist-info/RECORD +13 -0
- pytest_isolated-0.4.0.dist-info/RECORD +0 -9
- {pytest_isolated-0.4.0.dist-info → pytest_isolated-0.4.1.dist-info}/WHEEL +0 -0
- {pytest_isolated-0.4.0.dist-info → pytest_isolated-0.4.1.dist-info}/entry_points.txt +0 -0
- {pytest_isolated-0.4.0.dist-info → pytest_isolated-0.4.1.dist-info}/licenses/LICENSE +0 -0
- {pytest_isolated-0.4.0.dist-info → pytest_isolated-0.4.1.dist-info}/top_level.txt +0 -0
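The summary above shows most of `plugin.py` (675 lines removed) being split into the new `config.py`, `execution.py`, `grouping.py`, and `reporting.py` modules, leaving only a thin module behind (562 bytes per the new RECORD). Purely as a hypothetical sketch of that shape, and not the actual 0.4.1 source, such a shim might simply re-export the hook implementations so pytest's existing entry point keeps working; every imported name except `pytest_runtest_logreport` (visible in the reporting.py diff below) is an assumption.

```python
# pytest_isolated/plugin.py -- hypothetical sketch only, not the released file.
# The hook names assumed to live in config.py and execution.py are guesses;
# pytest_runtest_logreport is confirmed by the reporting.py diff below.
from .config import pytest_addoption, pytest_configure    # assumed
from .execution import pytest_collection_modifyitems      # assumed
from .reporting import pytest_runtest_logreport           # confirmed below

__all__ = [
    "pytest_addoption",
    "pytest_configure",
    "pytest_collection_modifyitems",
    "pytest_runtest_logreport",
]
```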
pytest_isolated/reporting.py (new file)

@@ -0,0 +1,173 @@
+"""Test result reporting for pytest-isolated."""
+
+from __future__ import annotations
+
+import json
+import os
+from pathlib import Path
+from typing import Any, Literal, TypedDict
+
+import pytest
+
+from .config import SUBPROC_REPORT_PATH
+
+
+class _TestRecord(TypedDict, total=False):
+    """Structure for test phase results from subprocess."""
+
+    nodeid: str
+    when: Literal["setup", "call", "teardown"]
+    outcome: Literal["passed", "failed", "skipped"]
+    longrepr: str
+    duration: float
+    stdout: str
+    stderr: str
+    keywords: list[str]
+    sections: list[tuple[str, str]]
+    user_properties: list[tuple[str, Any]]
+    wasxfail: bool
+
+
+def _format_crash_reason(returncode: int) -> str:
+    """Format a human-readable crash reason from a return code.
+
+    On Unix, negative return codes indicate signal numbers.
+    On Windows, we report the exit code directly.
+    """
+    if returncode < 0:
+        # Unix: negative return code is -signal_number
+        return f"crashed with signal {-returncode}"
+    # Windows or other: positive exit code
+    return f"crashed with exit code {returncode}"
+
+
+def _format_crash_message(
+    returncode: int,
+    context: str,
+    stderr_text: str = "",
+) -> str:
+    """Build a complete crash error message with optional stderr output.
+
+    Args:
+        returncode: The subprocess return code.
+        context: Description of when the crash occurred (e.g., "during test execution").
+        stderr_text: Optional captured stderr from the subprocess.
+
+    Returns:
+        A formatted error message suitable for test failure reports.
+    """
+    reason = _format_crash_reason(returncode)
+    msg = f"Subprocess {reason} {context}."
+    if stderr_text:
+        msg += f"\n\nSubprocess stderr:\n{stderr_text}"
+    return msg
+
+
+def pytest_runtest_logreport(report: pytest.TestReport) -> None:
+    """Write test phase results to a JSONL file when running in subprocess mode."""
+    path = os.environ.get(SUBPROC_REPORT_PATH)
+    if not path:
+        return
+
+    # Capture ALL phases (setup, call, teardown), not just call
+    rec: _TestRecord = {
+        "nodeid": report.nodeid,
+        "when": report.when,  # setup, call, or teardown
+        "outcome": report.outcome,  # passed/failed/skipped
+        "longrepr": str(report.longrepr) if report.longrepr else "",
+        "duration": getattr(report, "duration", 0.0),
+        "stdout": getattr(report, "capstdout", "") or "",
+        "stderr": getattr(report, "capstderr", "") or "",
+        # Preserve test metadata for proper reporting
+        "keywords": list(report.keywords),
+        "sections": getattr(report, "sections", []),  # captured logs, etc.
+        "user_properties": getattr(report, "user_properties", []),
+        "wasxfail": hasattr(report, "wasxfail"),
+    }
+    with Path(path).open("a", encoding="utf-8") as f:
+        f.write(json.dumps(rec) + "\n")
+
+
+def _emit_report(
+    item: pytest.Item,
+    *,
+    when: Literal["setup", "call", "teardown"],
+    outcome: Literal["passed", "failed", "skipped"],
+    longrepr: str = "",
+    duration: float = 0.0,
+    stdout: str = "",
+    stderr: str = "",
+    sections: list[tuple[str, str]] | None = None,
+    user_properties: list[tuple[str, Any]] | None = None,
+    wasxfail: bool = False,
+) -> None:
+    """Emit a test report for a specific test phase."""
+    call = pytest.CallInfo.from_call(lambda: None, when=when)
+    rep = pytest.TestReport.from_item_and_call(item, call)
+    rep.outcome = outcome
+    rep.duration = duration
+
+    if user_properties:
+        rep.user_properties = user_properties
+
+    if wasxfail:
+        rep.wasxfail = "reason: xfail"
+
+    # For skipped tests, longrepr needs to be a tuple (path, lineno, reason)
+    if outcome == "skipped" and longrepr:
+        # Parse longrepr or create simple tuple
+        lineno = item.location[1] if item.location[1] is not None else -1
+        rep.longrepr = (str(item.fspath), lineno, longrepr)  # type: ignore[assignment]
+    elif outcome == "failed" and longrepr:
+        rep.longrepr = longrepr
+
+    # Attach captured output to the report when present.
+    # Pytest's reporting layer will decide whether to display it based on
+    # test outcome, verbosity, and capture settings (--capture, -s, etc.)
+    all_sections = list(sections) if sections else []
+    if stdout:
+        all_sections.append(("Captured stdout call", stdout))
+    if stderr:
+        all_sections.append(("Captured stderr call", stderr))
+    if all_sections:
+        rep.sections = all_sections
+
+    item.ihook.pytest_runtest_logreport(report=rep)
+
+
+def _emit_failure_for_items(
+    items: list[pytest.Item],
+    error_message: str,
+    session: pytest.Session,
+) -> None:
+    """Emit synthetic failure reports when subprocess execution fails.
+
+    When a subprocess crashes, times out, or fails during collection, we emit
+    synthetic test phase reports to mark affected tests as failed. We report
+    setup="passed" and teardown="passed" (even though these phases never ran)
+    to ensure pytest categorizes the test as FAILED rather than ERROR. The actual
+    failure is reported in the call phase with the error message.
+
+    For xfail tests, call is reported as skipped with wasxfail=True to maintain
+    proper xfail semantics.
+    """
+    for it in items:
+        xfail_marker = it.get_closest_marker("xfail")
+        _emit_report(it, when="setup", outcome="passed")
+        if xfail_marker:
+            _emit_report(
+                it,
+                when="call",
+                outcome="skipped",
+                longrepr=error_message,
+                wasxfail=True,
+            )
+        else:
+            _emit_report(
+                it,
+                when="call",
+                outcome="failed",
+                longrepr=error_message,
+            )
+            session.testsfailed += 1
+        _emit_report(it, when="teardown", outcome="passed")
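The subprocess side above appends one JSON object per test phase to the file whose path comes from the environment variable referenced by `SUBPROC_REPORT_PATH`. As a rough illustration of the other half of that protocol, here is a minimal sketch of how such a JSONL file could be read back after the subprocess exits; this is not the plugin's actual parent-side code (which lives in `execution.py`), and the helper names are hypothetical. Field names follow the `_TestRecord` TypedDict above.

```python
# Hypothetical reader for the JSONL report file produced by
# pytest_runtest_logreport above. Illustrative sketch only, not
# code from pytest-isolated itself.
import json
from pathlib import Path
from typing import Any


def read_records(report_path: str) -> list[dict[str, Any]]:
    """Return one dict per non-empty line of the JSONL report file."""
    records: list[dict[str, Any]] = []
    for line in Path(report_path).read_text(encoding="utf-8").splitlines():
        if line.strip():
            records.append(json.loads(line))
    return records


def outcome_by_phase(records: list[dict[str, Any]]) -> dict[tuple[str, str], str]:
    """Index outcomes by (nodeid, phase); a crashed test shows up as missing phases."""
    return {(r["nodeid"], r["when"]): r["outcome"] for r in records}
```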
{pytest_isolated-0.4.0.dist-info → pytest_isolated-0.4.1.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pytest-isolated
-Version: 0.4.0
+Version: 0.4.1
 Summary: Run marked pytest tests in grouped subprocesses (cross-platform).
 Author: pytest-isolated contributors
 License-Expression: MIT
@@ -20,11 +20,11 @@ Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: pytest>=7.0
 Provides-Extra: dev
-Requires-Dist: pre-commit; extra == "dev"
 Requires-Dist: build; extra == "dev"
+Requires-Dist: mypy; extra == "dev"
+Requires-Dist: pre-commit; extra == "dev"
+Requires-Dist: pytest-timeout; extra == "dev"
 Requires-Dist: ruff; extra == "dev"
-Provides-Extra: test
-Requires-Dist: pytest-timeout; extra == "test"
 Dynamic: license-file
 
 # pytest-isolated
@@ -39,7 +39,7 @@ A cross-platform pytest plugin that runs marked tests in isolated subprocesses w
 - Run tests in fresh Python subprocesses to prevent state pollution
 - Group related tests to run together in the same subprocess
 - Handles crashes, timeouts, and setup/teardown failures
--
+- Respects pytest's standard output capture settings (`-s`, `--capture`)
 - Works with pytest reporters (JUnit XML, etc.)
 - Configurable timeouts to prevent hanging subprocesses
 - Cross-platform: Linux, macOS, Windows
@@ -143,6 +143,10 @@ pytest --isolated-timeout=60
 # Disable subprocess isolation for debugging
 pytest --no-isolation
 
+# Control output capture (standard pytest flags work with isolated tests)
+pytest -s # Disable capture, show all output
+pytest --capture=sys # Capture at sys.stdout/stderr level
+
 # Combine with pytest debugger
 pytest --no-isolation --pdb
 ```
@@ -152,7 +156,6 @@ pytest --no-isolation --pdb
 ```ini
 [pytest]
 isolated_timeout = 300
-isolated_capture_passed = false
 ```
 
 Or in `pyproject.toml`:
@@ -160,7 +163,6 @@ Or in `pyproject.toml`:
 ```toml
 [tool.pytest.ini_options]
 isolated_timeout = "300"
-isolated_capture_passed = false
 ```
 
 ## Use Cases
@@ -232,6 +234,18 @@ pytest --junitxml=report.xml --durations=10
 
 ## Advanced
 
+### Coverage Integration
+
+To collect coverage from isolated tests, enable subprocess tracking in `pyproject.toml`:
+
+```toml
+[tool.coverage.run]
+parallel = true
+concurrency = ["subprocess"]
+```
+
+See the [coverage.py subprocess documentation](https://coverage.readthedocs.io/en/latest/subprocess.html) for details.
+
 ### Timeout Handling
 
 ```bash
@@ -258,7 +272,7 @@ if os.environ.get("PYTEST_RUNNING_IN_SUBPROCESS") == "1":
 
 **Tests timing out**: Increase timeout with `--isolated-timeout=600`
 
-**Missing output**:
+**Missing output**: Use `-s` or `--capture=no` to see output from passing tests, or `-v` for verbose output. pytest-isolated respects pytest's standard capture settings.
 
 **Subprocess crashes**: Check for segfaults, OOM, or signal issues. Run with `-v` for details.
 
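The README excerpt above uses `os.environ.get("PYTEST_RUNNING_IN_SUBPROCESS") == "1"` to detect when code is running inside an isolated subprocess. A small, hypothetical conftest sketch building on that check; the fixture name and skip behaviour are illustrative and not part of the plugin.

```python
# conftest.py sketch: expose pytest-isolated's subprocess flag as a fixture.
# Only the PYTEST_RUNNING_IN_SUBPROCESS check comes from the README above;
# the fixture and the example test are hypothetical.
import os

import pytest


@pytest.fixture
def in_isolated_subprocess() -> bool:
    """True when this test is executing inside an isolated subprocess."""
    return os.environ.get("PYTEST_RUNNING_IN_SUBPROCESS") == "1"


def test_heavy_global_state(in_isolated_subprocess: bool) -> None:
    if not in_isolated_subprocess:
        pytest.skip("expects the fresh interpreter provided by subprocess isolation")
    # Mutate global state freely here; the subprocess is discarded afterwards.
```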
pytest_isolated-0.4.1.dist-info/RECORD (new file)

@@ -0,0 +1,13 @@
+pytest_isolated/__init__.py,sha256=Rx5JwthqrahraDXAeDQkxTV9WXyw29nlQCrL-6RC72s,89
+pytest_isolated/config.py,sha256=z8g5Pw5NUKuCuTosRtxR9IkzE6WaHhG2Zh8jJgBqwBU,2383
+pytest_isolated/execution.py,sha256=tzYGwrbtfOAZHEX3mu52Du1iTm7T5fuiHNZRcIx6EWg,16644
+pytest_isolated/grouping.py,sha256=jWYX8cdhqjIXVnq5DrSYImulPDsolPSb1DFGNUGBGvU,3248
+pytest_isolated/plugin.py,sha256=Ieubhh2-CcmCS7IYf5HrGYFGstMyHzjNRpYTrWr-ghs,562
+pytest_isolated/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pytest_isolated/reporting.py,sha256=VXM8rc1o5-ug2aj02nkv87DtxDs43dejkN-udUPh59U,5869
+pytest_isolated-0.4.1.dist-info/licenses/LICENSE,sha256=WECJyowi685PZSnKcA4Tqs7jukfzbnk7iMPLnm_q4JI,1067
+pytest_isolated-0.4.1.dist-info/METADATA,sha256=S68b4OkZct1vVMJ3RAUml3us44kNox0ivnOILDSzr_k,7259
+pytest_isolated-0.4.1.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+pytest_isolated-0.4.1.dist-info/entry_points.txt,sha256=HgRNPjIGoPBF1pkhma4UtaSwhpOVB8oZRZ0L1FcZXgk,45
+pytest_isolated-0.4.1.dist-info/top_level.txt,sha256=FAtpozhvI-YaiFoZMepi9JAm6e87mW-TM1Ovu5xLOxg,16
+pytest_isolated-0.4.1.dist-info/RECORD,,
pytest_isolated-0.4.0.dist-info/RECORD (removed)

@@ -1,9 +0,0 @@
-pytest_isolated/__init__.py,sha256=sBrN76YMZ6pbqIZzb-Yz5SyDPluz0TCQcNJJ3kYSSc0,89
-pytest_isolated/plugin.py,sha256=33KIXSKlaC6Usq8ZrJmy2fjSTyVYj7q85777y7jWAGU,25698
-pytest_isolated/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pytest_isolated-0.4.0.dist-info/licenses/LICENSE,sha256=WECJyowi685PZSnKcA4Tqs7jukfzbnk7iMPLnm_q4JI,1067
-pytest_isolated-0.4.0.dist-info/METADATA,sha256=wQ548E9j84pUfBNIYzDtbdcAh0CaGLXCiq7XGkXFI28,6679
-pytest_isolated-0.4.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
-pytest_isolated-0.4.0.dist-info/entry_points.txt,sha256=HgRNPjIGoPBF1pkhma4UtaSwhpOVB8oZRZ0L1FcZXgk,45
-pytest_isolated-0.4.0.dist-info/top_level.txt,sha256=FAtpozhvI-YaiFoZMepi9JAm6e87mW-TM1Ovu5xLOxg,16
-pytest_isolated-0.4.0.dist-info/RECORD,,
Files without changes: {pytest_isolated-0.4.0.dist-info → pytest_isolated-0.4.1.dist-info}/WHEEL, entry_points.txt, licenses/LICENSE, top_level.txt