pytest-isolated 0.3.0__py3-none-any.whl → 0.4.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pytest_isolated/__init__.py +1 -1
- pytest_isolated/config.py +81 -0
- pytest_isolated/execution.py +504 -0
- pytest_isolated/grouping.py +87 -0
- pytest_isolated/plugin.py +15 -410
- pytest_isolated/reporting.py +173 -0
- {pytest_isolated-0.3.0.dist-info → pytest_isolated-0.4.1.dist-info}/METADATA +72 -10
- pytest_isolated-0.4.1.dist-info/RECORD +13 -0
- pytest_isolated-0.3.0.dist-info/RECORD +0 -9
- {pytest_isolated-0.3.0.dist-info → pytest_isolated-0.4.1.dist-info}/WHEEL +0 -0
- {pytest_isolated-0.3.0.dist-info → pytest_isolated-0.4.1.dist-info}/entry_points.txt +0 -0
- {pytest_isolated-0.3.0.dist-info → pytest_isolated-0.4.1.dist-info}/licenses/LICENSE +0 -0
- {pytest_isolated-0.3.0.dist-info → pytest_isolated-0.4.1.dist-info}/top_level.txt +0 -0
pytest_isolated/__init__.py
CHANGED

pytest_isolated/config.py
ADDED
@@ -0,0 +1,81 @@
"""Configuration and CLI options for pytest-isolated."""

from __future__ import annotations

from typing import Final

import pytest

# Guard to prevent infinite recursion (parent spawns child; child must not spawn again)
SUBPROC_ENV: Final = "PYTEST_RUNNING_IN_SUBPROCESS"

# Parent tells child where to write JSONL records per test call
SUBPROC_REPORT_PATH: Final = "PYTEST_SUBPROCESS_REPORT_PATH"

# Default timeout for isolated test groups (seconds)
DEFAULT_TIMEOUT: Final = 300

# Config attribute names (stored on pytest.Config object)
CONFIG_ATTR_GROUPS: Final = "_subprocess_groups"
CONFIG_ATTR_GROUP_TIMEOUTS: Final = "_subprocess_group_timeouts"

# Options that should be forwarded to subprocess (flags without values)
_FORWARD_FLAGS: Final = {
    "-v",
    "--verbose",
    "-q",
    "--quiet",
    "-s",  # disable output capturing
    "-l",
    "--showlocals",
    "--strict-markers",
    "--strict-config",
    "-x",  # exit on first failure
    "--exitfirst",
}

# Options that should be forwarded to subprocess (options with values)
_FORWARD_OPTIONS_WITH_VALUE: Final = {
    "--tb",  # traceback style
    "-r",  # show extra test summary info
    "--capture",  # output capture method (fd, sys, no, tee-sys)
}


def pytest_addoption(parser: pytest.Parser) -> None:
    group = parser.getgroup("isolated")
    group.addoption(
        "--isolated",
        action="store_true",
        default=False,
        help="Run all tests in isolated subprocesses",
    )
    group.addoption(
        "--isolated-timeout",
        type=int,
        default=None,
        help=(
            f"Timeout in seconds for isolated test groups (default: {DEFAULT_TIMEOUT})"
        ),
    )
    group.addoption(
        "--no-isolation",
        action="store_true",
        default=False,
        help="Disable subprocess isolation (for debugging)",
    )
    parser.addini(
        "isolated_timeout",
        type="string",
        default=str(DEFAULT_TIMEOUT),
        help="Default timeout in seconds for isolated test groups",
    )


def pytest_configure(config: pytest.Config) -> None:
    config.addinivalue_line(
        "markers",
        "isolated(group=None, timeout=None): run this test in a grouped "
        "fresh Python subprocess; tests with the same group run together in "
        "one subprocess. timeout (seconds) overrides global --isolated-timeout.",
    )
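For context, the marker and command-line options registered above are used roughly like this (a sketch; the test names and group labels are invented, not from the package):

import pytest

@pytest.mark.isolated                            # runs alone in a fresh subprocess
def test_mutates_global_state(): ...

@pytest.mark.isolated(group="db", timeout=60)    # both "db" tests share one subprocess, 60 s limit
def test_db_setup(): ...

@pytest.mark.isolated(group="db", timeout=60)
def test_db_query(): ...

On the command line, pytest --isolated forces every collected test into a subprocess, --isolated-timeout (or the isolated_timeout ini option) overrides the 300-second default, and --no-isolation disables isolation for debugging.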
pytest_isolated/execution.py
ADDED
@@ -0,0 +1,504 @@
"""Subprocess execution for pytest-isolated."""

from __future__ import annotations

import contextlib
import json
import os
import subprocess
import sys
import tempfile
import time
from collections import OrderedDict
from pathlib import Path
from typing import Literal, NamedTuple, TypeAlias, cast

import pytest

from .config import (
    _FORWARD_FLAGS,
    _FORWARD_OPTIONS_WITH_VALUE,
    CONFIG_ATTR_GROUP_TIMEOUTS,
    CONFIG_ATTR_GROUPS,
    DEFAULT_TIMEOUT,
    SUBPROC_ENV,
    SUBPROC_REPORT_PATH,
)
from .reporting import (
    _emit_failure_for_items,
    _emit_report,
    _format_crash_message,
    _TestRecord,
)

# Type aliases for clarity
Phase: TypeAlias = Literal["setup", "call", "teardown"]
TestResults: TypeAlias = dict[str, dict[str, _TestRecord]]


class SubprocessResult(NamedTuple):
    """Result from running a subprocess."""

    returncode: int
    stderr: bytes
    timed_out: bool


class ExecutionContext(NamedTuple):
    """Context for test execution."""

    session: pytest.Session


def _build_forwarded_args(config: pytest.Config) -> list[str]:
    """Build list of pytest arguments to forward to subprocess."""
    forwarded_args: list[str] = []
    i = 0
    args = config.invocation_params.args
    while i < len(args):
        arg = args[i]

        # Forward only explicitly allowed options
        if arg in _FORWARD_FLAGS:
            forwarded_args.append(arg)
            i += 1
        elif arg in _FORWARD_OPTIONS_WITH_VALUE:
            forwarded_args.append(arg)
            # Next arg is the value - forward it too
            if i + 1 < len(args):
                forwarded_args.append(args[i + 1])
                i += 2
            else:
                i += 1
        elif arg.startswith(tuple(f"{opt}=" for opt in _FORWARD_OPTIONS_WITH_VALUE)):
            forwarded_args.append(arg)
            i += 1
        else:
            # Skip everything else (positional args, test paths,
            # unknown options)
            i += 1
    return forwarded_args


def _run_subprocess(
    cmd: list[str],
    env: dict[str, str],
    timeout: int,
    cwd: str | None,
) -> SubprocessResult:
    """Run subprocess and return result."""
    try:
        proc = subprocess.run(
            cmd,
            env=env,
            timeout=timeout,
            capture_output=True,
            check=False,
            cwd=cwd,
        )
        return SubprocessResult(
            returncode=proc.returncode,
            stderr=proc.stderr or b"",
            timed_out=False,
        )
    except subprocess.TimeoutExpired as exc:
        return SubprocessResult(
            returncode=-1,
            stderr=exc.stderr or b"",
            timed_out=True,
        )


def _parse_results(report_path: str) -> TestResults:
    """Parse JSONL results file into dict[nodeid][phase] structure."""
    results: TestResults = {}
    report_file = Path(report_path)
    if report_file.exists():
        with report_file.open(encoding="utf-8") as f:
            for line in f:
                file_line = line.strip()
                if not file_line:
                    continue
                rec = cast(_TestRecord, json.loads(file_line))
                nodeid = rec["nodeid"]
                when = rec["when"]

                if nodeid not in results:
                    results[nodeid] = {}
                results[nodeid][when] = rec
        with contextlib.suppress(OSError):
            report_file.unlink()
    return results


def _handle_xfail_crash(
    returncode: int,
    results: TestResults,
    group_items: list[pytest.Item],
    ctx: ExecutionContext,
) -> bool:
    """Check if crash should be treated as xfail. Returns True if handled."""
    if returncode < 0 and results:
        # Check if all tests in this group are marked xfail
        all_xfail = all(it.get_closest_marker("xfail") for it in group_items)
        if all_xfail:
            # Override any results from subprocess - crash is the expected outcome
            msg = (
                f"Subprocess crashed with signal {-returncode} "
                f"(expected for xfail test)"
            )
            _emit_failure_for_items(group_items, msg, ctx.session)
            return True
    return False


def _handle_timeout(
    timed_out: bool,
    group_name: str,
    group_timeout: int,
    execution_time: float,
    group_items: list[pytest.Item],
    ctx: ExecutionContext,
) -> bool:
    """Handle subprocess timeout. Returns True if handled."""
    if timed_out:
        msg = (
            f"Subprocess group={group_name!r} timed out after {group_timeout} "
            f"seconds (execution time: {execution_time:.2f}s). "
            f"Increase timeout with --isolated-timeout, isolated_timeout ini, "
            f"or @pytest.mark.isolated(timeout=N)."
        )
        _emit_failure_for_items(group_items, msg, ctx.session)
        return True
    return False


def _handle_collection_crash(
    returncode: int,
    results: TestResults,
    group_name: str,
    proc_stderr: bytes,
    group_items: list[pytest.Item],
    ctx: ExecutionContext,
) -> bool:
    """Handle crash during collection (no results produced). Returns True if handled."""
    if not results:
        stderr_text = proc_stderr.decode("utf-8", errors="replace").strip()
        msg = (
            f"Subprocess group={group_name!r} exited with code {returncode} "
            f"and produced no per-test report. The subprocess may have "
            f"crashed during collection."
        )
        if stderr_text:
            msg += f"\n\nSubprocess stderr:\n{stderr_text}"
        _emit_failure_for_items(group_items, msg, ctx.session)
        return True
    return False


def _detect_crashed_tests(
    group_items: list[pytest.Item],
    results: TestResults,
) -> tuple[list[pytest.Item], list[pytest.Item]]:
    """Detect crashed and not-run tests. Returns (crashed_items, not_run_items)."""
    crashed_items: list[pytest.Item] = []

    for it in group_items:
        node_results = results.get(it.nodeid, {})
        # Test started (setup passed) but crashed before call completed.
        # If setup was skipped or failed, no call phase is expected.
        if node_results and "call" not in node_results:
            setup_result = node_results.get("setup", {})
            setup_outcome = setup_result.get("outcome", "")
            if setup_outcome == "passed":
                crashed_items.append(it)

    # If we detected crashed tests, also find tests that never ran
    # (they come after the crashing test in the same group)
    not_run_items: list[pytest.Item] = []
    if crashed_items:
        for it in group_items:
            node_results = results.get(it.nodeid, {})
            # Test never started (no results at all)
            if not node_results:
                not_run_items.append(it)

    return crashed_items, not_run_items


def _handle_mid_test_crash(
    returncode: int,
    proc_stderr: bytes,
    group_items: list[pytest.Item],
    results: TestResults,
    ctx: ExecutionContext,
) -> bool:
    """Handle crash during test execution. Returns True if handled."""
    crashed_items, not_run_items = _detect_crashed_tests(group_items, results)

    if not (crashed_items or not_run_items):
        return False

    stderr_text = proc_stderr.decode("utf-8", errors="replace").strip()

    # Emit failures for crashed tests
    if crashed_items:
        crash_msg = _format_crash_message(
            returncode, "during test execution", stderr_text
        )

        for it in crashed_items:
            node_results = results.get(it.nodeid, {})
            # Emit setup phase if it was recorded
            if "setup" in node_results:
                rec = node_results["setup"]
                _emit_report(
                    it,
                    when="setup",
                    outcome=rec["outcome"],
                    longrepr=rec.get("longrepr", ""),
                    duration=rec.get("duration", 0.0),
                )
            else:
                _emit_report(
                    it,
                    when="setup",
                    outcome="passed",
                )

            # Emit call phase as failed with crash info
            xfail_marker = it.get_closest_marker("xfail")
            if xfail_marker:
                _emit_report(
                    it,
                    when="call",
                    outcome="skipped",
                    longrepr=crash_msg,
                    wasxfail=True,
                )
            else:
                _emit_report(
                    it,
                    when="call",
                    outcome="failed",
                    longrepr=crash_msg,
                )
                ctx.session.testsfailed += 1

            _emit_report(
                it,
                when="teardown",
                outcome="passed",
            )
            # Remove from results so they're not processed again
            results.pop(it.nodeid, None)

    # Emit failures for tests that never ran due to earlier crash
    if not_run_items:
        not_run_msg = _format_crash_message(
            returncode, "during earlier test execution", stderr_text
        )
        not_run_msg = f"Test did not run - {not_run_msg}"
        _emit_failure_for_items(not_run_items, not_run_msg, ctx.session)
        for it in not_run_items:
            results.pop(it.nodeid, None)

    return True


def _emit_all_results(
    group_items: list[pytest.Item],
    results: TestResults,
    ctx: ExecutionContext,
) -> None:
    """Emit per-test results for all test phases."""
    phases: list[Phase] = ["setup", "call", "teardown"]

    for it in group_items:
        node_results = results.get(it.nodeid, {})

        # Skip tests that were already handled by crash detection
        if it.nodeid not in results:
            continue

        # Check if setup passed (to determine if missing call is expected)
        setup_passed = (
            "setup" in node_results and node_results["setup"]["outcome"] == "passed"
        )

        # Emit setup, call, teardown in order
        for when in phases:
            if when not in node_results:
                # If missing call phase AND setup passed, emit a failure
                # (crash detection should handle most cases, but this
                # is a safety net for unexpected situations)
                # If setup failed, missing call is expected (pytest skips call)
                if when == "call" and setup_passed:
                    msg = f"Missing 'call' phase result from subprocess for {it.nodeid}"
                    _emit_report(
                        it,
                        when="call",
                        outcome="failed",
                        longrepr=msg,
                    )
                    ctx.session.testsfailed += 1
                continue

            rec = node_results[when]
            _emit_report(
                it,
                when=when,
                outcome=rec.get("outcome", "failed"),  # type: ignore[arg-type]
                longrepr=rec.get("longrepr", ""),
                duration=rec.get("duration", 0.0),
                stdout=rec.get("stdout", ""),
                stderr=rec.get("stderr", ""),
                sections=rec.get("sections"),
                user_properties=rec.get("user_properties"),
                wasxfail=rec.get("wasxfail", False),
            )

            if when == "call" and rec["outcome"] == "failed":
                ctx.session.testsfailed += 1


def pytest_runtestloop(session: pytest.Session) -> int | None:
    """Execute isolated test groups in subprocesses and remaining tests in-process.

    Any subprocess timeouts are caught and reported as test failures; the
    subprocess.TimeoutExpired exception is not propagated to the caller.
    """
    if os.environ.get(SUBPROC_ENV) == "1":
        return None  # child runs the normal loop

    config = session.config
    groups = getattr(config, CONFIG_ATTR_GROUPS, OrderedDict())
    group_timeouts: dict[str, int | None] = getattr(
        config, CONFIG_ATTR_GROUP_TIMEOUTS, {}
    )

    # session.items contains the final filtered and ordered
    # list (after -k, -m, --ff, etc.)
    # We need to:
    # 1. Filter groups to only include items in session.items
    # 2. Preserve the order from session.items (important for --ff, --nf, ...)

    # Build a mapping from nodeid to (item, group_name) for isolated tests
    nodeid_to_group: dict[str, tuple[pytest.Item, str]] = {}
    for group_name, group_items in groups.items():
        for it in group_items:
            nodeid_to_group[it.nodeid] = (it, group_name)

    # Rebuild groups in session.items order
    filtered_groups: OrderedDict[str, list[pytest.Item]] = OrderedDict()
    isolated_nodeids: set[str] = set()

    for it in session.items:
        if it.nodeid in nodeid_to_group:
            _, group_name = nodeid_to_group[it.nodeid]
            if group_name not in filtered_groups:
                filtered_groups[group_name] = []
            filtered_groups[group_name].append(it)
            isolated_nodeids.add(it.nodeid)

    groups = filtered_groups

    # Normal items are those in session.items but not in isolated groups
    normal_items = [it for it in session.items if it.nodeid not in isolated_nodeids]

    # Get default timeout configuration
    timeout_opt = config.getoption("isolated_timeout", None)
    timeout_ini = config.getini("isolated_timeout")
    default_timeout = timeout_opt or (
        int(timeout_ini) if timeout_ini else DEFAULT_TIMEOUT
    )

    # Create execution context
    ctx = ExecutionContext(session=session)

    # Run groups
    for group_name, group_items in groups.items():
        nodeids = [it.nodeid for it in group_items]

        # Get timeout for this group (marker timeout > global timeout)
        group_timeout = group_timeouts.get(group_name) or default_timeout

        # file where the child will append JSONL records
        with tempfile.NamedTemporaryFile(
            prefix="pytest-subproc-", suffix=".jsonl", delete=False
        ) as tf:
            report_path = tf.name

        env = os.environ.copy()
        env[SUBPROC_ENV] = "1"
        env[SUBPROC_REPORT_PATH] = report_path

        # Build forwarded args and subprocess command
        forwarded_args = _build_forwarded_args(config)
        cmd = [sys.executable, "-m", "pytest"]
        cmd.extend(forwarded_args)

        # Pass rootdir to subprocess to ensure it uses the same project root
        if config.rootpath:
            cmd.extend(["--rootdir", str(config.rootpath)])

        # Add the test nodeids
        cmd.extend(nodeids)

        # Determine the working directory for the subprocess
        # Use rootpath if set, otherwise use invocation directory
        # This ensures nodeids (which are relative to rootpath) can be resolved
        if config.rootpath:
            subprocess_cwd = str(config.rootpath)
        else:
            subprocess_cwd = str(config.invocation_params.dir)

        # Run subprocess
        start_time = time.time()
        result = _run_subprocess(cmd, env, group_timeout, subprocess_cwd)
        execution_time = time.time() - start_time

        # Parse results
        results = _parse_results(report_path)

        # Handle various failure conditions
        if _handle_xfail_crash(result.returncode, results, group_items, ctx):
            continue

        if _handle_timeout(
            result.timed_out,
            group_name,
            group_timeout,
            execution_time,
            group_items,
            ctx,
        ):
            continue

        if _handle_collection_crash(
            result.returncode, results, group_name, result.stderr, group_items, ctx
        ):
            continue

        if _handle_mid_test_crash(
            result.returncode, result.stderr, group_items, results, ctx
        ):
            pass  # Continue to emit remaining results

        # Emit normal test results
        _emit_all_results(group_items, results, ctx)

        # Check if we should exit early due to maxfail/exitfirst
        if (
            session.testsfailed
            and session.config.option.maxfail
            and session.testsfailed >= session.config.option.maxfail
        ):
            return 1

    # Run normal tests in-process
    for idx, item in enumerate(normal_items):
        nextitem = normal_items[idx + 1] if idx + 1 < len(normal_items) else None
        item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)

    return 1 if session.testsfailed else 0
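The per-test records parsed by _parse_results above are written by the child process to the file named in PYTEST_SUBPROCESS_REPORT_PATH; the writer and the _TestRecord type live in reporting.py, which this excerpt does not include. Judging only from the fields read here, one JSONL record deserializes to roughly the following Python dict (a sketch with invented values; the authoritative schema is _TestRecord):

# Hypothetical record, reconstructed from the fields execution.py reads.
record = {
    "nodeid": "tests/test_db.py::test_db_query",  # invented nodeid
    "when": "call",              # "setup", "call", or "teardown"
    "outcome": "passed",         # "passed", "failed", or "skipped"
    "longrepr": "",              # failure/skip representation, if any
    "duration": 0.012,
    "stdout": "",
    "stderr": "",
    "sections": [],              # captured report sections
    "user_properties": [],
    "wasxfail": False,
}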
pytest_isolated/grouping.py
ADDED
@@ -0,0 +1,87 @@
"""Test grouping logic for pytest-isolated."""

from __future__ import annotations

import os
from collections import OrderedDict
from typing import Any

import pytest

from .config import CONFIG_ATTR_GROUP_TIMEOUTS, CONFIG_ATTR_GROUPS, SUBPROC_ENV


def _has_isolated_marker(obj: Any) -> bool:
    """Check if an object has the isolated marker in its pytestmark."""
    markers = getattr(obj, "pytestmark", [])
    if not isinstance(markers, list):
        markers = [markers]
    return any(getattr(m, "name", None) == "isolated" for m in markers)


def pytest_collection_modifyitems(
    config: pytest.Config, items: list[pytest.Item]
) -> None:
    if os.environ.get(SUBPROC_ENV) == "1":
        return  # child should not do grouping

    # If --no-isolation is set, treat all tests as normal (no subprocess isolation)
    if config.getoption("no_isolation", False):
        setattr(config, CONFIG_ATTR_GROUPS, OrderedDict())
        return

    # If --isolated is set, run all tests in isolation
    run_all_isolated = config.getoption("isolated", False)

    groups: OrderedDict[str, list[pytest.Item]] = OrderedDict()
    group_timeouts: dict[str, int | None] = {}  # Track timeout per group

    for item in items:
        m = item.get_closest_marker("isolated")

        # Skip non-isolated tests unless --isolated flag is set
        if not m and not run_all_isolated:
            continue

        # Get group from marker (positional arg, keyword arg, or default)
        group = None
        if m:
            # Support @pytest.mark.isolated("groupname") - positional arg
            if m.args:
                group = m.args[0]
            # Support @pytest.mark.isolated(group="groupname") - keyword arg
            elif "group" in m.kwargs:
                group = m.kwargs["group"]

        # Default grouping logic
        if group is None:
            # If --isolated flag is used (no explicit marker), use unique nodeid
            if not m:
                group = item.nodeid
            # Check if marker was applied to a class or module
            elif isinstance(item, pytest.Function):
                if item.cls is not None and _has_isolated_marker(item.cls):
                    # Group by class name (module::class)
                    parts = item.nodeid.split("::")
                    group = "::".join(parts[:2]) if len(parts) >= 3 else item.nodeid
                elif _has_isolated_marker(item.module):
                    # Group by module name (first part of nodeid)
                    parts = item.nodeid.split("::")
                    group = parts[0]
                else:
                    # Explicit marker on function uses unique nodeid
                    group = item.nodeid
            else:
                # Non-Function items use unique nodeid
                group = item.nodeid

        # Store group-specific timeout (first marker wins)
        group_key = str(group)
        if group_key not in group_timeouts:
            timeout = m.kwargs.get("timeout") if m else None
            group_timeouts[group_key] = timeout

        groups.setdefault(group_key, []).append(item)

    setattr(config, CONFIG_ATTR_GROUPS, groups)
    setattr(config, CONFIG_ATTR_GROUP_TIMEOUTS, group_timeouts)
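For reference, the default grouping implemented above behaves like this (a sketch with invented file and test names): a class-level marker groups the class's tests by module::class, a module-level pytestmark groups the whole file, and a bare function-level marker keys the group on the test's own nodeid.

# Illustrative only: the path "tests/test_server.py" and these names are invented.
import pytest

@pytest.mark.isolated
class TestServer:
    # Both tests share one subprocess; the group key is
    # "tests/test_server.py::TestServer" (module::class).
    def test_start(self): ...

    def test_stop(self): ...


# Function-level marker with no explicit group: the group key is the
# test's own nodeid, so this test gets its own subprocess.
@pytest.mark.isolated
def test_leaks_env(): ...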