pytest-isolated 0.3.0__py3-none-any.whl → 0.4.1__py3-none-any.whl

This diff represents the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
pytest_isolated/plugin.py CHANGED
@@ -1,414 +1,19 @@
- from __future__ import annotations
-
- import contextlib
- import json
- import os
- import subprocess
- import sys
- import tempfile
- import time
- from collections import OrderedDict
- from pathlib import Path
- from typing import Any, Final, Literal, TypedDict, cast
-
- import pytest
+ """pytest-isolated plugin - Run tests in isolated subprocesses.

- # Guard to prevent infinite recursion (parent spawns child; child must not spawn again)
- SUBPROC_ENV: Final = "PYTEST_RUNNING_IN_SUBPROCESS"
+ This plugin allows running tests in isolated subprocesses to prevent state leakage.
+ """

- # Parent tells child where to write JSONL records per test call
- SUBPROC_REPORT_PATH: Final = "PYTEST_SUBPROCESS_REPORT_PATH"
+ from __future__ import annotations

- # Arguments to exclude when forwarding options to subprocess
- _EXCLUDED_ARG_PREFIXES: Final = (
-     "--junitxml=",
-     "--html=",
-     "--result-log=",
-     "--collect-only",
-     "--setup-only",
-     "--setup-plan",
-     "-x",
-     "--exitfirst",
-     "--maxfail=",
+ from .config import pytest_addoption, pytest_configure
+ from .execution import pytest_runtestloop
+ from .grouping import pytest_collection_modifyitems
+ from .reporting import pytest_runtest_logreport
+
+ __all__: tuple[str, ...] = (
+     "pytest_addoption",
+     "pytest_collection_modifyitems",
+     "pytest_configure",
+     "pytest_runtest_logreport",
+     "pytest_runtestloop",
  )
-
- # Plugin-specific options that take values and should not be forwarded
- _PLUGIN_OPTIONS_WITH_VALUE: Final = ("--isolated-timeout",)
-
- # Plugin-specific flag options that should not be forwarded
- _PLUGIN_FLAGS: Final = ("--no-isolation",)
-
-
- class _TestRecord(TypedDict, total=False):
-     """Structure for test phase results from subprocess."""
-
-     nodeid: str
-     when: Literal["setup", "call", "teardown"]
-     outcome: Literal["passed", "failed", "skipped"]
-     longrepr: str
-     duration: float
-     stdout: str
-     stderr: str
-     keywords: list[str]
-     sections: list[tuple[str, str]]
-     user_properties: list[tuple[str, Any]]
-     wasxfail: bool
-
-
- def pytest_addoption(parser: pytest.Parser) -> None:
-     group = parser.getgroup("isolated")
-     group.addoption(
-         "--isolated-timeout",
-         type=int,
-         default=None,
-         help="Timeout in seconds for isolated test groups (default: 300)",
-     )
-     group.addoption(
-         "--no-isolation",
-         action="store_true",
-         default=False,
-         help="Disable subprocess isolation (for debugging)",
-     )
-     parser.addini(
-         "isolated_timeout",
-         type="string",
-         default="300",
-         help="Default timeout in seconds for isolated test groups",
-     )
-     parser.addini(
-         "isolated_capture_passed",
-         type="bool",
-         default=False,
-         help="Capture output for passed tests (default: False)",
-     )
-
-
- def pytest_configure(config: pytest.Config) -> None:
-     config.addinivalue_line(
-         "markers",
-         "isolated(group=None, timeout=None): run this test in a grouped "
-         "fresh Python subprocess; tests with the same group run together in "
-         "one subprocess. timeout (seconds) overrides global --isolated-timeout.",
-     )
-
-
- # ----------------------------
- # CHILD MODE: record results + captured output per test phase
- # ----------------------------
- def pytest_runtest_logreport(report: pytest.TestReport) -> None:
-     """Write test phase results to a JSONL file when running in subprocess mode."""
-     path = os.environ.get(SUBPROC_REPORT_PATH)
-     if not path:
-         return
-
-     # Capture ALL phases (setup, call, teardown), not just call
-     rec: _TestRecord = {
-         "nodeid": report.nodeid,
-         "when": report.when,  # setup, call, or teardown
-         "outcome": report.outcome,  # passed/failed/skipped
-         "longrepr": str(report.longrepr) if report.longrepr else "",
-         "duration": getattr(report, "duration", 0.0),
-         "stdout": getattr(report, "capstdout", "") or "",
-         "stderr": getattr(report, "capstderr", "") or "",
-         # Preserve test metadata for proper reporting
-         "keywords": list(report.keywords),
-         "sections": getattr(report, "sections", []),  # captured logs, etc.
-         "user_properties": getattr(report, "user_properties", []),
-         "wasxfail": hasattr(report, "wasxfail"),
-     }
-     with Path(path).open("a", encoding="utf-8") as f:
-         f.write(json.dumps(rec) + "\n")
-
-
- # ----------------------------
- # PARENT MODE: group marked tests
- # ----------------------------
- def pytest_collection_modifyitems(
-     config: pytest.Config, items: list[pytest.Item]
- ) -> None:
-     if os.environ.get(SUBPROC_ENV) == "1":
-         return  # child should not do grouping
-
-     # If --no-isolation is set, treat all tests as normal (no subprocess isolation)
-     if config.getoption("no_isolation", False):
-         config._subprocess_groups = OrderedDict()  # type: ignore[attr-defined]
-         config._subprocess_normal_items = items  # type: ignore[attr-defined]
-         return
-
-     groups: OrderedDict[str, list[pytest.Item]] = OrderedDict()
-     group_timeouts: dict[str, int | None] = {}  # Track timeout per group
-     normal: list[pytest.Item] = []
-
-     for item in items:
-         m = item.get_closest_marker("isolated")
-         if not m:
-             normal.append(item)
-             continue
-
-         group = m.kwargs.get("group")
-         # Default grouping to module path (so you don't accidentally group everything)
-         if group is None:
-             group = item.nodeid.split("::")[0]
-
-         # Store group-specific timeout (first marker wins)
-         group_key = str(group)
-         if group_key not in group_timeouts:
-             group_timeouts[group_key] = m.kwargs.get("timeout")
-
-         groups.setdefault(group_key, []).append(item)
-
-     config._subprocess_groups = groups  # type: ignore[attr-defined]
-     config._subprocess_group_timeouts = group_timeouts  # type: ignore[attr-defined]
-     config._subprocess_normal_items = normal  # type: ignore[attr-defined]
-
-
- def pytest_runtestloop(session: pytest.Session) -> int | None:
-     """Execute isolated test groups in subprocesses and remaining tests in-process.
-
-     Any subprocess timeouts are caught and reported as test failures; the
-     subprocess.TimeoutExpired exception is not propagated to the caller.
-     """
-     if os.environ.get(SUBPROC_ENV) == "1":
-         return None  # child runs the normal loop
-
-     config = session.config
-     groups = getattr(config, "_subprocess_groups", OrderedDict())
-     if not isinstance(groups, OrderedDict):
-         groups = OrderedDict()
-     group_timeouts: dict[str, int | None] = getattr(
-         config, "_subprocess_group_timeouts", {}
-     )
-     normal_items: list[pytest.Item] = getattr(
-         config, "_subprocess_normal_items", session.items
-     )
-
-     # Get default timeout configuration
-     timeout_opt = config.getoption("isolated_timeout", None)
-     timeout_ini = config.getini("isolated_timeout")
-     default_timeout = timeout_opt or (int(timeout_ini) if timeout_ini else 300)
-
-     # Get capture configuration
-     capture_passed = config.getini("isolated_capture_passed")
-
-     def emit_report(
-         item: pytest.Item,
-         when: Literal["setup", "call", "teardown"],
-         outcome: Literal["passed", "failed", "skipped"],
-         longrepr: str = "",
-         duration: float = 0.0,
-         stdout: str = "",
-         stderr: str = "",
-         sections: list[tuple[str, str]] | None = None,
-         user_properties: list[tuple[str, Any]] | None = None,
-         wasxfail: bool = False,
-     ) -> None:
-         call = pytest.CallInfo.from_call(lambda: None, when=when)
-         rep = pytest.TestReport.from_item_and_call(item, call)
-         rep.outcome = outcome
-         rep.duration = duration
-
-         if user_properties:
-             rep.user_properties = user_properties
-
-         if wasxfail:
-             rep.wasxfail = "reason: xfail"
-
-         # For skipped tests, longrepr needs to be a tuple (path, lineno, reason)
-         if outcome == "skipped" and longrepr:
-             # Parse longrepr or create simple tuple
-             lineno = item.location[1] if item.location[1] is not None else -1
-             rep.longrepr = (str(item.fspath), lineno, longrepr)  # type: ignore[assignment]
-         elif outcome == "failed" and longrepr:
-             rep.longrepr = longrepr
-
-         # Add captured output as sections (capstdout/capstderr are read-only)
-         if outcome == "failed" or (outcome == "passed" and capture_passed):
-             all_sections = list(sections) if sections else []
-             if stdout:
-                 all_sections.append(("Captured stdout call", stdout))
-             if stderr:
-                 all_sections.append(("Captured stderr call", stderr))
-             if all_sections:
-                 rep.sections = all_sections
-
-         item.ihook.pytest_runtest_logreport(report=rep)
-
-     # Run groups
-     for group_name, group_items in groups.items():
-         nodeids = [it.nodeid for it in group_items]
-
-         # Get timeout for this group (marker timeout > global timeout)
-         group_timeout = group_timeouts.get(group_name) or default_timeout
-
-         # file where the child will append JSONL records
-         with tempfile.NamedTemporaryFile(
-             prefix="pytest-subproc-", suffix=".jsonl", delete=False
-         ) as tf:
-             report_path = tf.name
-
-         env = os.environ.copy()
-         env[SUBPROC_ENV] = "1"
-         env[SUBPROC_REPORT_PATH] = report_path
-
-         # Run pytest in subprocess with timeout, tracking execution time
-         # Preserve rootdir and run subprocess from correct directory to ensure
-         # nodeids can be resolved
-         cmd = [sys.executable, "-m", "pytest"]
-
-         # Forward relevant pytest options to subprocess for consistency
-         # We filter out options that would interfere with subprocess execution
-         if hasattr(config, "invocation_params") and hasattr(
-             config.invocation_params, "args"
-         ):
-             forwarded_args = []
-             skip_next = False
-
-             for arg in config.invocation_params.args:
-                 if skip_next:
-                     skip_next = False
-                     continue
-
-                 # Skip our own plugin options
-                 if arg in _PLUGIN_OPTIONS_WITH_VALUE:
-                     skip_next = True
-                     continue
-                 if arg in _PLUGIN_FLAGS:
-                     continue
-
-                 # Skip output/reporting options that would conflict
-                 if any(arg.startswith(prefix) for prefix in _EXCLUDED_ARG_PREFIXES):
-                     continue
-                 if arg in ("-x", "--exitfirst"):
-                     continue
-
-                 # Skip test file paths and nodeids - we provide our own
-                 if not arg.startswith("-") and ("::" in arg or arg.endswith(".py")):
-                     continue
-
-                 forwarded_args.append(arg)
-
-             cmd.extend(forwarded_args)
-
-         # Pass rootdir to subprocess to ensure it uses the same project root
-         # (config.rootpath is available in pytest 7.0+, which is our minimum version)
-         if config.rootpath:
-             cmd.extend(["--rootdir", str(config.rootpath)])
-
-         # Add the test nodeids
-         cmd.extend(nodeids)
-
-         start_time = time.time()
-
-         # Determine the working directory for the subprocess
-         # Use rootpath if set, otherwise use invocation directory
-         # This ensures nodeids (which are relative to rootpath) can be resolved
-         subprocess_cwd = None
-         if config.rootpath:
-             subprocess_cwd = str(config.rootpath)
-         elif hasattr(config, "invocation_params") and hasattr(
-             config.invocation_params, "dir"
-         ):
-             subprocess_cwd = str(config.invocation_params.dir)
-
-         try:
-             proc = subprocess.run(
-                 cmd,
-                 env=env,
-                 timeout=group_timeout,
-                 capture_output=False,
-                 check=False,
-                 cwd=subprocess_cwd,
-             )
-             returncode = proc.returncode
-             timed_out = False
-         except subprocess.TimeoutExpired:
-             returncode = -1
-             timed_out = True
-
-         execution_time = time.time() - start_time
-
-         # Gather results from JSONL file
-         results: dict[str, dict[str, _TestRecord]] = {}
-         report_file = Path(report_path)
-         if report_file.exists():
-             with report_file.open(encoding="utf-8") as f:
-                 for line in f:
-                     file_line = line.strip()
-                     if not file_line:
-                         continue
-                     rec = cast(_TestRecord, json.loads(file_line))
-                     nodeid = rec["nodeid"]
-                     when = rec["when"]
-
-                     if nodeid not in results:
-                         results[nodeid] = {}
-                     results[nodeid][when] = rec
-         with contextlib.suppress(OSError):
-             report_file.unlink()
-
-         # Handle timeout or crash
-         if timed_out:
-             msg = (
-                 f"Subprocess group={group_name!r} timed out after {group_timeout} "
-                 f"seconds (execution time: {execution_time:.2f}s). "
-                 f"Increase timeout with --isolated-timeout, isolated_timeout ini, "
-                 f"or @pytest.mark.isolated(timeout=N)."
-             )
-             for it in group_items:
-                 emit_report(it, "call", "failed", longrepr=msg)
-                 session.testsfailed += 1
-             continue
-
-         if not results:
-             msg = (
-                 f"Subprocess group={group_name!r} exited with code {returncode} "
-                 f"and produced no per-test report. The subprocess may have "
-                 f"crashed during collection."
-             )
-             for it in group_items:
-                 emit_report(it, "call", "failed", longrepr=msg)
-                 session.testsfailed += 1
-             continue
-
-         # Emit per-test results into parent (all phases)
-         for it in group_items:
-             node_results = results.get(it.nodeid, {})
-
-             # Emit setup, call, teardown in order
-             for when in ["setup", "call", "teardown"]:  # type: ignore[assignment]
-                 if when not in node_results:
-                     # If missing a phase, synthesize a passing one
-                     if when == "call" and not node_results:
-                         # Test completely missing - mark as failed
-                         emit_report(
-                             it,
-                             "call",
-                             "failed",
-                             longrepr=f"Missing result from subprocess for {it.nodeid}",
-                         )
-                         session.testsfailed += 1
-                     continue
-
-                 rec = node_results[when]
-                 emit_report(
-                     it,
-                     when=when,  # type: ignore[arg-type]
-                     outcome=rec["outcome"],
-                     longrepr=rec.get("longrepr", ""),
-                     duration=rec.get("duration", 0.0),
-                     stdout=rec.get("stdout", ""),
-                     stderr=rec.get("stderr", ""),
-                     sections=rec.get("sections"),
-                     user_properties=rec.get("user_properties"),
-                     wasxfail=rec.get("wasxfail", False),
-                 )
-
-                 if when == "call" and rec["outcome"] == "failed":
-                     session.testsfailed += 1
-
-     # Run normal tests in-process
-     for idx, item in enumerate(normal_items):
-         nextitem = normal_items[idx + 1] if idx + 1 < len(normal_items) else None
-         item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
-
-     return 1 if session.testsfailed else 0
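
For context, the isolated marker registered in pytest_configure above is what drives the grouping logic in pytest_collection_modifyitems. A minimal sketch of a consuming test file, assuming the plugin is installed (the file and test names are illustrative, not from the package):

# test_leaky.py - illustrative test module, not part of the package
import pytest

@pytest.mark.isolated  # no group given: grouped by module path by default
def test_mutates_global_state():
    assert True

# Tests sharing a group run together in one fresh subprocess;
# timeout (seconds) overrides the global --isolated-timeout.
@pytest.mark.isolated(group="heavy", timeout=60)
def test_heavy_resource():
    assert True
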
pytest_isolated/reporting.py ADDED
@@ -0,0 +1,173 @@
+ """Test result reporting for pytest-isolated."""
+
+ from __future__ import annotations
+
+ import json
+ import os
+ from pathlib import Path
+ from typing import Any, Literal, TypedDict
+
+ import pytest
+
+ from .config import SUBPROC_REPORT_PATH
+
+
+ class _TestRecord(TypedDict, total=False):
+     """Structure for test phase results from subprocess."""
+
+     nodeid: str
+     when: Literal["setup", "call", "teardown"]
+     outcome: Literal["passed", "failed", "skipped"]
+     longrepr: str
+     duration: float
+     stdout: str
+     stderr: str
+     keywords: list[str]
+     sections: list[tuple[str, str]]
+     user_properties: list[tuple[str, Any]]
+     wasxfail: bool
+
+
+ def _format_crash_reason(returncode: int) -> str:
+     """Format a human-readable crash reason from a return code.
+
+     On Unix, negative return codes indicate signal numbers.
+     On Windows, we report the exit code directly.
+     """
+     if returncode < 0:
+         # Unix: negative return code is -signal_number
+         return f"crashed with signal {-returncode}"
+     # Windows or other: positive exit code
+     return f"crashed with exit code {returncode}"
+
+
+ def _format_crash_message(
+     returncode: int,
+     context: str,
+     stderr_text: str = "",
+ ) -> str:
+     """Build a complete crash error message with optional stderr output.
+
+     Args:
+         returncode: The subprocess return code.
+         context: Description of when the crash occurred (e.g., "during test execution").
+         stderr_text: Optional captured stderr from the subprocess.
+
+     Returns:
+         A formatted error message suitable for test failure reports.
+     """
+     reason = _format_crash_reason(returncode)
+     msg = f"Subprocess {reason} {context}."
+     if stderr_text:
+         msg += f"\n\nSubprocess stderr:\n{stderr_text}"
+     return msg
+
+
+ def pytest_runtest_logreport(report: pytest.TestReport) -> None:
+     """Write test phase results to a JSONL file when running in subprocess mode."""
+     path = os.environ.get(SUBPROC_REPORT_PATH)
+     if not path:
+         return
+
+     # Capture ALL phases (setup, call, teardown), not just call
+     rec: _TestRecord = {
+         "nodeid": report.nodeid,
+         "when": report.when,  # setup, call, or teardown
+         "outcome": report.outcome,  # passed/failed/skipped
+         "longrepr": str(report.longrepr) if report.longrepr else "",
+         "duration": getattr(report, "duration", 0.0),
+         "stdout": getattr(report, "capstdout", "") or "",
+         "stderr": getattr(report, "capstderr", "") or "",
+         # Preserve test metadata for proper reporting
+         "keywords": list(report.keywords),
+         "sections": getattr(report, "sections", []),  # captured logs, etc.
+         "user_properties": getattr(report, "user_properties", []),
+         "wasxfail": hasattr(report, "wasxfail"),
+     }
+     with Path(path).open("a", encoding="utf-8") as f:
+         f.write(json.dumps(rec) + "\n")
+
+
+ def _emit_report(
+     item: pytest.Item,
+     *,
+     when: Literal["setup", "call", "teardown"],
+     outcome: Literal["passed", "failed", "skipped"],
+     longrepr: str = "",
+     duration: float = 0.0,
+     stdout: str = "",
+     stderr: str = "",
+     sections: list[tuple[str, str]] | None = None,
+     user_properties: list[tuple[str, Any]] | None = None,
+     wasxfail: bool = False,
+ ) -> None:
+     """Emit a test report for a specific test phase."""
+     call = pytest.CallInfo.from_call(lambda: None, when=when)
+     rep = pytest.TestReport.from_item_and_call(item, call)
+     rep.outcome = outcome
+     rep.duration = duration
+
+     if user_properties:
+         rep.user_properties = user_properties
+
+     if wasxfail:
+         rep.wasxfail = "reason: xfail"
+
+     # For skipped tests, longrepr needs to be a tuple (path, lineno, reason)
+     if outcome == "skipped" and longrepr:
+         # Parse longrepr or create simple tuple
+         lineno = item.location[1] if item.location[1] is not None else -1
+         rep.longrepr = (str(item.fspath), lineno, longrepr)  # type: ignore[assignment]
+     elif outcome == "failed" and longrepr:
+         rep.longrepr = longrepr
+
+     # Attach captured output to the report when present.
+     # Pytest's reporting layer will decide whether to display it based on
+     # test outcome, verbosity, and capture settings (--capture, -s, etc.)
+     all_sections = list(sections) if sections else []
+     if stdout:
+         all_sections.append(("Captured stdout call", stdout))
+     if stderr:
+         all_sections.append(("Captured stderr call", stderr))
+     if all_sections:
+         rep.sections = all_sections
+
+     item.ihook.pytest_runtest_logreport(report=rep)
+
+
+ def _emit_failure_for_items(
+     items: list[pytest.Item],
+     error_message: str,
+     session: pytest.Session,
+ ) -> None:
+     """Emit synthetic failure reports when subprocess execution fails.
+
+     When a subprocess crashes, times out, or fails during collection, we emit
+     synthetic test phase reports to mark affected tests as failed. We report
+     setup="passed" and teardown="passed" (even though these phases never ran)
+     to ensure pytest categorizes the test as FAILED rather than ERROR. The actual
+     failure is reported in the call phase with the error message.
+
+     For xfail tests, call is reported as skipped with wasxfail=True to maintain
+     proper xfail semantics.
+     """
+     for it in items:
+         xfail_marker = it.get_closest_marker("xfail")
+         _emit_report(it, when="setup", outcome="passed")
+         if xfail_marker:
+             _emit_report(
+                 it,
+                 when="call",
+                 outcome="skipped",
+                 longrepr=error_message,
+                 wasxfail=True,
+             )
+         else:
+             _emit_report(
+                 it,
+                 when="call",
+                 outcome="failed",
+                 longrepr=error_message,
+             )
+             session.testsfailed += 1
+         _emit_report(it, when="teardown", outcome="passed")
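
In both versions, the child/parent handshake is a JSONL file: the child's pytest_runtest_logreport appends one _TestRecord per test phase, and the parent regroups the records by nodeid and phase before re-emitting them. A standalone sketch of that parent-side parsing, assuming a record file at the illustrative path report.jsonl:

# Illustrative parser for the JSONL records written by the child process.
import json
from pathlib import Path

results: dict[str, dict[str, dict]] = {}
for line in Path("report.jsonl").read_text(encoding="utf-8").splitlines():
    if not line.strip():
        continue
    rec = json.loads(line)
    # One record per phase: group by nodeid, then by "setup"/"call"/"teardown".
    results.setdefault(rec["nodeid"], {})[rec["when"]] = rec

for nodeid, phases in results.items():
    call = phases.get("call")
    print(nodeid, call["outcome"] if call else "missing call phase")

A nodeid with no call record is how the parent detects that a test never ran, which is exactly the case _emit_failure_for_items handles in 0.4.1.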