pytest-isolated 0.2.0-py3-none-any.whl → 0.4.0-py3-none-any.whl

pytest_isolated/__init__.py CHANGED
@@ -1,3 +1,3 @@
  """pytest-isolated: Run pytest tests in isolated subprocesses."""

- __version__ = "0.2.0"
+ __version__ = "0.4.0"
pytest_isolated/plugin.py CHANGED
@@ -9,20 +9,111 @@ import tempfile
  import time
  from collections import OrderedDict
  from pathlib import Path
- from typing import Any
+ from typing import Any, Final, Literal, TypedDict, cast

  import pytest

  # Guard to prevent infinite recursion (parent spawns child; child must not spawn again)
- SUBPROC_ENV = "PYTEST_RUNNING_IN_SUBPROCESS"
+ SUBPROC_ENV: Final = "PYTEST_RUNNING_IN_SUBPROCESS"

  # Parent tells child where to write JSONL records per test call
- SUBPROC_REPORT_PATH = "PYTEST_SUBPROCESS_REPORT_PATH"
+ SUBPROC_REPORT_PATH: Final = "PYTEST_SUBPROCESS_REPORT_PATH"
+
+ # Options that should be forwarded to subprocess (flags without values)
+ _FORWARD_FLAGS: Final = {
+     "-v",
+     "--verbose",
+     "-q",
+     "--quiet",
+     "-s",  # disable output capturing
+     "-l",
+     "--showlocals",
+     "--strict-markers",
+     "--strict-config",
+     "-x",  # exit on first failure
+     "--exitfirst",
+ }
+
+ # Options that should be forwarded to subprocess (options with values)
+ _FORWARD_OPTIONS_WITH_VALUE: Final = {
+     "--tb",  # traceback style
+     "-r",  # show extra test summary info
+     "--capture",  # capture method (fd/sys/no/tee-sys)
+ }
+
+
+ def _has_isolated_marker(obj: Any) -> bool:
+     """Check if an object has the isolated marker in its pytestmark."""
+     markers = getattr(obj, "pytestmark", [])
+     if not isinstance(markers, list):
+         markers = [markers]
+     return any(getattr(m, "name", None) == "isolated" for m in markers)
+
+
+ # ---------------------------------------------------------------------------
+ # Cross-platform crash detection helpers
+ # ---------------------------------------------------------------------------
+
+
+ def _format_crash_reason(returncode: int) -> str:
+     """Format a human-readable crash reason from a return code.
+
+     On Unix, negative return codes indicate signal numbers.
+     On Windows, we report the exit code directly.
+     """
+     if returncode < 0:
+         # Unix: negative return code is -signal_number
+         return f"crashed with signal {-returncode}"
+     # Windows or other: positive exit code
+     return f"crashed with exit code {returncode}"
+
+
+ def _format_crash_message(
+     returncode: int,
+     context: str,
+     stderr_text: str = "",
+ ) -> str:
+     """Build a complete crash error message with optional stderr output.
+
+     Args:
+         returncode: The subprocess return code.
+         context: Description of when the crash occurred (e.g., "during test execution").
+         stderr_text: Optional captured stderr from the subprocess.
+
+     Returns:
+         A formatted error message suitable for test failure reports.
+     """
+     reason = _format_crash_reason(returncode)
+     msg = f"Subprocess {reason} {context}."
+     if stderr_text:
+         msg += f"\n\nSubprocess stderr:\n{stderr_text}"
+     return msg
+
+
+ class _TestRecord(TypedDict, total=False):
+     """Structure for test phase results from subprocess."""
+
+     nodeid: str
+     when: Literal["setup", "call", "teardown"]
+     outcome: Literal["passed", "failed", "skipped"]
+     longrepr: str
+     duration: float
+     stdout: str
+     stderr: str
+     keywords: list[str]
+     sections: list[tuple[str, str]]
+     user_properties: list[tuple[str, Any]]
+     wasxfail: bool


  def pytest_addoption(parser: pytest.Parser) -> None:
-     """Add configuration options for subprocess isolation."""
      group = parser.getgroup("isolated")
+     group.addoption(
+         "--isolated",
+         action="store_true",
+         default=False,
+         help="Run all tests in isolated subprocesses",
+     )
      group.addoption(
          "--isolated-timeout",
          type=int,
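The two crash helpers added in this hunk are pure string formatters, so their behavior can be sanity-checked directly from the definitions above. A minimal sketch, assuming the private helpers are importable from the installed module (the return codes and stderr text are illustrative):

```python
# Illustrative only: assumes the helpers defined in the hunk above.
from pytest_isolated.plugin import _format_crash_message, _format_crash_reason

print(_format_crash_reason(-11))  # Unix SIGSEGV -> "crashed with signal 11"
print(_format_crash_reason(3))    # Windows/other -> "crashed with exit code 3"

# Context and captured stderr are folded into one failure message:
print(_format_crash_message(-11, "during test execution", "Fatal Python error"))
# Subprocess crashed with signal 11 during test execution.
#
# Subprocess stderr:
# Fatal Python error
```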
@@ -62,17 +153,13 @@ def pytest_configure(config: pytest.Config) -> None:
  # CHILD MODE: record results + captured output per test phase
  # ----------------------------
  def pytest_runtest_logreport(report: pytest.TestReport) -> None:
-     """
-     In the child process, write one JSON line per test phase (setup/call/teardown)
-     containing outcome, captured stdout/stderr, duration, and other metadata.
-     The parent will aggregate and re-emit this info.
-     """
+     """Write test phase results to a JSONL file when running in subprocess mode."""
      path = os.environ.get(SUBPROC_REPORT_PATH)
      if not path:
          return

      # Capture ALL phases (setup, call, teardown), not just call
-     rec = {
+     rec: _TestRecord = {
          "nodeid": report.nodeid,
          "when": report.when,  # setup, call, or teardown
          "outcome": report.outcome,  # passed/failed/skipped
@@ -96,9 +183,6 @@ def pytest_runtest_logreport(report: pytest.TestReport) -> None:
  def pytest_collection_modifyitems(
      config: pytest.Config, items: list[pytest.Item]
  ) -> None:
-     """
-     Partition items into subprocess groups + normal items and stash on config.
-     """
      if os.environ.get(SUBPROC_ENV) == "1":
          return  # child should not do grouping

@@ -108,25 +192,58 @@ def pytest_collection_modifyitems(
      config._subprocess_normal_items = items  # type: ignore[attr-defined]
      return

+     # If --isolated is set, run all tests in isolation
+     run_all_isolated = config.getoption("isolated", False)
+
      groups: OrderedDict[str, list[pytest.Item]] = OrderedDict()
      group_timeouts: dict[str, int | None] = {}  # Track timeout per group
      normal: list[pytest.Item] = []

      for item in items:
          m = item.get_closest_marker("isolated")
-         if not m:
+
+         # Skip non-isolated tests unless --isolated flag is set
+         if not m and not run_all_isolated:
              normal.append(item)
              continue

-         group = m.kwargs.get("group")
-         # Default grouping to module path (so you don't accidentally group everything)
+         # Get group from marker (positional arg, keyword arg, or default)
+         group = None
+         if m:
+             # Support @pytest.mark.isolated("groupname") - positional arg
+             if m.args:
+                 group = m.args[0]
+             # Support @pytest.mark.isolated(group="groupname") - keyword arg
+             elif "group" in m.kwargs:
+                 group = m.kwargs["group"]
+
+         # Default grouping logic
          if group is None:
-             group = item.nodeid.split("::")[0]
+             # If --isolated flag is used (no explicit marker), use unique nodeid
+             if not m:
+                 group = item.nodeid
+             # Check if marker was applied to a class or module
+             elif isinstance(item, pytest.Function):
+                 if item.cls is not None and _has_isolated_marker(item.cls):
+                     # Group by class name (module::class)
+                     parts = item.nodeid.split("::")
+                     group = "::".join(parts[:2]) if len(parts) >= 3 else item.nodeid
+                 elif _has_isolated_marker(item.module):
+                     # Group by module name (first part of nodeid)
+                     parts = item.nodeid.split("::")
+                     group = parts[0]
+                 else:
+                     # Explicit marker on function uses unique nodeid
+                     group = item.nodeid
+             else:
+                 # Non-Function items use unique nodeid
+                 group = item.nodeid

          # Store group-specific timeout (first marker wins)
          group_key = str(group)
          if group_key not in group_timeouts:
-             group_timeouts[group_key] = m.kwargs.get("timeout")
+             timeout = m.kwargs.get("timeout") if m else None
+             group_timeouts[group_key] = timeout

          groups.setdefault(group_key, []).append(item)

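The default-grouping branch above is easiest to read as a mapping from marker scope to group key. A hypothetical distillation of those rules (not the plugin's actual code), checkable standalone with made-up nodeids:

```python
def resolve_group(nodeid: str, scope: str) -> str:
    """Hypothetical re-statement of the default-grouping rules above."""
    parts = nodeid.split("::")
    if scope == "class" and len(parts) >= 3:
        return "::".join(parts[:2])  # module::Class -> one shared subprocess
    if scope == "module":
        return parts[0]              # whole module -> one shared subprocess
    return nodeid                    # function marker / --isolated -> own subprocess


assert resolve_group("tests/test_a.py::TestDB::test_x", "class") == "tests/test_a.py::TestDB"
assert resolve_group("tests/test_a.py::test_x", "module") == "tests/test_a.py"
assert resolve_group("tests/test_a.py::test_x", "function") == "tests/test_a.py::test_x"
```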
@@ -135,24 +252,109 @@ def pytest_runtestloop(session: pytest.Session) -> int | None:
      config._subprocess_normal_items = normal  # type: ignore[attr-defined]


- def pytest_runtestloop(session: pytest.Session) -> int | None:
+ def _emit_report(
+     item: pytest.Item,
+     *,
+     when: Literal["setup", "call", "teardown"],
+     outcome: Literal["passed", "failed", "skipped"],
+     longrepr: str = "",
+     duration: float = 0.0,
+     stdout: str = "",
+     stderr: str = "",
+     sections: list[tuple[str, str]] | None = None,
+     user_properties: list[tuple[str, Any]] | None = None,
+     wasxfail: bool = False,
+     capture_passed: bool = False,
+ ) -> None:
+     """Emit a test report for a specific test phase."""
+     call = pytest.CallInfo.from_call(lambda: None, when=when)
+     rep = pytest.TestReport.from_item_and_call(item, call)
+     rep.outcome = outcome
+     rep.duration = duration
+
+     if user_properties:
+         rep.user_properties = user_properties
+
+     if wasxfail:
+         rep.wasxfail = "reason: xfail"
+
+     # For skipped tests, longrepr needs to be a tuple (path, lineno, reason)
+     if outcome == "skipped" and longrepr:
+         # Parse longrepr or create simple tuple
+         lineno = item.location[1] if item.location[1] is not None else -1
+         rep.longrepr = (str(item.fspath), lineno, longrepr)  # type: ignore[assignment]
+     elif outcome == "failed" and longrepr:
+         rep.longrepr = longrepr
+
+     # Add captured output as sections (capstdout/capstderr are read-only)
+     if outcome == "failed" or (outcome == "passed" and capture_passed):
+         all_sections = list(sections) if sections else []
+         if stdout:
+             all_sections.append(("Captured stdout call", stdout))
+         if stderr:
+             all_sections.append(("Captured stderr call", stderr))
+         if all_sections:
+             rep.sections = all_sections
+
+     item.ihook.pytest_runtest_logreport(report=rep)
+
+
+ def _emit_failure_for_items(
+     items: list[pytest.Item],
+     error_message: str,
+     session: pytest.Session,
+     capture_passed: bool = False,
+ ) -> None:
+     """Emit synthetic failure reports when subprocess execution fails.
+
+     When a subprocess crashes, times out, or fails during collection, we emit
+     synthetic test phase reports to mark affected tests as failed. We report
+     setup="passed" and teardown="passed" (even though these phases never ran)
+     to ensure pytest categorizes the test as FAILED rather than ERROR. The actual
+     failure is reported in the call phase with the error message.
+
+     For xfail tests, call is reported as skipped with wasxfail=True to maintain
+     proper xfail semantics.
      """
-     Run each subprocess group in its own subprocess once;
-     then run normal tests in-process.
-
-     Enhanced to:
-     - Capture all test phases (setup, call, teardown)
-     - Support configurable timeouts
-     - Properly handle crashes and missing results
-     - Integrate with pytest's reporting system
+     for it in items:
+         xfail_marker = it.get_closest_marker("xfail")
+         _emit_report(it, when="setup", outcome="passed", capture_passed=capture_passed)
+         if xfail_marker:
+             _emit_report(
+                 it,
+                 when="call",
+                 outcome="skipped",
+                 longrepr=error_message,
+                 wasxfail=True,
+                 capture_passed=capture_passed,
+             )
+         else:
+             _emit_report(
+                 it,
+                 when="call",
+                 outcome="failed",
+                 longrepr=error_message,
+                 capture_passed=capture_passed,
+             )
+             session.testsfailed += 1
+         _emit_report(
+             it, when="teardown", outcome="passed", capture_passed=capture_passed
+         )
+
+
+ def pytest_runtestloop(session: pytest.Session) -> int | None:
+     """Execute isolated test groups in subprocesses and remaining tests in-process.
+
+     Any subprocess timeouts are caught and reported as test failures; the
+     subprocess.TimeoutExpired exception is not propagated to the caller.
      """
      if os.environ.get(SUBPROC_ENV) == "1":
          return None  # child runs the normal loop

      config = session.config
-     groups: OrderedDict[str, list[pytest.Item]] = getattr(
-         config, "_subprocess_groups", OrderedDict()
-     )
+     groups = getattr(config, "_subprocess_groups", OrderedDict())
+     if not isinstance(groups, OrderedDict):
+         groups = OrderedDict()
      group_timeouts: dict[str, int | None] = getattr(
          config, "_subprocess_group_timeouts", {}
      )
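Per the `_emit_failure_for_items` docstring, a crashed group still yields FAILED rather than ERROR outcomes, and xfail semantics are preserved. A sketch of a test that would exercise the xfail path on a Unix host (test name and reason are made up; the crash is simulated with a signal):

```python
import os
import signal

import pytest


@pytest.mark.isolated
@pytest.mark.xfail(reason="known crash in native extension")
def test_known_segfault():
    # The child dies with SIGSEGV (returncode -11 seen by the parent); the
    # plugin should report this test as xfailed instead of erroring out.
    os.kill(os.getpid(), signal.SIGSEGV)
```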
@@ -168,52 +370,6 @@ def pytest_runtestloop(session: pytest.Session) -> int | None:
      # Get capture configuration
      capture_passed = config.getini("isolated_capture_passed")

-     def emit_report(
-         item: pytest.Item,
-         when: str,
-         outcome: str,
-         longrepr: str = "",
-         duration: float = 0.0,
-         stdout: str = "",
-         stderr: str = "",
-         sections: list[tuple[str, str]] | None = None,
-         user_properties: list[tuple[str, Any]] | None = None,
-         wasxfail: bool = False,
-     ) -> None:
-         """
-         Emit a synthetic report for the given item and phase.
-         Attach captured output based on outcome and configuration.
-         """
-         call = pytest.CallInfo.from_call(lambda: None, when=when)
-         rep = pytest.TestReport.from_item_and_call(item, call)
-         rep.outcome = outcome
-         rep.duration = duration
-
-         if user_properties:
-             rep.user_properties = user_properties
-
-         if wasxfail:
-             rep.wasxfail = "reason: xfail"
-
-         # For skipped tests, longrepr needs to be a tuple (path, lineno, reason)
-         if outcome == "skipped" and longrepr:
-             # Parse longrepr or create simple tuple
-             rep.longrepr = (str(item.fspath), item.location[1], longrepr)
-         elif outcome == "failed" and longrepr:
-             rep.longrepr = longrepr
-
-         # Add captured output as sections (capstdout/capstderr are read-only)
-         if outcome == "failed" or (outcome == "passed" and capture_passed):
-             all_sections = list(sections) if sections else []
-             if stdout:
-                 all_sections.append(("Captured stdout call", stdout))
-             if stderr:
-                 all_sections.append(("Captured stderr call", stderr))
-             if all_sections:
-                 rep.sections = all_sections
-
-         item.ihook.pytest_runtest_logreport(report=rep)
-
      # Run groups
      for group_name, group_items in groups.items():
          nodeids = [it.nodeid for it in group_items]
@@ -231,24 +387,78 @@ def pytest_runtestloop(session: pytest.Session) -> int | None:
          env[SUBPROC_ENV] = "1"
          env[SUBPROC_REPORT_PATH] = report_path

-         # Run pytest in subprocess with timeout, tracking execution time
-         cmd = [sys.executable, "-m", "pytest", *nodeids]
+         # Forward relevant pytest options to subprocess for consistency
+         # Only forward specific options that affect test execution behavior
+         forwarded_args = []
+         if hasattr(config, "invocation_params") and hasattr(
+             config.invocation_params, "args"
+         ):
+             skip_next = False
+
+             for arg in config.invocation_params.args:
+                 if skip_next:
+                     skip_next = False
+                     continue
+
+                 # Forward only explicitly allowed options
+                 if arg in _FORWARD_FLAGS:
+                     forwarded_args.append(arg)
+                 elif arg in _FORWARD_OPTIONS_WITH_VALUE:
+                     forwarded_args.append(arg)
+                     skip_next = True  # Next arg is the value
+                 elif arg.startswith(
+                     tuple(f"{opt}=" for opt in _FORWARD_OPTIONS_WITH_VALUE)
+                 ):
+                     forwarded_args.append(arg)
+
+                 # Skip everything else (positional args, test paths, unknown options)
+
+         # Build pytest command for subprocess
+         cmd = [sys.executable, "-m", "pytest"]
+         cmd.extend(forwarded_args)
+
+         # Pass rootdir to subprocess to ensure it uses the same project root
+         if config.rootpath:
+             cmd.extend(["--rootdir", str(config.rootpath)])
+
+         # Add the test nodeids
+         cmd.extend(nodeids)
+
          start_time = time.time()

+         # Determine the working directory for the subprocess
+         # Use rootpath if set, otherwise use invocation directory
+         # This ensures nodeids (which are relative to rootpath) can be resolved
+         subprocess_cwd = None
+         if config.rootpath:
+             subprocess_cwd = str(config.rootpath)
+         elif hasattr(config, "invocation_params") and hasattr(
+             config.invocation_params, "dir"
+         ):
+             subprocess_cwd = str(config.invocation_params.dir)
+
+         proc_stderr = b""
          try:
              proc = subprocess.run(
-                 cmd, env=env, timeout=group_timeout, capture_output=False, check=False
+                 cmd,
+                 env=env,
+                 timeout=group_timeout,
+                 capture_output=True,
+                 check=False,
+                 cwd=subprocess_cwd,
              )
              returncode = proc.returncode
+             proc_stderr = proc.stderr or b""
              timed_out = False
-         except subprocess.TimeoutExpired:
+         except subprocess.TimeoutExpired as exc:
              returncode = -1
+             proc_stderr = exc.stderr or b""
              timed_out = True

          execution_time = time.time() - start_time

          # Gather results from JSONL file
-         results: dict[str, dict[str, Any]] = {}
+         results: dict[str, dict[str, _TestRecord]] = {}
          report_file = Path(report_path)
          if report_file.exists():
              with report_file.open(encoding="utf-8") as f:
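Tracing the command assembly above: for a hypothetical parent invocation `pytest -v --tb=short tests/` with rootdir `/proj`, the child command should come out roughly as follows (paths and nodeids invented):

```python
import sys

forwarded_args = ["-v", "--tb=short"]  # allow-listed flags only; "tests/" is dropped
nodeids = ["tests/test_db.py::test_connect"]  # the group being run

cmd = [sys.executable, "-m", "pytest", *forwarded_args, "--rootdir", "/proj", *nodeids]
print(cmd)
# e.g. ['/usr/bin/python3', '-m', 'pytest', '-v', '--tb=short',
#       '--rootdir', '/proj', 'tests/test_db.py::test_connect']
```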
@@ -256,7 +466,7 @@ def pytest_runtestloop(session: pytest.Session) -> int | None:
                      file_line = line.strip()
                      if not file_line:
                          continue
-                     rec = json.loads(file_line)
+                     rec = cast(_TestRecord, json.loads(file_line))
                      nodeid = rec["nodeid"]
                      when = rec["when"]

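The statement that fills `results` sits between these hunks, but the `dict[str, dict[str, _TestRecord]]` annotation and the lookups that follow imply a nodeid → phase → record nesting, presumably along these lines (values are illustrative):

```python
results = {
    "tests/test_db.py::test_connect": {  # hypothetical nodeid
        "setup": {"outcome": "passed", "duration": 0.001},
        "call": {"outcome": "failed", "longrepr": "AssertionError: ..."},
        "teardown": {"outcome": "passed", "duration": 0.0},
    },
}
node_results = results.get("tests/test_db.py::test_connect", {})
# A missing "call" entry after a passed setup is what the crash
# detection below treats as a mid-test crash.
assert "call" in node_results
```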
@@ -266,7 +476,20 @@ def pytest_runtestloop(session: pytest.Session) -> int | None:
          with contextlib.suppress(OSError):
              report_file.unlink()

-         # Handle timeout or crash
+         # For crashes (negative returncode), check if we should treat as xfail
+         if returncode < 0 and results:
+             # Check if all tests in this group are marked xfail
+             all_xfail = all(it.get_closest_marker("xfail") for it in group_items)
+             if all_xfail:
+                 # Override any results from subprocess - crash is the expected outcome
+                 msg = (
+                     f"Subprocess crashed with signal {-returncode} "
+                     f"(expected for xfail test)"
+                 )
+                 _emit_failure_for_items(group_items, msg, session, capture_passed)
+                 continue
+
+         # Handle timeout
          if timed_out:
              msg = (
                  f"Subprocess group={group_name!r} timed out after {group_timeout} "
@@ -274,50 +497,163 @@ def pytest_runtestloop(session: pytest.Session) -> int | None:
                  f"Increase timeout with --isolated-timeout, isolated_timeout ini, "
                  f"or @pytest.mark.isolated(timeout=N)."
              )
-             for it in group_items:
-                 emit_report(it, "call", "failed", longrepr=msg)
-                 session.testsfailed += 1
+             _emit_failure_for_items(group_items, msg, session, capture_passed)
              continue

+         # Handle crash during collection (no results produced)
          if not results:
+             stderr_text = proc_stderr.decode("utf-8", errors="replace").strip()
              msg = (
                  f"Subprocess group={group_name!r} exited with code {returncode} "
                  f"and produced no per-test report. The subprocess may have "
                  f"crashed during collection."
              )
-             for it in group_items:
-                 emit_report(it, "call", "failed", longrepr=msg)
-                 session.testsfailed += 1
+             if stderr_text:
+                 msg += f"\n\nSubprocess stderr:\n{stderr_text}"
+             _emit_failure_for_items(group_items, msg, session, capture_passed)
              continue

+         # Handle mid-test crash: detect tests with incomplete phases
+         # (e.g., setup recorded but call missing indicates crash during test)
+         crashed_items: list[pytest.Item] = []
+
+         for it in group_items:
+             node_results = results.get(it.nodeid, {})
+             # Test started (setup passed) but crashed before call completed.
+             # If setup was skipped or failed, no call phase is expected.
+             if node_results and "call" not in node_results:
+                 setup_result = node_results.get("setup", {})
+                 setup_outcome = setup_result.get("outcome", "")
+                 if setup_outcome == "passed":
+                     crashed_items.append(it)
+
+         # If we detected crashed tests, also find tests that never ran
+         # (they come after the crashing test in the same group)
+         not_run_items: list[pytest.Item] = []
+         if crashed_items:
+             for it in group_items:
+                 node_results = results.get(it.nodeid, {})
+                 # Test never started (no results at all)
+                 if not node_results:
+                     not_run_items.append(it)
+
+         if crashed_items or not_run_items:
+             stderr_text = proc_stderr.decode("utf-8", errors="replace").strip()
+
+             # Emit failures for crashed tests
+             if crashed_items:
+                 crash_msg = _format_crash_message(
+                     returncode, "during test execution", stderr_text
+                 )
+
+                 for it in crashed_items:
+                     node_results = results.get(it.nodeid, {})
+                     # Emit setup phase if it was recorded
+                     if "setup" in node_results:
+                         rec = node_results["setup"]
+                         _emit_report(
+                             it,
+                             when="setup",
+                             outcome=rec["outcome"],
+                             longrepr=rec.get("longrepr", ""),
+                             duration=rec.get("duration", 0.0),
+                             capture_passed=capture_passed,
+                         )
+                     else:
+                         _emit_report(
+                             it,
+                             when="setup",
+                             outcome="passed",
+                             capture_passed=capture_passed,
+                         )
+
+                     # Emit call phase as failed with crash info
+                     xfail_marker = it.get_closest_marker("xfail")
+                     if xfail_marker:
+                         _emit_report(
+                             it,
+                             when="call",
+                             outcome="skipped",
+                             longrepr=crash_msg,
+                             wasxfail=True,
+                             capture_passed=capture_passed,
+                         )
+                     else:
+                         _emit_report(
+                             it,
+                             when="call",
+                             outcome="failed",
+                             longrepr=crash_msg,
+                             capture_passed=capture_passed,
+                         )
+                         session.testsfailed += 1
+
+                     _emit_report(
+                         it,
+                         when="teardown",
+                         outcome="passed",
+                         capture_passed=capture_passed,
+                     )
+                     # Remove from results so they're not processed again
+                     results.pop(it.nodeid, None)
+
+             # Emit failures for tests that never ran due to earlier crash
+             if not_run_items:
+                 not_run_msg = _format_crash_message(
+                     returncode, "during earlier test execution", stderr_text
+                 )
+                 not_run_msg = f"Test did not run - {not_run_msg}"
+                 _emit_failure_for_items(
+                     not_run_items, not_run_msg, session, capture_passed
+                 )
+                 for it in not_run_items:
+                     results.pop(it.nodeid, None)
+
          # Emit per-test results into parent (all phases)
          for it in group_items:
              node_results = results.get(it.nodeid, {})

+             # Skip tests that were already handled by crash detection above
+             if it.nodeid not in results:
+                 continue
+
+             # Check if setup passed (to determine if missing call is expected)
+             setup_passed = (
+                 "setup" in node_results and node_results["setup"]["outcome"] == "passed"
+             )
+
              # Emit setup, call, teardown in order
-             for when in ["setup", "call", "teardown"]:
+             for when in ["setup", "call", "teardown"]:  # type: ignore[assignment]
                  if when not in node_results:
-                     # If missing a phase, synthesize a passing one
-                     if when == "call" and not node_results:
-                         # Test completely missing - mark as failed
-                         emit_report(
+                     # If missing call phase AND setup passed, emit a failure
+                     # (crash detection above should handle most cases, but this
+                     # is a safety net for unexpected situations)
+                     # If setup failed, missing call is expected (pytest skips call)
+                     if when == "call" and setup_passed:
+                         msg = (
+                             "Missing 'call' phase result"
+                             f" from subprocess for {it.nodeid}"
+                         )
+                         _emit_report(
                              it,
-                             "call",
-                             "failed",
-                             longrepr=f"Missing result from subprocess for {it.nodeid}",
+                             when="call",
+                             outcome="failed",
+                             longrepr=msg,
+                             capture_passed=capture_passed,
                          )
                          session.testsfailed += 1
                      continue

                  rec = node_results[when]
-                 emit_report(
+                 _emit_report(
                      it,
-                     when=when,
-                     outcome=rec["outcome"],
+                     when=when,  # type: ignore[arg-type]
+                     outcome=rec.get("outcome", "failed"),  # type: ignore[arg-type]
                      longrepr=rec.get("longrepr", ""),
                      duration=rec.get("duration", 0.0),
                      stdout=rec.get("stdout", ""),
                      stderr=rec.get("stderr", ""),
+                     capture_passed=capture_passed,
                      sections=rec.get("sections"),
                      user_properties=rec.get("user_properties"),
                      wasxfail=rec.get("wasxfail", False),
@@ -326,6 +662,14 @@ def pytest_runtestloop(session: pytest.Session) -> int | None:
                  if when == "call" and rec["outcome"] == "failed":
                      session.testsfailed += 1

+         # Check if we should exit early due to maxfail/exitfirst
+         if (
+             session.testsfailed
+             and session.config.option.maxfail
+             and session.testsfailed >= session.config.option.maxfail
+         ):
+             return 1
+
      # Run normal tests in-process
      for idx, item in enumerate(normal_items):
          nextitem = normal_items[idx + 1] if idx + 1 < len(normal_items) else None
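With the early-exit check above, `-x`/`--maxfail` now applies across subprocess groups as well as in-process tests. Taken together, the loop delivers the plugin's headline behavior; a minimal sketch of what that buys you (hypothetical test file):

```python
import ctypes

import pytest


@pytest.mark.isolated
def test_null_deref():
    # Dereferencing NULL kills only the child subprocess; the parent records
    # a failure (setup passed / call failed / teardown passed) and continues.
    ctypes.string_at(0)


def test_still_runs():
    # Runs in-process after the crash above has been contained.
    assert True
```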
{pytest_isolated-0.2.0.dist-info → pytest_isolated-0.4.0.dist-info}/METADATA RENAMED
@@ -1,9 +1,12 @@
  Metadata-Version: 2.4
  Name: pytest-isolated
- Version: 0.2.0
+ Version: 0.4.0
  Summary: Run marked pytest tests in grouped subprocesses (cross-platform).
  Author: pytest-isolated contributors
  License-Expression: MIT
+ Project-URL: Homepage, https://github.com/dyollb/pytest-isolated
+ Project-URL: Repository, https://github.com/dyollb/pytest-isolated
+ Project-URL: Issues, https://github.com/dyollb/pytest-isolated/issues
  Classifier: Development Status :: 4 - Beta
  Classifier: Framework :: Pytest
  Classifier: Intended Audience :: Developers
@@ -29,7 +32,7 @@ Dynamic: license-file
  [![Tests](https://github.com/dyollb/pytest-isolated/actions/workflows/test.yml/badge.svg)](https://github.com/dyollb/pytest-isolated/actions/workflows/test.yml)
  [![PyPI](https://img.shields.io/pypi/v/pytest-isolated.svg)](https://pypi.org/project/pytest-isolated/)

- A pytest plugin that runs marked tests in isolated subprocesses with intelligent grouping.
+ A cross-platform pytest plugin that runs marked tests in isolated subprocesses with intelligent grouping.

  ## Features

@@ -41,6 +44,10 @@ A pytest plugin that runs marked tests in isolated subprocesses with intelligent
  - Configurable timeouts to prevent hanging subprocesses
  - Cross-platform: Linux, macOS, Windows

+ ## Cheatsheet for pytest-forked users
+
+ This plugin is inspired by [pytest-forked](https://github.com/pytest-dev/pytest-forked). See [pytest-forked migration guide](docs/pytest-forked-migration.md) for a quick reference comparing features.
+
  ## Installation

  ```bash
@@ -63,6 +70,7 @@ def test_isolated():
  Tests with the same group run together in one subprocess:

  ```python
+ # Using keyword argument
  @pytest.mark.isolated(group="mygroup")
  def test_one():
      shared_state.append(1)
@@ -71,6 +79,11 @@ def test_one():
  def test_two():
      # Sees state from test_one
      assert len(shared_state) == 2
+
+ # Or using positional argument
+ @pytest.mark.isolated("mygroup")
+ def test_three():
+     shared_state.append(3)
  ```

  Set timeout per test group:
@@ -82,13 +95,48 @@ def test_with_timeout():
      expensive_operation()
  ```

- Tests without an explicit group are automatically grouped by module.
+ **Note:** Tests without an explicit `group` parameter each run in their own unique subprocess for maximum isolation.
+
+ ### Class and Module Markers
+
+ Apply to entire classes to share state between methods:
+
+ ```python
+ @pytest.mark.isolated
+ class TestDatabase:
+     def test_setup(self):
+         self.db = create_database()
+
+     def test_query(self):
+         # Shares state with test_setup
+         result = self.db.query("SELECT 1")
+         assert result
+ ```
+
+ Apply to entire modules using `pytestmark`:
+
+ ```python
+ import pytest
+
+ pytestmark = pytest.mark.isolated
+
+ def test_one():
+     # Runs in isolated subprocess
+     pass
+
+ def test_two():
+     # Shares subprocess with test_one
+     pass
+ ```

  ## Configuration

  ### Command Line

  ```bash
+ # Run all tests in isolation (even without @pytest.mark.isolated)
+ pytest --isolated
+
  # Set isolated test timeout (seconds)
  pytest --isolated-timeout=60

pytest_isolated-0.4.0.dist-info/RECORD ADDED
@@ -0,0 +1,9 @@
+ pytest_isolated/__init__.py,sha256=sBrN76YMZ6pbqIZzb-Yz5SyDPluz0TCQcNJJ3kYSSc0,89
+ pytest_isolated/plugin.py,sha256=33KIXSKlaC6Usq8ZrJmy2fjSTyVYj7q85777y7jWAGU,25698
+ pytest_isolated/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ pytest_isolated-0.4.0.dist-info/licenses/LICENSE,sha256=WECJyowi685PZSnKcA4Tqs7jukfzbnk7iMPLnm_q4JI,1067
+ pytest_isolated-0.4.0.dist-info/METADATA,sha256=wQ548E9j84pUfBNIYzDtbdcAh0CaGLXCiq7XGkXFI28,6679
+ pytest_isolated-0.4.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+ pytest_isolated-0.4.0.dist-info/entry_points.txt,sha256=HgRNPjIGoPBF1pkhma4UtaSwhpOVB8oZRZ0L1FcZXgk,45
+ pytest_isolated-0.4.0.dist-info/top_level.txt,sha256=FAtpozhvI-YaiFoZMepi9JAm6e87mW-TM1Ovu5xLOxg,16
+ pytest_isolated-0.4.0.dist-info/RECORD,,
{pytest_isolated-0.2.0.dist-info → pytest_isolated-0.4.0.dist-info}/WHEEL RENAMED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (80.9.0)
+ Generator: setuptools (80.10.2)
  Root-Is-Purelib: true
  Tag: py3-none-any

pytest_isolated-0.2.0.dist-info/RECORD DELETED
@@ -1,9 +0,0 @@
- pytest_isolated/__init__.py,sha256=PN_IEdfxCUz6vL1vf0Ka9CGmCq9ppFk33fVGirSVtMc,89
- pytest_isolated/plugin.py,sha256=0AKRTWmdLaiwZdwRicRd3TMLrIeisGBMlFqCDOnJRX0,12206
- pytest_isolated/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- pytest_isolated-0.2.0.dist-info/licenses/LICENSE,sha256=WECJyowi685PZSnKcA4Tqs7jukfzbnk7iMPLnm_q4JI,1067
- pytest_isolated-0.2.0.dist-info/METADATA,sha256=q_E02kvtbnt1QgQc-ATrghLcJka7Y_2Zx50tmatVGCM,5384
- pytest_isolated-0.2.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- pytest_isolated-0.2.0.dist-info/entry_points.txt,sha256=HgRNPjIGoPBF1pkhma4UtaSwhpOVB8oZRZ0L1FcZXgk,45
- pytest_isolated-0.2.0.dist-info/top_level.txt,sha256=FAtpozhvI-YaiFoZMepi9JAm6e87mW-TM1Ovu5xLOxg,16
- pytest_isolated-0.2.0.dist-info/RECORD,,