pytest-isolated 0.3.0__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pytest_isolated/__init__.py CHANGED
@@ -1,3 +1,3 @@
 """pytest-isolated: Run pytest tests in isolated subprocesses."""
 
-__version__ = "0.3.0"
+__version__ = "0.4.0"
pytest_isolated/plugin.py CHANGED
@@ -19,24 +19,75 @@ SUBPROC_ENV: Final = "PYTEST_RUNNING_IN_SUBPROCESS"
 # Parent tells child where to write JSONL records per test call
 SUBPROC_REPORT_PATH: Final = "PYTEST_SUBPROCESS_REPORT_PATH"
 
-# Arguments to exclude when forwarding options to subprocess
-_EXCLUDED_ARG_PREFIXES: Final = (
-    "--junitxml=",
-    "--html=",
-    "--result-log=",
-    "--collect-only",
-    "--setup-only",
-    "--setup-plan",
-    "-x",
+# Options that should be forwarded to subprocess (flags without values)
+_FORWARD_FLAGS: Final = {
+    "-v",
+    "--verbose",
+    "-q",
+    "--quiet",
+    "-s",  # disable output capturing
+    "-l",
+    "--showlocals",
+    "--strict-markers",
+    "--strict-config",
+    "-x",  # exit on first failure
     "--exitfirst",
-    "--maxfail=",
-)
+}
 
-# Plugin-specific options that take values and should not be forwarded
-_PLUGIN_OPTIONS_WITH_VALUE: Final = ("--isolated-timeout",)
+# Options that should be forwarded to subprocess (options with values)
+_FORWARD_OPTIONS_WITH_VALUE: Final = {
+    "--tb",  # traceback style
+    "-r",  # show extra test summary info
+    "--capture",  # capture method (fd/sys/no/tee-sys)
+}
 
-# Plugin-specific flag options that should not be forwarded
-_PLUGIN_FLAGS: Final = ("--no-isolation",)
+
+def _has_isolated_marker(obj: Any) -> bool:
+    """Check if an object has the isolated marker in its pytestmark."""
+    markers = getattr(obj, "pytestmark", [])
+    if not isinstance(markers, list):
+        markers = [markers]
+    return any(getattr(m, "name", None) == "isolated" for m in markers)
+
+
+# ---------------------------------------------------------------------------
+# Cross-platform crash detection helpers
+# ---------------------------------------------------------------------------
+
+
+def _format_crash_reason(returncode: int) -> str:
+    """Format a human-readable crash reason from a return code.
+
+    On Unix, negative return codes indicate signal numbers.
+    On Windows, we report the exit code directly.
+    """
+    if returncode < 0:
+        # Unix: negative return code is -signal_number
+        return f"crashed with signal {-returncode}"
+    # Windows or other: positive exit code
+    return f"crashed with exit code {returncode}"
+
+
+def _format_crash_message(
+    returncode: int,
+    context: str,
+    stderr_text: str = "",
+) -> str:
+    """Build a complete crash error message with optional stderr output.
+
+    Args:
+        returncode: The subprocess return code.
+        context: Description of when the crash occurred (e.g., "during test execution").
+        stderr_text: Optional captured stderr from the subprocess.
+
+    Returns:
+        A formatted error message suitable for test failure reports.
+    """
+    reason = _format_crash_reason(returncode)
+    msg = f"Subprocess {reason} {context}."
+    if stderr_text:
+        msg += f"\n\nSubprocess stderr:\n{stderr_text}"
+    return msg
 
 
 class _TestRecord(TypedDict, total=False):
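
A note on the return-code convention `_format_crash_reason` decodes: on Unix, `subprocess.run` reports a child killed by a signal as a negative return code. A minimal standalone sketch of that behavior (Unix-only; illustrative, not part of the package):

```python
import signal
import subprocess
import sys

# The child kills itself with SIGKILL; on Unix the parent then
# observes returncode == -signal.SIGKILL (i.e. -9).
proc = subprocess.run(
    [sys.executable, "-c", "import os, signal; os.kill(os.getpid(), signal.SIGKILL)"],
    check=False,
)
assert proc.returncode == -signal.SIGKILL
```
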
@@ -57,6 +108,12 @@ class _TestRecord(TypedDict, total=False):
 
 def pytest_addoption(parser: pytest.Parser) -> None:
     group = parser.getgroup("isolated")
+    group.addoption(
+        "--isolated",
+        action="store_true",
+        default=False,
+        help="Run all tests in isolated subprocesses",
+    )
     group.addoption(
         "--isolated-timeout",
         type=int,
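
A quick way to exercise the new `--isolated` flag end to end is pytest's own `pytester` fixture; this is an illustrative sketch, not a test from the package's suite:

```python
# Enable the pytester fixture (pytest ships it disabled by default).
pytest_plugins = ["pytester"]


def test_isolated_flag_is_accepted(pytester):
    # A trivial passing test file, collected and run with the new flag.
    pytester.makepyfile("def test_ok(): pass")
    result = pytester.runpytest("--isolated")
    result.assert_outcomes(passed=1)
```
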
@@ -135,25 +192,58 @@ def pytest_collection_modifyitems(
     config._subprocess_normal_items = items  # type: ignore[attr-defined]
         return
 
+    # If --isolated is set, run all tests in isolation
+    run_all_isolated = config.getoption("isolated", False)
+
     groups: OrderedDict[str, list[pytest.Item]] = OrderedDict()
     group_timeouts: dict[str, int | None] = {}  # Track timeout per group
     normal: list[pytest.Item] = []
 
     for item in items:
         m = item.get_closest_marker("isolated")
-        if not m:
+
+        # Skip non-isolated tests unless --isolated flag is set
+        if not m and not run_all_isolated:
             normal.append(item)
             continue
 
-        group = m.kwargs.get("group")
-        # Default grouping to module path (so you don't accidentally group everything)
+        # Get group from marker (positional arg, keyword arg, or default)
+        group = None
+        if m:
+            # Support @pytest.mark.isolated("groupname") - positional arg
+            if m.args:
+                group = m.args[0]
+            # Support @pytest.mark.isolated(group="groupname") - keyword arg
+            elif "group" in m.kwargs:
+                group = m.kwargs["group"]
+
+        # Default grouping logic
         if group is None:
-            group = item.nodeid.split("::")[0]
+            # If --isolated flag is used (no explicit marker), use unique nodeid
+            if not m:
+                group = item.nodeid
+            # Check if marker was applied to a class or module
+            elif isinstance(item, pytest.Function):
+                if item.cls is not None and _has_isolated_marker(item.cls):
+                    # Group by class name (module::class)
+                    parts = item.nodeid.split("::")
+                    group = "::".join(parts[:2]) if len(parts) >= 3 else item.nodeid
+                elif _has_isolated_marker(item.module):
+                    # Group by module name (first part of nodeid)
+                    parts = item.nodeid.split("::")
+                    group = parts[0]
+                else:
+                    # Explicit marker on function uses unique nodeid
+                    group = item.nodeid
+            else:
+                # Non-Function items use unique nodeid
+                group = item.nodeid
 
         # Store group-specific timeout (first marker wins)
         group_key = str(group)
         if group_key not in group_timeouts:
-            group_timeouts[group_key] = m.kwargs.get("timeout")
+            timeout = m.kwargs.get("timeout") if m else None
+            group_timeouts[group_key] = timeout
 
         groups.setdefault(group_key, []).append(item)
 
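
The default-grouping branches above reduce to a simple rule; here is a standalone sketch (hypothetical helper mirroring the logic, not the plugin's API):

```python
def default_group(nodeid: str, marker_scope: str) -> str:
    """marker_scope says where @pytest.mark.isolated was applied."""
    parts = nodeid.split("::")
    if marker_scope == "class":
        # module::class, falling back to the full nodeid for short ids
        return "::".join(parts[:2]) if len(parts) >= 3 else nodeid
    if marker_scope == "module":
        return parts[0]
    # function-level marker or --isolated: one subprocess per test
    return nodeid


assert default_group("tests/test_db.py::TestDB::test_q", "class") == "tests/test_db.py::TestDB"
assert default_group("tests/test_db.py::test_x", "module") == "tests/test_db.py"
assert default_group("tests/test_db.py::test_x", "function") == "tests/test_db.py::test_x"
```
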
@@ -162,6 +252,96 @@ def pytest_collection_modifyitems(
     config._subprocess_normal_items = normal  # type: ignore[attr-defined]
 
 
+def _emit_report(
+    item: pytest.Item,
+    *,
+    when: Literal["setup", "call", "teardown"],
+    outcome: Literal["passed", "failed", "skipped"],
+    longrepr: str = "",
+    duration: float = 0.0,
+    stdout: str = "",
+    stderr: str = "",
+    sections: list[tuple[str, str]] | None = None,
+    user_properties: list[tuple[str, Any]] | None = None,
+    wasxfail: bool = False,
+    capture_passed: bool = False,
+) -> None:
+    """Emit a test report for a specific test phase."""
+    call = pytest.CallInfo.from_call(lambda: None, when=when)
+    rep = pytest.TestReport.from_item_and_call(item, call)
+    rep.outcome = outcome
+    rep.duration = duration
+
+    if user_properties:
+        rep.user_properties = user_properties
+
+    if wasxfail:
+        rep.wasxfail = "reason: xfail"
+
+    # For skipped tests, longrepr needs to be a tuple (path, lineno, reason)
+    if outcome == "skipped" and longrepr:
+        # Parse longrepr or create simple tuple
+        lineno = item.location[1] if item.location[1] is not None else -1
+        rep.longrepr = (str(item.fspath), lineno, longrepr)  # type: ignore[assignment]
+    elif outcome == "failed" and longrepr:
+        rep.longrepr = longrepr
+
+    # Add captured output as sections (capstdout/capstderr are read-only)
+    if outcome == "failed" or (outcome == "passed" and capture_passed):
+        all_sections = list(sections) if sections else []
+        if stdout:
+            all_sections.append(("Captured stdout call", stdout))
+        if stderr:
+            all_sections.append(("Captured stderr call", stderr))
+        if all_sections:
+            rep.sections = all_sections
+
+    item.ihook.pytest_runtest_logreport(report=rep)
+
+
+def _emit_failure_for_items(
+    items: list[pytest.Item],
+    error_message: str,
+    session: pytest.Session,
+    capture_passed: bool = False,
+) -> None:
+    """Emit synthetic failure reports when subprocess execution fails.
+
+    When a subprocess crashes, times out, or fails during collection, we emit
+    synthetic test phase reports to mark affected tests as failed. We report
+    setup="passed" and teardown="passed" (even though these phases never ran)
+    to ensure pytest categorizes the test as FAILED rather than ERROR. The actual
+    failure is reported in the call phase with the error message.
+
+    For xfail tests, call is reported as skipped with wasxfail=True to maintain
+    proper xfail semantics.
+    """
+    for it in items:
+        xfail_marker = it.get_closest_marker("xfail")
+        _emit_report(it, when="setup", outcome="passed", capture_passed=capture_passed)
+        if xfail_marker:
+            _emit_report(
+                it,
+                when="call",
+                outcome="skipped",
+                longrepr=error_message,
+                wasxfail=True,
+                capture_passed=capture_passed,
+            )
+        else:
+            _emit_report(
+                it,
+                when="call",
+                outcome="failed",
+                longrepr=error_message,
+                capture_passed=capture_passed,
+            )
+            session.testsfailed += 1
+        _emit_report(
+            it, when="teardown", outcome="passed", capture_passed=capture_passed
+        )
+
+
 def pytest_runtestloop(session: pytest.Session) -> int | None:
     """Execute isolated test groups in subprocesses and remaining tests in-process.
 
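
For orientation: the parent builds the `results` mapping consumed below by reading the JSONL file named in `PYTEST_SUBPROCESS_REPORT_PATH`. A hedged sketch of that step, with per-record fields inferred from the `rec.get(...)` calls in this diff (the exact line framing is an assumption):

```python
import json


def load_results(path: str) -> dict[str, dict[str, dict]]:
    # results[nodeid][when] -> {"outcome": ..., "longrepr": ..., "duration": ...,
    #                           "stdout": ..., "stderr": ..., "wasxfail": ...}
    results: dict[str, dict[str, dict]] = {}
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            rec = json.loads(line)
            results.setdefault(rec["nodeid"], {})[rec["when"]] = rec
    return results
```
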
@@ -190,49 +370,6 @@ def pytest_runtestloop(session: pytest.Session) -> int | None:
     # Get capture configuration
     capture_passed = config.getini("isolated_capture_passed")
 
-    def emit_report(
-        item: pytest.Item,
-        when: Literal["setup", "call", "teardown"],
-        outcome: Literal["passed", "failed", "skipped"],
-        longrepr: str = "",
-        duration: float = 0.0,
-        stdout: str = "",
-        stderr: str = "",
-        sections: list[tuple[str, str]] | None = None,
-        user_properties: list[tuple[str, Any]] | None = None,
-        wasxfail: bool = False,
-    ) -> None:
-        call = pytest.CallInfo.from_call(lambda: None, when=when)
-        rep = pytest.TestReport.from_item_and_call(item, call)
-        rep.outcome = outcome
-        rep.duration = duration
-
-        if user_properties:
-            rep.user_properties = user_properties
-
-        if wasxfail:
-            rep.wasxfail = "reason: xfail"
-
-        # For skipped tests, longrepr needs to be a tuple (path, lineno, reason)
-        if outcome == "skipped" and longrepr:
-            # Parse longrepr or create simple tuple
-            lineno = item.location[1] if item.location[1] is not None else -1
-            rep.longrepr = (str(item.fspath), lineno, longrepr)  # type: ignore[assignment]
-        elif outcome == "failed" and longrepr:
-            rep.longrepr = longrepr
-
-        # Add captured output as sections (capstdout/capstderr are read-only)
-        if outcome == "failed" or (outcome == "passed" and capture_passed):
-            all_sections = list(sections) if sections else []
-            if stdout:
-                all_sections.append(("Captured stdout call", stdout))
-            if stderr:
-                all_sections.append(("Captured stderr call", stderr))
-            if all_sections:
-                rep.sections = all_sections
-
-        item.ihook.pytest_runtest_logreport(report=rep)
-
     # Run groups
     for group_name, group_items in groups.items():
         nodeids = [it.nodeid for it in group_items]
@@ -250,17 +387,12 @@ def pytest_runtestloop(session: pytest.Session) -> int | None:
         env[SUBPROC_ENV] = "1"
         env[SUBPROC_REPORT_PATH] = report_path
 
-        # Run pytest in subprocess with timeout, tracking execution time
-        # Preserve rootdir and run subprocess from correct directory to ensure
-        # nodeids can be resolved
-        cmd = [sys.executable, "-m", "pytest"]
-
         # Forward relevant pytest options to subprocess for consistency
-        # We filter out options that would interfere with subprocess execution
+        # Only forward specific options that affect test execution behavior
+        forwarded_args = []
         if hasattr(config, "invocation_params") and hasattr(
             config.invocation_params, "args"
         ):
-            forwarded_args = []
             skip_next = False
 
             for arg in config.invocation_params.args:
@@ -268,29 +400,24 @@ def pytest_runtestloop(session: pytest.Session) -> int | None:
                 skip_next = False
                     continue
 
-                # Skip our own plugin options
-                if arg in _PLUGIN_OPTIONS_WITH_VALUE:
-                    skip_next = True
-                    continue
-                if arg in _PLUGIN_FLAGS:
-                    continue
-
-                # Skip output/reporting options that would conflict
-                if any(arg.startswith(prefix) for prefix in _EXCLUDED_ARG_PREFIXES):
-                    continue
-                if arg in ("-x", "--exitfirst"):
-                    continue
-
-                # Skip test file paths and nodeids - we provide our own
-                if not arg.startswith("-") and ("::" in arg or arg.endswith(".py")):
-                    continue
+                # Forward only explicitly allowed options
+                if arg in _FORWARD_FLAGS:
+                    forwarded_args.append(arg)
+                elif arg in _FORWARD_OPTIONS_WITH_VALUE:
+                    forwarded_args.append(arg)
+                    skip_next = True  # Next arg is the value
+                elif arg.startswith(
+                    tuple(f"{opt}=" for opt in _FORWARD_OPTIONS_WITH_VALUE)
+                ):
+                    forwarded_args.append(arg)
 
-                forwarded_args.append(arg)
+                # Skip everything else (positional args, test paths, unknown options)
 
-            cmd.extend(forwarded_args)
+        # Build pytest command for subprocess
+        cmd = [sys.executable, "-m", "pytest"]
+        cmd.extend(forwarded_args)
 
         # Pass rootdir to subprocess to ensure it uses the same project root
-        # (config.rootpath is available in pytest 7.0+, which is our minimum version)
         if config.rootpath:
             cmd.extend(["--rootdir", str(config.rootpath)])
 
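
A self-contained re-implementation sketch of the allowlist filter above, run against a hypothetical argv (note the bare-flag and `--opt=value` forms):

```python
_FORWARD_FLAGS = {"-v", "-q", "-s", "-x"}
_FORWARD_OPTIONS_WITH_VALUE = {"--tb", "-r", "--capture"}


def filter_args(args: list[str]) -> list[str]:
    forwarded: list[str] = []
    skip_next = False
    for arg in args:
        if skip_next:
            skip_next = False
            continue
        if arg in _FORWARD_FLAGS:
            forwarded.append(arg)
        elif arg in _FORWARD_OPTIONS_WITH_VALUE:
            forwarded.append(arg)
            skip_next = True  # the separate value token follows
        elif arg.startswith(tuple(f"{opt}=" for opt in _FORWARD_OPTIONS_WITH_VALUE)):
            forwarded.append(arg)
        # everything else (paths, nodeids, unknown options) is dropped
    return forwarded


assert filter_args(["-v", "--tb=short", "tests/test_a.py::test_x"]) == ["-v", "--tb=short"]
```
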
@@ -310,19 +437,22 @@ def pytest_runtestloop(session: pytest.Session) -> int | None:
         ):
             subprocess_cwd = str(config.invocation_params.dir)
 
+        proc_stderr = b""
         try:
             proc = subprocess.run(
                 cmd,
                 env=env,
                 timeout=group_timeout,
-                capture_output=False,
+                capture_output=True,
                 check=False,
                 cwd=subprocess_cwd,
             )
             returncode = proc.returncode
+            proc_stderr = proc.stderr or b""
             timed_out = False
-        except subprocess.TimeoutExpired:
+        except subprocess.TimeoutExpired as exc:
             returncode = -1
+            proc_stderr = exc.stderr or b""
             timed_out = True
 
         execution_time = time.time() - start_time
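
With `capture_output=True`, stderr remains available even when the subprocess times out, via the exception object; a small illustrative sketch of the pattern:

```python
import subprocess
import sys

code = "import sys, time; print('diagnostics', file=sys.stderr, flush=True); time.sleep(60)"
try:
    proc = subprocess.run([sys.executable, "-c", code], capture_output=True, timeout=1, check=False)
    stderr = proc.stderr or b""
except subprocess.TimeoutExpired as exc:
    stderr = exc.stderr or b""  # output drained before the child was killed

print(stderr.decode("utf-8", errors="replace"))  # prints "diagnostics"
```
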
@@ -346,7 +476,20 @@ def pytest_runtestloop(session: pytest.Session) -> int | None:
         with contextlib.suppress(OSError):
             report_file.unlink()
 
-        # Handle timeout or crash
+        # For crashes (negative returncode), check if we should treat as xfail
+        if returncode < 0 and results:
+            # Check if all tests in this group are marked xfail
+            all_xfail = all(it.get_closest_marker("xfail") for it in group_items)
+            if all_xfail:
+                # Override any results from subprocess - crash is the expected outcome
+                msg = (
+                    f"Subprocess crashed with signal {-returncode} "
+                    f"(expected for xfail test)"
+                )
+                _emit_failure_for_items(group_items, msg, session, capture_passed)
+                continue
+
+        # Handle timeout
         if timed_out:
             msg = (
                 f"Subprocess group={group_name!r} timed out after {group_timeout} "
@@ -354,50 +497,163 @@ def pytest_runtestloop(session: pytest.Session) -> int | None:
             f"Increase timeout with --isolated-timeout, isolated_timeout ini, "
                 f"or @pytest.mark.isolated(timeout=N)."
             )
-            for it in group_items:
-                emit_report(it, "call", "failed", longrepr=msg)
-                session.testsfailed += 1
+            _emit_failure_for_items(group_items, msg, session, capture_passed)
             continue
 
+        # Handle crash during collection (no results produced)
         if not results:
+            stderr_text = proc_stderr.decode("utf-8", errors="replace").strip()
             msg = (
                 f"Subprocess group={group_name!r} exited with code {returncode} "
                 f"and produced no per-test report. The subprocess may have "
                 f"crashed during collection."
             )
-            for it in group_items:
-                emit_report(it, "call", "failed", longrepr=msg)
-                session.testsfailed += 1
+            if stderr_text:
+                msg += f"\n\nSubprocess stderr:\n{stderr_text}"
+            _emit_failure_for_items(group_items, msg, session, capture_passed)
             continue
 
+        # Handle mid-test crash: detect tests with incomplete phases
+        # (e.g., setup recorded but call missing indicates crash during test)
+        crashed_items: list[pytest.Item] = []
+
+        for it in group_items:
+            node_results = results.get(it.nodeid, {})
+            # Test started (setup passed) but crashed before call completed.
+            # If setup was skipped or failed, no call phase is expected.
+            if node_results and "call" not in node_results:
+                setup_result = node_results.get("setup", {})
+                setup_outcome = setup_result.get("outcome", "")
+                if setup_outcome == "passed":
+                    crashed_items.append(it)
+
+        # If we detected crashed tests, also find tests that never ran
+        # (they come after the crashing test in the same group)
+        not_run_items: list[pytest.Item] = []
+        if crashed_items:
+            for it in group_items:
+                node_results = results.get(it.nodeid, {})
+                # Test never started (no results at all)
+                if not node_results:
+                    not_run_items.append(it)
+
+        if crashed_items or not_run_items:
+            stderr_text = proc_stderr.decode("utf-8", errors="replace").strip()
+
+            # Emit failures for crashed tests
+            if crashed_items:
+                crash_msg = _format_crash_message(
+                    returncode, "during test execution", stderr_text
+                )
+
+                for it in crashed_items:
+                    node_results = results.get(it.nodeid, {})
+                    # Emit setup phase if it was recorded
+                    if "setup" in node_results:
+                        rec = node_results["setup"]
+                        _emit_report(
+                            it,
+                            when="setup",
+                            outcome=rec["outcome"],
+                            longrepr=rec.get("longrepr", ""),
+                            duration=rec.get("duration", 0.0),
+                            capture_passed=capture_passed,
+                        )
+                    else:
+                        _emit_report(
+                            it,
+                            when="setup",
+                            outcome="passed",
+                            capture_passed=capture_passed,
+                        )
+
+                    # Emit call phase as failed with crash info
+                    xfail_marker = it.get_closest_marker("xfail")
+                    if xfail_marker:
+                        _emit_report(
+                            it,
+                            when="call",
+                            outcome="skipped",
+                            longrepr=crash_msg,
+                            wasxfail=True,
+                            capture_passed=capture_passed,
+                        )
+                    else:
+                        _emit_report(
+                            it,
+                            when="call",
+                            outcome="failed",
+                            longrepr=crash_msg,
+                            capture_passed=capture_passed,
+                        )
+                        session.testsfailed += 1
+
+                    _emit_report(
+                        it,
+                        when="teardown",
+                        outcome="passed",
+                        capture_passed=capture_passed,
+                    )
+                    # Remove from results so they're not processed again
+                    results.pop(it.nodeid, None)
+
+            # Emit failures for tests that never ran due to earlier crash
+            if not_run_items:
+                not_run_msg = _format_crash_message(
+                    returncode, "during earlier test execution", stderr_text
+                )
+                not_run_msg = f"Test did not run - {not_run_msg}"
+                _emit_failure_for_items(
+                    not_run_items, not_run_msg, session, capture_passed
+                )
+                for it in not_run_items:
+                    results.pop(it.nodeid, None)
+
         # Emit per-test results into parent (all phases)
         for it in group_items:
             node_results = results.get(it.nodeid, {})
 
+            # Skip tests that were already handled by crash detection above
+            if it.nodeid not in results:
+                continue
+
+            # Check if setup passed (to determine if missing call is expected)
+            setup_passed = (
+                "setup" in node_results and node_results["setup"]["outcome"] == "passed"
+            )
+
             # Emit setup, call, teardown in order
             for when in ["setup", "call", "teardown"]:  # type: ignore[assignment]
                 if when not in node_results:
-                    # If missing a phase, synthesize a passing one
-                    if when == "call" and not node_results:
-                        # Test completely missing - mark as failed
-                        emit_report(
+                    # If missing call phase AND setup passed, emit a failure
+                    # (crash detection above should handle most cases, but this
+                    # is a safety net for unexpected situations)
+                    # If setup failed, missing call is expected (pytest skips call)
+                    if when == "call" and setup_passed:
+                        msg = (
+                            "Missing 'call' phase result"
+                            f" from subprocess for {it.nodeid}"
+                        )
+                        _emit_report(
                             it,
-                            "call",
-                            "failed",
-                            longrepr=f"Missing result from subprocess for {it.nodeid}",
+                            when="call",
+                            outcome="failed",
+                            longrepr=msg,
+                            capture_passed=capture_passed,
                         )
                         session.testsfailed += 1
                     continue
 
                 rec = node_results[when]
-                emit_report(
+                _emit_report(
                     it,
                     when=when,  # type: ignore[arg-type]
-                    outcome=rec["outcome"],
+                    outcome=rec.get("outcome", "failed"),  # type: ignore[arg-type]
                     longrepr=rec.get("longrepr", ""),
                     duration=rec.get("duration", 0.0),
                     stdout=rec.get("stdout", ""),
                     stderr=rec.get("stderr", ""),
+                    capture_passed=capture_passed,
                     sections=rec.get("sections"),
                     user_properties=rec.get("user_properties"),
                     wasxfail=rec.get("wasxfail", False),
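
The two-pass heuristic above (crashed mid-call vs. never started) can be illustrated with toy data:

```python
results = {
    "t.py::test_ok": {"setup": {"outcome": "passed"}, "call": {"outcome": "passed"}},
    "t.py::test_boom": {"setup": {"outcome": "passed"}},  # setup ran, call missing
    "t.py::test_after": {},  # no record at all: never started
}
crashed = [
    nodeid
    for nodeid, recs in results.items()
    if recs and "call" not in recs and recs.get("setup", {}).get("outcome") == "passed"
]
not_run = [nodeid for nodeid, recs in results.items() if not recs] if crashed else []
assert crashed == ["t.py::test_boom"]
assert not_run == ["t.py::test_after"]
```
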
@@ -406,6 +662,14 @@ def pytest_runtestloop(session: pytest.Session) -> int | None:
                 if when == "call" and rec["outcome"] == "failed":
                     session.testsfailed += 1
 
+        # Check if we should exit early due to maxfail/exitfirst
+        if (
+            session.testsfailed
+            and session.config.option.maxfail
+            and session.testsfailed >= session.config.option.maxfail
+        ):
+            return 1
+
     # Run normal tests in-process
     for idx, item in enumerate(normal_items):
         nextitem = normal_items[idx + 1] if idx + 1 < len(normal_items) else None
pytest_isolated-0.4.0.dist-info/METADATA CHANGED
@@ -1,9 +1,12 @@
 Metadata-Version: 2.4
 Name: pytest-isolated
-Version: 0.3.0
+Version: 0.4.0
 Summary: Run marked pytest tests in grouped subprocesses (cross-platform).
 Author: pytest-isolated contributors
 License-Expression: MIT
+Project-URL: Homepage, https://github.com/dyollb/pytest-isolated
+Project-URL: Repository, https://github.com/dyollb/pytest-isolated
+Project-URL: Issues, https://github.com/dyollb/pytest-isolated/issues
 Classifier: Development Status :: 4 - Beta
 Classifier: Framework :: Pytest
 Classifier: Intended Audience :: Developers
@@ -29,7 +32,7 @@ Dynamic: license-file
 [![Tests](https://github.com/dyollb/pytest-isolated/actions/workflows/test.yml/badge.svg)](https://github.com/dyollb/pytest-isolated/actions/workflows/test.yml)
 [![PyPI](https://img.shields.io/pypi/v/pytest-isolated.svg)](https://pypi.org/project/pytest-isolated/)
 
-A pytest plugin that runs marked tests in isolated subprocesses with intelligent grouping.
+A cross-platform pytest plugin that runs marked tests in isolated subprocesses with intelligent grouping.
 
 ## Features
 
@@ -41,6 +44,10 @@ A pytest plugin that runs marked tests in isolated subprocesses with intelligent
 - Configurable timeouts to prevent hanging subprocesses
 - Cross-platform: Linux, macOS, Windows
 
+## Cheatsheet for pytest-forked users
+
+This plugin is inspired by [pytest-forked](https://github.com/pytest-dev/pytest-forked). See the [pytest-forked migration guide](docs/pytest-forked-migration.md) for a quick reference comparing features.
+
 ## Installation
 
 ```bash
@@ -63,6 +70,7 @@ def test_isolated():
 Tests with the same group run together in one subprocess:
 
 ```python
+# Using keyword argument
 @pytest.mark.isolated(group="mygroup")
 def test_one():
     shared_state.append(1)
@@ -71,6 +79,11 @@ def test_one():
 def test_two():
     # Sees state from test_one
     assert len(shared_state) == 2
+
+# Or using positional argument
+@pytest.mark.isolated("mygroup")
+def test_three():
+    shared_state.append(3)
 ```
 
 Set timeout per test group:
@@ -82,13 +95,48 @@ def test_with_timeout():
     expensive_operation()
 ```
 
-Tests without an explicit group are automatically grouped by module.
+**Note:** Tests without an explicit `group` parameter each run in their own unique subprocess for maximum isolation.
+
+### Class and Module Markers
+
+Apply to entire classes to share state between methods:
+
+```python
+@pytest.mark.isolated
+class TestDatabase:
+    def test_setup(self):
+        self.db = create_database()
+
+    def test_query(self):
+        # Shares state with test_setup
+        result = self.db.query("SELECT 1")
+        assert result
+```
+
+Apply to entire modules using `pytestmark`:
+
+```python
+import pytest
+
+pytestmark = pytest.mark.isolated
+
+def test_one():
+    # Runs in isolated subprocess
+    pass
+
+def test_two():
+    # Shares subprocess with test_one
+    pass
+```
 
 ## Configuration
 
 ### Command Line
 
 ```bash
+# Run all tests in isolation (even without @pytest.mark.isolated)
+pytest --isolated
+
 # Set isolated test timeout (seconds)
 pytest --isolated-timeout=60
 
pytest_isolated-0.4.0.dist-info/RECORD ADDED
@@ -0,0 +1,9 @@
+pytest_isolated/__init__.py,sha256=sBrN76YMZ6pbqIZzb-Yz5SyDPluz0TCQcNJJ3kYSSc0,89
+pytest_isolated/plugin.py,sha256=33KIXSKlaC6Usq8ZrJmy2fjSTyVYj7q85777y7jWAGU,25698
+pytest_isolated/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pytest_isolated-0.4.0.dist-info/licenses/LICENSE,sha256=WECJyowi685PZSnKcA4Tqs7jukfzbnk7iMPLnm_q4JI,1067
+pytest_isolated-0.4.0.dist-info/METADATA,sha256=wQ548E9j84pUfBNIYzDtbdcAh0CaGLXCiq7XGkXFI28,6679
+pytest_isolated-0.4.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+pytest_isolated-0.4.0.dist-info/entry_points.txt,sha256=HgRNPjIGoPBF1pkhma4UtaSwhpOVB8oZRZ0L1FcZXgk,45
+pytest_isolated-0.4.0.dist-info/top_level.txt,sha256=FAtpozhvI-YaiFoZMepi9JAm6e87mW-TM1Ovu5xLOxg,16
+pytest_isolated-0.4.0.dist-info/RECORD,,
pytest_isolated-0.3.0.dist-info/RECORD DELETED
@@ -1,9 +0,0 @@
-pytest_isolated/__init__.py,sha256=b7RfFW9uJXumJ6DTDdg-VvSUEeh-Pc2srTjCKJRxB7k,89
-pytest_isolated/plugin.py,sha256=ISfxTMyJVaddrqCFZOdBXFnC4E_Lh22gRUaNRb1Wo8I,15179
-pytest_isolated/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pytest_isolated-0.3.0.dist-info/licenses/LICENSE,sha256=WECJyowi685PZSnKcA4Tqs7jukfzbnk7iMPLnm_q4JI,1067
-pytest_isolated-0.3.0.dist-info/METADATA,sha256=CDzpvrFqvUFguTk9_Yr32h88GatvKDHCO0d7CsVC5Ug,5384
-pytest_isolated-0.3.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
-pytest_isolated-0.3.0.dist-info/entry_points.txt,sha256=HgRNPjIGoPBF1pkhma4UtaSwhpOVB8oZRZ0L1FcZXgk,45
-pytest_isolated-0.3.0.dist-info/top_level.txt,sha256=FAtpozhvI-YaiFoZMepi9JAm6e87mW-TM1Ovu5xLOxg,16
-pytest_isolated-0.3.0.dist-info/RECORD,,