pytest-isolated 0.4.0__py3-none-any.whl → 0.4.1__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
pytest_isolated/plugin.py CHANGED
@@ -1,678 +1,19 @@
- from __future__ import annotations
-
- import contextlib
- import json
- import os
- import subprocess
- import sys
- import tempfile
- import time
- from collections import OrderedDict
- from pathlib import Path
- from typing import Any, Final, Literal, TypedDict, cast
-
- import pytest
-
- # Guard to prevent infinite recursion (parent spawns child; child must not spawn again)
- SUBPROC_ENV: Final = "PYTEST_RUNNING_IN_SUBPROCESS"
-
- # Parent tells child where to write JSONL records per test call
- SUBPROC_REPORT_PATH: Final = "PYTEST_SUBPROCESS_REPORT_PATH"
-
- # Options that should be forwarded to subprocess (flags without values)
- _FORWARD_FLAGS: Final = {
-     "-v",
-     "--verbose",
-     "-q",
-     "--quiet",
-     "-s",  # disable output capturing
-     "-l",
-     "--showlocals",
-     "--strict-markers",
-     "--strict-config",
-     "-x",  # exit on first failure
-     "--exitfirst",
- }
-
- # Options that should be forwarded to subprocess (options with values)
- _FORWARD_OPTIONS_WITH_VALUE: Final = {
-     "--tb",  # traceback style
-     "-r",  # show extra test summary info
-     "--capture",  # capture method (fd/sys/no/tee-sys)
- }
-
-
- def _has_isolated_marker(obj: Any) -> bool:
-     """Check if an object has the isolated marker in its pytestmark."""
-     markers = getattr(obj, "pytestmark", [])
-     if not isinstance(markers, list):
-         markers = [markers]
-     return any(getattr(m, "name", None) == "isolated" for m in markers)
-
-
- # ---------------------------------------------------------------------------
- # Cross-platform crash detection helpers
- # ---------------------------------------------------------------------------
-
-
- def _format_crash_reason(returncode: int) -> str:
-     """Format a human-readable crash reason from a return code.
-
-     On Unix, negative return codes indicate signal numbers.
-     On Windows, we report the exit code directly.
-     """
-     if returncode < 0:
-         # Unix: negative return code is -signal_number
-         return f"crashed with signal {-returncode}"
-     # Windows or other: positive exit code
-     return f"crashed with exit code {returncode}"
-
-
- def _format_crash_message(
-     returncode: int,
-     context: str,
-     stderr_text: str = "",
- ) -> str:
-     """Build a complete crash error message with optional stderr output.
-
-     Args:
-         returncode: The subprocess return code.
-         context: Description of when the crash occurred (e.g., "during test execution").
-         stderr_text: Optional captured stderr from the subprocess.
-
-     Returns:
-         A formatted error message suitable for test failure reports.
-     """
-     reason = _format_crash_reason(returncode)
-     msg = f"Subprocess {reason} {context}."
-     if stderr_text:
-         msg += f"\n\nSubprocess stderr:\n{stderr_text}"
-     return msg
-
-
- class _TestRecord(TypedDict, total=False):
-     """Structure for test phase results from subprocess."""
-
-     nodeid: str
-     when: Literal["setup", "call", "teardown"]
-     outcome: Literal["passed", "failed", "skipped"]
-     longrepr: str
-     duration: float
-     stdout: str
-     stderr: str
-     keywords: list[str]
-     sections: list[tuple[str, str]]
-     user_properties: list[tuple[str, Any]]
-     wasxfail: bool
-
-
- def pytest_addoption(parser: pytest.Parser) -> None:
-     group = parser.getgroup("isolated")
-     group.addoption(
-         "--isolated",
-         action="store_true",
-         default=False,
-         help="Run all tests in isolated subprocesses",
-     )
-     group.addoption(
-         "--isolated-timeout",
-         type=int,
-         default=None,
-         help="Timeout in seconds for isolated test groups (default: 300)",
-     )
-     group.addoption(
-         "--no-isolation",
-         action="store_true",
-         default=False,
-         help="Disable subprocess isolation (for debugging)",
-     )
-     parser.addini(
-         "isolated_timeout",
-         type="string",
-         default="300",
-         help="Default timeout in seconds for isolated test groups",
-     )
-     parser.addini(
-         "isolated_capture_passed",
-         type="bool",
-         default=False,
-         help="Capture output for passed tests (default: False)",
-     )
-
-
- def pytest_configure(config: pytest.Config) -> None:
-     config.addinivalue_line(
-         "markers",
-         "isolated(group=None, timeout=None): run this test in a grouped "
-         "fresh Python subprocess; tests with the same group run together in "
-         "one subprocess. timeout (seconds) overrides global --isolated-timeout.",
-     )
-
-
- # ----------------------------
- # CHILD MODE: record results + captured output per test phase
- # ----------------------------
- def pytest_runtest_logreport(report: pytest.TestReport) -> None:
-     """Write test phase results to a JSONL file when running in subprocess mode."""
-     path = os.environ.get(SUBPROC_REPORT_PATH)
-     if not path:
-         return
-
-     # Capture ALL phases (setup, call, teardown), not just call
-     rec: _TestRecord = {
-         "nodeid": report.nodeid,
-         "when": report.when,  # setup, call, or teardown
-         "outcome": report.outcome,  # passed/failed/skipped
-         "longrepr": str(report.longrepr) if report.longrepr else "",
-         "duration": getattr(report, "duration", 0.0),
-         "stdout": getattr(report, "capstdout", "") or "",
-         "stderr": getattr(report, "capstderr", "") or "",
-         # Preserve test metadata for proper reporting
-         "keywords": list(report.keywords),
-         "sections": getattr(report, "sections", []),  # captured logs, etc.
-         "user_properties": getattr(report, "user_properties", []),
-         "wasxfail": hasattr(report, "wasxfail"),
-     }
-     with Path(path).open("a", encoding="utf-8") as f:
-         f.write(json.dumps(rec) + "\n")
-
-
- # ----------------------------
- # PARENT MODE: group marked tests
- # ----------------------------
- def pytest_collection_modifyitems(
-     config: pytest.Config, items: list[pytest.Item]
- ) -> None:
-     if os.environ.get(SUBPROC_ENV) == "1":
-         return  # child should not do grouping
-
-     # If --no-isolation is set, treat all tests as normal (no subprocess isolation)
-     if config.getoption("no_isolation", False):
-         config._subprocess_groups = OrderedDict()  # type: ignore[attr-defined]
-         config._subprocess_normal_items = items  # type: ignore[attr-defined]
-         return
-
-     # If --isolated is set, run all tests in isolation
-     run_all_isolated = config.getoption("isolated", False)
-
-     groups: OrderedDict[str, list[pytest.Item]] = OrderedDict()
-     group_timeouts: dict[str, int | None] = {}  # Track timeout per group
-     normal: list[pytest.Item] = []
-
-     for item in items:
-         m = item.get_closest_marker("isolated")
-
-         # Skip non-isolated tests unless --isolated flag is set
-         if not m and not run_all_isolated:
-             normal.append(item)
-             continue
-
-         # Get group from marker (positional arg, keyword arg, or default)
-         group = None
-         if m:
-             # Support @pytest.mark.isolated("groupname") - positional arg
-             if m.args:
-                 group = m.args[0]
-             # Support @pytest.mark.isolated(group="groupname") - keyword arg
-             elif "group" in m.kwargs:
-                 group = m.kwargs["group"]
-
-         # Default grouping logic
-         if group is None:
-             # If --isolated flag is used (no explicit marker), use unique nodeid
-             if not m:
-                 group = item.nodeid
-             # Check if marker was applied to a class or module
-             elif isinstance(item, pytest.Function):
-                 if item.cls is not None and _has_isolated_marker(item.cls):
-                     # Group by class name (module::class)
-                     parts = item.nodeid.split("::")
-                     group = "::".join(parts[:2]) if len(parts) >= 3 else item.nodeid
-                 elif _has_isolated_marker(item.module):
-                     # Group by module name (first part of nodeid)
-                     parts = item.nodeid.split("::")
-                     group = parts[0]
-                 else:
-                     # Explicit marker on function uses unique nodeid
-                     group = item.nodeid
-             else:
-                 # Non-Function items use unique nodeid
-                 group = item.nodeid
-
-         # Store group-specific timeout (first marker wins)
-         group_key = str(group)
-         if group_key not in group_timeouts:
-             timeout = m.kwargs.get("timeout") if m else None
-             group_timeouts[group_key] = timeout
-
-         groups.setdefault(group_key, []).append(item)
-
-     config._subprocess_groups = groups  # type: ignore[attr-defined]
-     config._subprocess_group_timeouts = group_timeouts  # type: ignore[attr-defined]
-     config._subprocess_normal_items = normal  # type: ignore[attr-defined]
-
-
- def _emit_report(
-     item: pytest.Item,
-     *,
-     when: Literal["setup", "call", "teardown"],
-     outcome: Literal["passed", "failed", "skipped"],
-     longrepr: str = "",
-     duration: float = 0.0,
-     stdout: str = "",
-     stderr: str = "",
-     sections: list[tuple[str, str]] | None = None,
-     user_properties: list[tuple[str, Any]] | None = None,
-     wasxfail: bool = False,
-     capture_passed: bool = False,
- ) -> None:
-     """Emit a test report for a specific test phase."""
-     call = pytest.CallInfo.from_call(lambda: None, when=when)
-     rep = pytest.TestReport.from_item_and_call(item, call)
-     rep.outcome = outcome
-     rep.duration = duration
-
-     if user_properties:
-         rep.user_properties = user_properties
-
-     if wasxfail:
-         rep.wasxfail = "reason: xfail"
-
-     # For skipped tests, longrepr needs to be a tuple (path, lineno, reason)
-     if outcome == "skipped" and longrepr:
-         # Parse longrepr or create simple tuple
-         lineno = item.location[1] if item.location[1] is not None else -1
-         rep.longrepr = (str(item.fspath), lineno, longrepr)  # type: ignore[assignment]
-     elif outcome == "failed" and longrepr:
-         rep.longrepr = longrepr
-
-     # Add captured output as sections (capstdout/capstderr are read-only)
-     if outcome == "failed" or (outcome == "passed" and capture_passed):
-         all_sections = list(sections) if sections else []
-         if stdout:
-             all_sections.append(("Captured stdout call", stdout))
-         if stderr:
-             all_sections.append(("Captured stderr call", stderr))
-         if all_sections:
-             rep.sections = all_sections
-
-     item.ihook.pytest_runtest_logreport(report=rep)
+ """pytest-isolated plugin - Run tests in isolated subprocesses.

+ This plugin allows running tests in isolated subprocesses to prevent state leakage.
+ """

- def _emit_failure_for_items(
-     items: list[pytest.Item],
-     error_message: str,
-     session: pytest.Session,
-     capture_passed: bool = False,
- ) -> None:
-     """Emit synthetic failure reports when subprocess execution fails.
-
-     When a subprocess crashes, times out, or fails during collection, we emit
-     synthetic test phase reports to mark affected tests as failed. We report
-     setup="passed" and teardown="passed" (even though these phases never ran)
-     to ensure pytest categorizes the test as FAILED rather than ERROR. The actual
-     failure is reported in the call phase with the error message.
-
-     For xfail tests, call is reported as skipped with wasxfail=True to maintain
-     proper xfail semantics.
-     """
-     for it in items:
-         xfail_marker = it.get_closest_marker("xfail")
-         _emit_report(it, when="setup", outcome="passed", capture_passed=capture_passed)
-         if xfail_marker:
-             _emit_report(
-                 it,
-                 when="call",
-                 outcome="skipped",
-                 longrepr=error_message,
-                 wasxfail=True,
-                 capture_passed=capture_passed,
-             )
-         else:
-             _emit_report(
-                 it,
-                 when="call",
-                 outcome="failed",
-                 longrepr=error_message,
-                 capture_passed=capture_passed,
-             )
-             session.testsfailed += 1
-         _emit_report(
-             it, when="teardown", outcome="passed", capture_passed=capture_passed
-         )
-
-
- def pytest_runtestloop(session: pytest.Session) -> int | None:
-     """Execute isolated test groups in subprocesses and remaining tests in-process.
-
-     Any subprocess timeouts are caught and reported as test failures; the
-     subprocess.TimeoutExpired exception is not propagated to the caller.
-     """
-     if os.environ.get(SUBPROC_ENV) == "1":
-         return None  # child runs the normal loop
-
-     config = session.config
-     groups = getattr(config, "_subprocess_groups", OrderedDict())
-     if not isinstance(groups, OrderedDict):
-         groups = OrderedDict()
-     group_timeouts: dict[str, int | None] = getattr(
-         config, "_subprocess_group_timeouts", {}
-     )
-     normal_items: list[pytest.Item] = getattr(
-         config, "_subprocess_normal_items", session.items
-     )
-
-     # Get default timeout configuration
-     timeout_opt = config.getoption("isolated_timeout", None)
-     timeout_ini = config.getini("isolated_timeout")
-     default_timeout = timeout_opt or (int(timeout_ini) if timeout_ini else 300)
-
-     # Get capture configuration
-     capture_passed = config.getini("isolated_capture_passed")
-
-     # Run groups
-     for group_name, group_items in groups.items():
-         nodeids = [it.nodeid for it in group_items]
-
-         # Get timeout for this group (marker timeout > global timeout)
-         group_timeout = group_timeouts.get(group_name) or default_timeout
-
-         # file where the child will append JSONL records
-         with tempfile.NamedTemporaryFile(
-             prefix="pytest-subproc-", suffix=".jsonl", delete=False
-         ) as tf:
-             report_path = tf.name
-
-         env = os.environ.copy()
-         env[SUBPROC_ENV] = "1"
-         env[SUBPROC_REPORT_PATH] = report_path
-
-         # Forward relevant pytest options to subprocess for consistency
-         # Only forward specific options that affect test execution behavior
-         forwarded_args = []
-         if hasattr(config, "invocation_params") and hasattr(
-             config.invocation_params, "args"
-         ):
-             skip_next = False
-
-             for arg in config.invocation_params.args:
-                 if skip_next:
-                     skip_next = False
-                     continue
-
-                 # Forward only explicitly allowed options
-                 if arg in _FORWARD_FLAGS:
-                     forwarded_args.append(arg)
-                 elif arg in _FORWARD_OPTIONS_WITH_VALUE:
-                     forwarded_args.append(arg)
-                     skip_next = True  # Next arg is the value
-                 elif arg.startswith(
-                     tuple(f"{opt}=" for opt in _FORWARD_OPTIONS_WITH_VALUE)
-                 ):
-                     forwarded_args.append(arg)
-
-                 # Skip everything else (positional args, test paths, unknown options)
-
-         # Build pytest command for subprocess
-         cmd = [sys.executable, "-m", "pytest"]
-         cmd.extend(forwarded_args)
-
-         # Pass rootdir to subprocess to ensure it uses the same project root
-         if config.rootpath:
-             cmd.extend(["--rootdir", str(config.rootpath)])
-
-         # Add the test nodeids
-         cmd.extend(nodeids)
-
-         start_time = time.time()
-
-         # Determine the working directory for the subprocess
-         # Use rootpath if set, otherwise use invocation directory
-         # This ensures nodeids (which are relative to rootpath) can be resolved
-         subprocess_cwd = None
-         if config.rootpath:
-             subprocess_cwd = str(config.rootpath)
-         elif hasattr(config, "invocation_params") and hasattr(
-             config.invocation_params, "dir"
-         ):
-             subprocess_cwd = str(config.invocation_params.dir)
-
-         proc_stderr = b""
-         try:
-             proc = subprocess.run(
-                 cmd,
-                 env=env,
-                 timeout=group_timeout,
-                 capture_output=True,
-                 check=False,
-                 cwd=subprocess_cwd,
-             )
-             returncode = proc.returncode
-             proc_stderr = proc.stderr or b""
-             timed_out = False
-         except subprocess.TimeoutExpired as exc:
-             returncode = -1
-             proc_stderr = exc.stderr or b""
-             timed_out = True
-
-         execution_time = time.time() - start_time
-
-         # Gather results from JSONL file
-         results: dict[str, dict[str, _TestRecord]] = {}
-         report_file = Path(report_path)
-         if report_file.exists():
-             with report_file.open(encoding="utf-8") as f:
-                 for line in f:
-                     file_line = line.strip()
-                     if not file_line:
-                         continue
-                     rec = cast(_TestRecord, json.loads(file_line))
-                     nodeid = rec["nodeid"]
-                     when = rec["when"]
-
-                     if nodeid not in results:
-                         results[nodeid] = {}
-                     results[nodeid][when] = rec
-             with contextlib.suppress(OSError):
-                 report_file.unlink()
-
-         # For crashes (negative returncode), check if we should treat as xfail
-         if returncode < 0 and results:
-             # Check if all tests in this group are marked xfail
-             all_xfail = all(it.get_closest_marker("xfail") for it in group_items)
-             if all_xfail:
-                 # Override any results from subprocess - crash is the expected outcome
-                 msg = (
-                     f"Subprocess crashed with signal {-returncode} "
-                     f"(expected for xfail test)"
-                 )
-                 _emit_failure_for_items(group_items, msg, session, capture_passed)
-                 continue
-
-         # Handle timeout
-         if timed_out:
-             msg = (
-                 f"Subprocess group={group_name!r} timed out after {group_timeout} "
-                 f"seconds (execution time: {execution_time:.2f}s). "
-                 f"Increase timeout with --isolated-timeout, isolated_timeout ini, "
-                 f"or @pytest.mark.isolated(timeout=N)."
-             )
-             _emit_failure_for_items(group_items, msg, session, capture_passed)
-             continue
-
-         # Handle crash during collection (no results produced)
-         if not results:
-             stderr_text = proc_stderr.decode("utf-8", errors="replace").strip()
-             msg = (
-                 f"Subprocess group={group_name!r} exited with code {returncode} "
-                 f"and produced no per-test report. The subprocess may have "
-                 f"crashed during collection."
-             )
-             if stderr_text:
-                 msg += f"\n\nSubprocess stderr:\n{stderr_text}"
-             _emit_failure_for_items(group_items, msg, session, capture_passed)
-             continue
-
-         # Handle mid-test crash: detect tests with incomplete phases
-         # (e.g., setup recorded but call missing indicates crash during test)
-         crashed_items: list[pytest.Item] = []
-
-         for it in group_items:
-             node_results = results.get(it.nodeid, {})
-             # Test started (setup passed) but crashed before call completed.
-             # If setup was skipped or failed, no call phase is expected.
-             if node_results and "call" not in node_results:
-                 setup_result = node_results.get("setup", {})
-                 setup_outcome = setup_result.get("outcome", "")
-                 if setup_outcome == "passed":
-                     crashed_items.append(it)
-
-         # If we detected crashed tests, also find tests that never ran
-         # (they come after the crashing test in the same group)
-         not_run_items: list[pytest.Item] = []
-         if crashed_items:
-             for it in group_items:
-                 node_results = results.get(it.nodeid, {})
-                 # Test never started (no results at all)
-                 if not node_results:
-                     not_run_items.append(it)
-
-         if crashed_items or not_run_items:
-             stderr_text = proc_stderr.decode("utf-8", errors="replace").strip()
-
-             # Emit failures for crashed tests
-             if crashed_items:
-                 crash_msg = _format_crash_message(
-                     returncode, "during test execution", stderr_text
-                 )
-
-                 for it in crashed_items:
-                     node_results = results.get(it.nodeid, {})
-                     # Emit setup phase if it was recorded
-                     if "setup" in node_results:
-                         rec = node_results["setup"]
-                         _emit_report(
-                             it,
-                             when="setup",
-                             outcome=rec["outcome"],
-                             longrepr=rec.get("longrepr", ""),
-                             duration=rec.get("duration", 0.0),
-                             capture_passed=capture_passed,
-                         )
-                     else:
-                         _emit_report(
-                             it,
-                             when="setup",
-                             outcome="passed",
-                             capture_passed=capture_passed,
-                         )
-
-                     # Emit call phase as failed with crash info
-                     xfail_marker = it.get_closest_marker("xfail")
-                     if xfail_marker:
-                         _emit_report(
-                             it,
-                             when="call",
-                             outcome="skipped",
-                             longrepr=crash_msg,
-                             wasxfail=True,
-                             capture_passed=capture_passed,
-                         )
-                     else:
-                         _emit_report(
-                             it,
-                             when="call",
-                             outcome="failed",
-                             longrepr=crash_msg,
-                             capture_passed=capture_passed,
-                         )
-                         session.testsfailed += 1
-
-                     _emit_report(
-                         it,
-                         when="teardown",
-                         outcome="passed",
-                         capture_passed=capture_passed,
-                     )
-                     # Remove from results so they're not processed again
-                     results.pop(it.nodeid, None)
-
-             # Emit failures for tests that never ran due to earlier crash
-             if not_run_items:
-                 not_run_msg = _format_crash_message(
-                     returncode, "during earlier test execution", stderr_text
-                 )
-                 not_run_msg = f"Test did not run - {not_run_msg}"
-                 _emit_failure_for_items(
-                     not_run_items, not_run_msg, session, capture_passed
-                 )
-                 for it in not_run_items:
-                     results.pop(it.nodeid, None)
-
-         # Emit per-test results into parent (all phases)
-         for it in group_items:
-             node_results = results.get(it.nodeid, {})
-
-             # Skip tests that were already handled by crash detection above
-             if it.nodeid not in results:
-                 continue
-
-             # Check if setup passed (to determine if missing call is expected)
-             setup_passed = (
-                 "setup" in node_results and node_results["setup"]["outcome"] == "passed"
-             )
-
-             # Emit setup, call, teardown in order
-             for when in ["setup", "call", "teardown"]:  # type: ignore[assignment]
-                 if when not in node_results:
-                     # If missing call phase AND setup passed, emit a failure
-                     # (crash detection above should handle most cases, but this
-                     # is a safety net for unexpected situations)
-                     # If setup failed, missing call is expected (pytest skips call)
-                     if when == "call" and setup_passed:
-                         msg = (
-                             "Missing 'call' phase result"
-                             f" from subprocess for {it.nodeid}"
-                         )
-                         _emit_report(
-                             it,
-                             when="call",
-                             outcome="failed",
-                             longrepr=msg,
-                             capture_passed=capture_passed,
-                         )
-                         session.testsfailed += 1
-                     continue
-
-                 rec = node_results[when]
-                 _emit_report(
-                     it,
-                     when=when,  # type: ignore[arg-type]
-                     outcome=rec.get("outcome", "failed"),  # type: ignore[arg-type]
-                     longrepr=rec.get("longrepr", ""),
-                     duration=rec.get("duration", 0.0),
-                     stdout=rec.get("stdout", ""),
-                     stderr=rec.get("stderr", ""),
-                     capture_passed=capture_passed,
-                     sections=rec.get("sections"),
-                     user_properties=rec.get("user_properties"),
-                     wasxfail=rec.get("wasxfail", False),
-                 )
-
-                 if when == "call" and rec["outcome"] == "failed":
-                     session.testsfailed += 1
-
-             # Check if we should exit early due to maxfail/exitfirst
-             if (
-                 session.testsfailed
-                 and session.config.option.maxfail
-                 and session.testsfailed >= session.config.option.maxfail
-             ):
-                 return 1
-
-     # Run normal tests in-process
-     for idx, item in enumerate(normal_items):
-         nextitem = normal_items[idx + 1] if idx + 1 < len(normal_items) else None
-         item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
+ from __future__ import annotations

-     return 1 if session.testsfailed else 0
+ from .config import pytest_addoption, pytest_configure
+ from .execution import pytest_runtestloop
+ from .grouping import pytest_collection_modifyitems
+ from .reporting import pytest_runtest_logreport
+
+ __all__: tuple[str, ...] = (
+     "pytest_addoption",
+     "pytest_collection_modifyitems",
+     "pytest_configure",
+     "pytest_runtest_logreport",
+     "pytest_runtestloop",
+ )
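
For orientation, the new plugin.py re-exports the same hook names from submodules (config, grouping, execution, reporting), presumably carrying over the behavior of the code removed above: the isolated marker accepts a group (positional or group=) and an optional per-group timeout, and a marked test without a group is keyed by its own nodeid. A minimal usage sketch, with illustrative test names that are not part of the package:

    import pytest

    @pytest.mark.isolated("db", timeout=60)
    def test_db_connect():
        assert True  # runs in one fresh subprocess together with other "db" tests

    @pytest.mark.isolated
    def test_mutates_global_state():
        assert True  # no group given: runs alone, grouped by its own nodeid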