tenzir-test 0.12.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tenzir_test/__init__.py +56 -0
- tenzir_test/_python_runner.py +93 -0
- tenzir_test/checks.py +31 -0
- tenzir_test/cli.py +216 -0
- tenzir_test/config.py +57 -0
- tenzir_test/engine/__init__.py +5 -0
- tenzir_test/engine/operations.py +36 -0
- tenzir_test/engine/registry.py +30 -0
- tenzir_test/engine/state.py +43 -0
- tenzir_test/engine/worker.py +8 -0
- tenzir_test/fixtures/__init__.py +614 -0
- tenzir_test/fixtures/node.py +257 -0
- tenzir_test/packages.py +33 -0
- tenzir_test/py.typed +0 -0
- tenzir_test/run.py +3678 -0
- tenzir_test/runners/__init__.py +164 -0
- tenzir_test/runners/_utils.py +17 -0
- tenzir_test/runners/custom_python_fixture_runner.py +171 -0
- tenzir_test/runners/diff_runner.py +139 -0
- tenzir_test/runners/ext_runner.py +28 -0
- tenzir_test/runners/runner.py +38 -0
- tenzir_test/runners/shell_runner.py +158 -0
- tenzir_test/runners/tenzir_runner.py +37 -0
- tenzir_test/runners/tql_runner.py +13 -0
- tenzir_test-0.12.0.dist-info/METADATA +81 -0
- tenzir_test-0.12.0.dist-info/RECORD +29 -0
- tenzir_test-0.12.0.dist-info/WHEEL +4 -0
- tenzir_test-0.12.0.dist-info/entry_points.txt +3 -0
- tenzir_test-0.12.0.dist-info/licenses/LICENSE +190 -0
tenzir_test/run.py
ADDED
|
@@ -0,0 +1,3678 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
from __future__ import annotations
|
|
5
|
+
|
|
6
|
+
import atexit
|
|
7
|
+
import builtins
|
|
8
|
+
import contextlib
|
|
9
|
+
import dataclasses
|
|
10
|
+
import difflib
|
|
11
|
+
import enum
|
|
12
|
+
import importlib.util
|
|
13
|
+
import logging
|
|
14
|
+
import os
|
|
15
|
+
import re
|
|
16
|
+
import shlex
|
|
17
|
+
import shutil
|
|
18
|
+
import signal
|
|
19
|
+
import subprocess
|
|
20
|
+
import sys
|
|
21
|
+
import tempfile
|
|
22
|
+
import threading
|
|
23
|
+
import typing
|
|
24
|
+
from collections.abc import Iterable, Iterator, Mapping, Sequence
|
|
25
|
+
from pathlib import Path
|
|
26
|
+
from types import ModuleType
|
|
27
|
+
from typing import Any, Literal, TypeVar, cast, overload
|
|
28
|
+
|
|
29
|
+
import yaml
|
|
30
|
+
|
|
31
|
+
import tenzir_test.fixtures as fixtures_impl
|
|
32
|
+
from . import packages
|
|
33
|
+
from .config import Settings, discover_settings
|
|
34
|
+
from .runners import (
|
|
35
|
+
ShellRunner, # noqa: F401
|
|
36
|
+
CustomPythonFixture, # noqa: F401
|
|
37
|
+
ExtRunner, # noqa: F401
|
|
38
|
+
Runner, # noqa: F401
|
|
39
|
+
TqlRunner, # noqa: F401
|
|
40
|
+
allowed_extensions,
|
|
41
|
+
get_runner_for_test as runners_get_runner,
|
|
42
|
+
iter_runners as runners_iter_runners,
|
|
43
|
+
runner_names,
|
|
44
|
+
)
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
TestConfig = dict[str, object]
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
@dataclasses.dataclass(frozen=True, slots=True)
|
|
51
|
+
class TestQueueItem:
|
|
52
|
+
runner: Runner
|
|
53
|
+
path: Path
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
@dataclasses.dataclass(frozen=True, slots=True)
|
|
57
|
+
class SuiteInfo:
|
|
58
|
+
name: str
|
|
59
|
+
directory: Path
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
@dataclasses.dataclass(slots=True)
|
|
63
|
+
class SuiteQueueItem:
|
|
64
|
+
suite: SuiteInfo
|
|
65
|
+
tests: list[TestQueueItem]
|
|
66
|
+
fixtures: tuple[str, ...]
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
@dataclasses.dataclass(slots=True)
|
|
70
|
+
class SuiteCandidate:
|
|
71
|
+
tests: list[TestQueueItem]
|
|
72
|
+
fixtures: tuple[str, ...] | None = None
|
|
73
|
+
parse_error: bool = False
|
|
74
|
+
fixture_mismatch: bool = False
|
|
75
|
+
mismatch_example: tuple[str, ...] | None = None
|
|
76
|
+
mismatch_path: Path | None = None
|
|
77
|
+
|
|
78
|
+
def record_fixtures(self, fixtures: tuple[str, ...]) -> None:
|
|
79
|
+
if self.fixtures is None:
|
|
80
|
+
self.fixtures = fixtures
|
|
81
|
+
return
|
|
82
|
+
if self.fixtures != fixtures:
|
|
83
|
+
self.fixture_mismatch = True
|
|
84
|
+
self.mismatch_example = fixtures
|
|
85
|
+
if self.tests:
|
|
86
|
+
self.mismatch_path = self.tests[-1].path
|
|
87
|
+
|
|
88
|
+
def mark_parse_error(self) -> None:
|
|
89
|
+
self.parse_error = True
|
|
90
|
+
|
|
91
|
+
def is_valid(self) -> bool:
|
|
92
|
+
return not self.parse_error and not self.fixture_mismatch
|
|
93
|
+
|
|
94
|
+
|
|
95
|
+
RunnerQueueItem = TestQueueItem | SuiteQueueItem
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
@dataclasses.dataclass(slots=True)
|
|
99
|
+
class ProjectSelection:
|
|
100
|
+
"""Describe which tests to execute for a given project root."""
|
|
101
|
+
|
|
102
|
+
root: Path
|
|
103
|
+
selectors: list[Path]
|
|
104
|
+
run_all: bool
|
|
105
|
+
kind: Literal["root", "satellite"]
|
|
106
|
+
|
|
107
|
+
def should_run(self) -> bool:
|
|
108
|
+
return self.run_all or bool(self.selectors)
|
|
109
|
+
|
|
110
|
+
|
|
111
|
+
@dataclasses.dataclass(slots=True)
|
|
112
|
+
class ExecutionPlan:
|
|
113
|
+
"""Aggregate the projects participating in a CLI invocation."""
|
|
114
|
+
|
|
115
|
+
root: ProjectSelection
|
|
116
|
+
satellites: list[ProjectSelection]
|
|
117
|
+
|
|
118
|
+
def projects(self) -> Iterator[ProjectSelection]:
|
|
119
|
+
yield self.root
|
|
120
|
+
yield from self.satellites
|
|
121
|
+
|
|
122
|
+
|
|
123
|
+
T = TypeVar("T")
|
|
124
|
+
|
|
125
|
+
|
|
126
|
+
class ExecutionMode(enum.Enum):
    """Supported discovery modes.

    PROJECT: a plain test project tree; PACKAGE: a Tenzir package directory.
    """

    PROJECT = "project"
    PACKAGE = "package"
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
class HarnessMode(enum.Enum):
    """Internal execution modes for the harness.

    COMPARE checks output against references, UPDATE rewrites references,
    PASSTHROUGH streams subprocess output directly to the developer.
    """

    COMPARE = "compare"
    UPDATE = "update"
    PASSTHROUGH = "passthrough"
|
|
139
|
+
|
|
140
|
+
|
|
141
|
+
class ColorMode(enum.Enum):
    """Supported color output policies."""

    AUTO = "auto"
    ALWAYS = "always"
    NEVER = "never"
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
def detect_execution_mode(root: Path) -> tuple[ExecutionMode, Path | None]:
    """Return the execution mode and detected package root for `root`.

    A directory is PACKAGE mode when it is itself a package directory, or when
    it is a `tests` directory whose parent is one; otherwise PROJECT mode with
    no package root.
    """
    if packages.is_package_dir(root):
        return ExecutionMode.PACKAGE, root
    if root.name == "tests":
        parent = root.parent
        if packages.is_package_dir(parent):
            return ExecutionMode.PACKAGE, parent
    return ExecutionMode.PROJECT, None
|
|
160
|
+
|
|
161
|
+
|
|
162
|
+
_settings: Settings | None = None
|
|
163
|
+
TENZIR_BINARY: str | None = None
|
|
164
|
+
TENZIR_NODE_BINARY: str | None = None
|
|
165
|
+
ROOT: Path = Path.cwd()
|
|
166
|
+
INPUTS_DIR: Path = ROOT / "inputs"
|
|
167
|
+
EXECUTION_MODE: ExecutionMode = ExecutionMode.PROJECT
|
|
168
|
+
_DETECTED_PACKAGE_ROOT: Path | None = None
|
|
169
|
+
HARNESS_MODE = HarnessMode.COMPARE
|
|
170
|
+
_COLOR_MODE = ColorMode.NEVER
|
|
171
|
+
COLORS_ENABLED = False
|
|
172
|
+
CHECKMARK = ""
|
|
173
|
+
CROSS = ""
|
|
174
|
+
INFO = ""
|
|
175
|
+
SKIP = ""
|
|
176
|
+
DEBUG_PREFIX = ""
|
|
177
|
+
BOLD = ""
|
|
178
|
+
CHECK_COLOR = ""
|
|
179
|
+
PASS_MAX_COLOR = ""
|
|
180
|
+
FAIL_COLOR = ""
|
|
181
|
+
SKIP_COLOR = ""
|
|
182
|
+
RESET_COLOR = ""
|
|
183
|
+
DETAIL_COLOR = ""
|
|
184
|
+
DIFF_ADD_COLOR = ""
|
|
185
|
+
PASS_SPECTRUM: list[str] = []
|
|
186
|
+
_COLORED_PASS_SPECTRUM = [
|
|
187
|
+
"\033[38;5;52m", # 0-9% deep red
|
|
188
|
+
"\033[38;5;88m", # 10-19% red
|
|
189
|
+
"\033[38;5;124m", # 20-29% dark orange
|
|
190
|
+
"\033[38;5;166m", # 30-39% orange
|
|
191
|
+
"\033[38;5;202m", # 40-49% amber
|
|
192
|
+
"\033[38;5;214m", # 50-59% golden
|
|
193
|
+
"\033[38;5;184m", # 60-69% yellow-green
|
|
194
|
+
"\033[38;5;148m", # 70-79% spring green
|
|
195
|
+
"\033[38;5;112m", # 80-89% medium green
|
|
196
|
+
"\033[38;5;28m", # 90-99% deep forest green
|
|
197
|
+
"\033[92m", # 100% bright green
|
|
198
|
+
]
|
|
199
|
+
_INTERRUPTED_NOTICE = "└─▶ test interrupted by user"
|
|
200
|
+
|
|
201
|
+
|
|
202
|
+
def _colors_available() -> bool:
    """Return whether ANSI colors should be emitted.

    Colors are disabled by an explicit NEVER mode or by the NO_COLOR
    convention; otherwise they are enabled. The previous dedicated
    `ALWAYS -> True` branch was dead code: the fall-through already returned
    True unconditionally. NOTE(review): AUTO currently behaves like ALWAYS
    and does not inspect whether stdout is a TTY — confirm this is intended.
    """
    if _COLOR_MODE is ColorMode.NEVER:
        return False
    if "NO_COLOR" in os.environ:
        return False
    # ALWAYS and AUTO both enable colors.
    return True
|
|
210
|
+
|
|
211
|
+
|
|
212
|
+
def _apply_color_palette() -> None:
    """Recompute every color-dependent module global from the current mode."""
    global COLORS_ENABLED
    global CHECKMARK, CROSS, INFO, SKIP, DEBUG_PREFIX, BOLD
    global CHECK_COLOR, PASS_MAX_COLOR, FAIL_COLOR, SKIP_COLOR
    global RESET_COLOR, DETAIL_COLOR, DIFF_ADD_COLOR
    global PASS_SPECTRUM, _INTERRUPTED_NOTICE

    COLORS_ENABLED = _colors_available()
    RESET_COLOR = "\033[0m" if COLORS_ENABLED else ""

    def _wrap(code: str, text: str) -> str:
        # Leave text unadorned when its color code is disabled.
        return f"{code}{text}{RESET_COLOR}" if code else text

    CHECK_COLOR = "\033[92;1m" if COLORS_ENABLED else ""
    PASS_MAX_COLOR = "\033[92m" if COLORS_ENABLED else ""
    FAIL_COLOR = "\033[31m" if COLORS_ENABLED else ""
    SKIP_COLOR = "\033[90;1m" if COLORS_ENABLED else ""
    DETAIL_COLOR = "\033[2;37m" if COLORS_ENABLED else ""
    DIFF_ADD_COLOR = "\033[32m" if COLORS_ENABLED else ""
    BOLD = "\033[1m" if COLORS_ENABLED else ""

    CHECKMARK = _wrap(CHECK_COLOR, "✔")
    CROSS = _wrap(FAIL_COLOR, "✘")
    INFO = _wrap("\033[94;1m" if COLORS_ENABLED else "", "i")
    SKIP = _wrap(SKIP_COLOR, "●")
    DEBUG_PREFIX = _wrap("\033[95m" if COLORS_ENABLED else "", "◆")

    if COLORS_ENABLED:
        PASS_SPECTRUM = list(_COLORED_PASS_SPECTRUM)
    else:
        PASS_SPECTRUM = ["" for _ in _COLORED_PASS_SPECTRUM]

    # Hoisted out of the f-string so no backslash appears in an expression
    # part (required before Python 3.12); output is unchanged.
    interrupt_color = "\033[33m" if COLORS_ENABLED else ""
    _INTERRUPTED_NOTICE = f"└─▶ {_wrap(interrupt_color, 'test interrupted by user')}"
|
|
257
|
+
|
|
258
|
+
|
|
259
|
+
def refresh_color_palette() -> None:
    """Re-evaluate ANSI color availability based on environment variables."""
    _apply_color_palette()
|
|
263
|
+
|
|
264
|
+
|
|
265
|
+
def colors_enabled() -> bool:
|
|
266
|
+
return COLORS_ENABLED
|
|
267
|
+
|
|
268
|
+
|
|
269
|
+
def get_color_mode() -> ColorMode:
|
|
270
|
+
return _COLOR_MODE
|
|
271
|
+
|
|
272
|
+
|
|
273
|
+
def set_color_mode(mode: ColorMode) -> None:
    """Update the global color mode and refresh the palette on change.

    Raises:
        TypeError: if `mode` is not a `ColorMode` member.
    """
    global _COLOR_MODE
    if not isinstance(mode, ColorMode):
        raise TypeError("mode must be an instance of ColorMode")
    if _COLOR_MODE is not mode:
        _COLOR_MODE = mode
        _apply_color_palette()
|
|
281
|
+
|
|
282
|
+
|
|
283
|
+
def colorize(text: str, color: str) -> str:
    """Wrap `text` with the given ANSI `color` code if colors are enabled.

    An empty `color` (the disabled-palette value) returns `text` unchanged.
    """
    return f"{color}{text}{RESET_COLOR}" if color else text
|
|
289
|
+
|
|
290
|
+
|
|
291
|
+
def format_failure_message(message: str) -> str:
    """Render a standardized failure line with optional ANSI coloring."""
    colored = colorize(message, FAIL_COLOR)
    return f"└─▶ {colored}"
|
|
295
|
+
|
|
296
|
+
|
|
297
|
+
_apply_color_palette()
|
|
298
|
+
ANSI_ESCAPE = re.compile(r"\x1b\[[0-9;]*m")
|
|
299
|
+
|
|
300
|
+
stdout_lock = threading.RLock()
|
|
301
|
+
|
|
302
|
+
_INTERRUPT_EVENT = threading.Event()
|
|
303
|
+
_INTERRUPT_ANNOUNCED = threading.Event()
|
|
304
|
+
_INTERRUPT_SIGNALS = {signal.SIGINT, signal.SIGTERM}
|
|
305
|
+
|
|
306
|
+
|
|
307
|
+
def interrupt_requested() -> bool:
    """Return whether a graceful shutdown was requested."""
    return _INTERRUPT_EVENT.is_set()
|
|
311
|
+
|
|
312
|
+
|
|
313
|
+
def _announce_interrupt() -> None:
    """Print the interrupt notice exactly once across all threads."""
    if _INTERRUPT_ANNOUNCED.is_set():
        return
    _INTERRUPT_ANNOUNCED.set()
    notice = f"{INFO} received interrupt; finishing active tests (press Ctrl+C again to abort)"
    with stdout_lock:
        print(notice)
|
|
321
|
+
|
|
322
|
+
|
|
323
|
+
def _request_interrupt() -> None:
    """Flag a graceful shutdown and announce it once.

    The previous `first_interrupt or not announced` guard was redundant:
    `_announce_interrupt` already short-circuits via `_INTERRUPT_ANNOUNCED`,
    so in every case where that guard was false the call would have been a
    no-op anyway. Calling it unconditionally preserves behavior exactly.
    """
    _INTERRUPT_EVENT.set()
    _announce_interrupt()
|
|
328
|
+
|
|
329
|
+
|
|
330
|
+
@contextlib.contextmanager
def _install_interrupt_handler() -> Iterator[None]:
    """Install a SIGINT handler that requests a graceful stop.

    The first Ctrl+C flags a graceful shutdown; a second one restores the
    default disposition and re-raises the signal so the process exits with
    the conventional status. On exit the previous handler is restored and
    the interrupt flags are reset.
    """
    previous = signal.getsignal(signal.SIGINT)

    def _on_sigint(
        signum: int, frame: object | None
    ) -> None:  # pragma: no cover - signal path
        if not interrupt_requested():
            _request_interrupt()
            return
        # Second interrupt: fall back to the default handler and re-deliver.
        signal.signal(signum, signal.SIG_DFL)
        signal.raise_signal(signum)

    signal.signal(signal.SIGINT, _on_sigint)
    try:
        yield
    finally:
        signal.signal(signal.SIGINT, previous)
        _INTERRUPT_EVENT.clear()
        _INTERRUPT_ANNOUNCED.clear()
|
|
350
|
+
|
|
351
|
+
|
|
352
|
+
def _is_interrupt_exit(returncode: int) -> bool:
    """Return True when `returncode` indicates death by SIGINT/SIGTERM."""
    if returncode < 0:
        # subprocess convention: negative codes encode the killing signal.
        return -returncode in _INTERRUPT_SIGNALS
    # Shell convention: 128 + signal number.
    return any(returncode == 128 + sig for sig in _INTERRUPT_SIGNALS)
|
|
356
|
+
|
|
357
|
+
|
|
358
|
+
_CURRENT_RETRY_CONTEXT = threading.local()
|
|
359
|
+
_CURRENT_SUITE_CONTEXT = threading.local()
|
|
360
|
+
|
|
361
|
+
|
|
362
|
+
@contextlib.contextmanager
def _push_suite_context(*, name: str, index: int, total: int) -> Iterator[None]:
    """Bind suite progress info to the current thread for the duration."""
    previous = getattr(_CURRENT_SUITE_CONTEXT, "value", None)
    _CURRENT_SUITE_CONTEXT.value = (name, index, total)
    try:
        yield
    finally:
        # Restore the outer context, or clear the slot when there was none.
        if previous is not None:
            _CURRENT_SUITE_CONTEXT.value = previous
        elif hasattr(_CURRENT_SUITE_CONTEXT, "value"):
            delattr(_CURRENT_SUITE_CONTEXT, "value")
|
|
374
|
+
|
|
375
|
+
|
|
376
|
+
def _current_suite_progress() -> tuple[str, int, int] | None:
    """Return (suite name, index, total) for this thread, if well-formed."""
    value = getattr(_CURRENT_SUITE_CONTEXT, "value", None)
    if not isinstance(value, tuple) or len(value) != 3:
        return None
    name, index, total = value
    if isinstance(name, str) and isinstance(index, int) and isinstance(total, int):
        return value
    return None
|
|
387
|
+
|
|
388
|
+
|
|
389
|
+
@contextlib.contextmanager
def _push_retry_context(*, attempt: int, max_attempts: int) -> Iterator[None]:
    """Bind retry progress to this thread; record it in `last` on exit."""
    previous = getattr(_CURRENT_RETRY_CONTEXT, "value", None)
    _CURRENT_RETRY_CONTEXT.value = (attempt, max_attempts)
    try:
        yield
    finally:
        # Keep the most recent progress so suffix formatting can still report
        # attempt counts after the context has closed.
        _CURRENT_RETRY_CONTEXT.last = (attempt, max_attempts)
        if previous is not None:
            _CURRENT_RETRY_CONTEXT.value = previous
        elif hasattr(_CURRENT_RETRY_CONTEXT, "value"):
            delattr(_CURRENT_RETRY_CONTEXT, "value")
|
|
402
|
+
|
|
403
|
+
|
|
404
|
+
def _current_retry_progress() -> tuple[int, int] | None:
    """Return (attempt, max_attempts) for this thread.

    Prefers the live value pushed by `_push_retry_context`; falls back to the
    value recorded when the most recent context exited. The previously
    duplicated inline validation of both candidates is factored into a single
    helper.
    """

    def _as_int_pair(candidate: object) -> tuple[int, int] | None:
        # Only trust a well-formed (int, int) tuple.
        if (
            isinstance(candidate, tuple)
            and len(candidate) == 2
            and isinstance(candidate[0], int)
            and isinstance(candidate[1], int)
        ):
            return cast(tuple[int, int], candidate)
        return None

    live = _as_int_pair(getattr(_CURRENT_RETRY_CONTEXT, "value", None))
    if live is not None:
        return live
    return _as_int_pair(getattr(_CURRENT_RETRY_CONTEXT, "last", None))
|
|
422
|
+
|
|
423
|
+
|
|
424
|
+
def should_suppress_failure_output() -> bool:
    """Return True while a non-final retry attempt is active on this thread.

    Only the live retry context counts; the `last` fallback is deliberately
    ignored here so output is never suppressed after the retries finished.
    """
    progress = getattr(_CURRENT_RETRY_CONTEXT, "value", None)
    if not isinstance(progress, tuple) or len(progress) != 2:
        return False
    attempt, max_attempts = progress
    if not (isinstance(attempt, int) and isinstance(max_attempts, int)):
        return False
    return attempt < max_attempts
|
|
435
|
+
|
|
436
|
+
|
|
437
|
+
def _format_attempt_suffix() -> str:
    """Return a dim `attempts=i/n` suffix, or empty on a first/only attempt."""
    progress = _current_retry_progress()
    if not progress:
        return ""
    attempt, max_attempts = progress
    # Nothing worth showing for a first attempt or a single-attempt budget.
    if attempt <= 1 or max_attempts <= 1:
        return ""
    return " " + colorize(f"attempts={attempt}/{max_attempts}", DETAIL_COLOR)
|
|
446
|
+
|
|
447
|
+
|
|
448
|
+
def _format_suite_suffix() -> str:
    """Return a dim `suite=name (i/n)` suffix for the active suite, if any."""
    progress = _current_suite_progress()
    if not progress:
        return ""
    name, index, total = progress
    if not name or total <= 0:
        return ""
    return " " + colorize(f"suite={name} ({index}/{total})", DETAIL_COLOR)
|
|
457
|
+
|
|
458
|
+
|
|
459
|
+
TEST_TMP_ENV_VAR = "TENZIR_TMP_DIR"
|
|
460
|
+
_TMP_KEEP_ENV_VAR = "TENZIR_KEEP_TMP_DIRS"
|
|
461
|
+
_TMP_ROOT_NAME = ".tenzir-test"
|
|
462
|
+
_TMP_SUBDIR_NAME = "tmp"
|
|
463
|
+
_TMP_BASE_DIRS: set[Path] = set()
|
|
464
|
+
_ACTIVE_TMP_DIRS: set[Path] = set()
|
|
465
|
+
_TMP_DIR_LOCK = threading.Lock()
|
|
466
|
+
KEEP_TMP_DIRS = bool(os.environ.get(_TMP_KEEP_ENV_VAR))
|
|
467
|
+
|
|
468
|
+
SHOW_DIFF_OUTPUT = True
|
|
469
|
+
SHOW_DIFF_STAT = True
|
|
470
|
+
_BLOCK_INDENT = ""
|
|
471
|
+
_PLUS_SYMBOLS = {1: "□", 10: "▣", 100: "■"}
|
|
472
|
+
_MINUS_SYMBOLS = {1: "□", 10: "▣", 100: "■"}
|
|
473
|
+
|
|
474
|
+
|
|
475
|
+
def set_show_diff_output(enabled: bool) -> None:
|
|
476
|
+
global SHOW_DIFF_OUTPUT
|
|
477
|
+
SHOW_DIFF_OUTPUT = enabled
|
|
478
|
+
|
|
479
|
+
|
|
480
|
+
def should_show_diff_output() -> bool:
|
|
481
|
+
return SHOW_DIFF_OUTPUT
|
|
482
|
+
|
|
483
|
+
|
|
484
|
+
def set_show_diff_stat(enabled: bool) -> None:
|
|
485
|
+
global SHOW_DIFF_STAT
|
|
486
|
+
SHOW_DIFF_STAT = enabled
|
|
487
|
+
|
|
488
|
+
|
|
489
|
+
def should_show_diff_stat() -> bool:
|
|
490
|
+
return SHOW_DIFF_STAT
|
|
491
|
+
|
|
492
|
+
|
|
493
|
+
def set_harness_mode(mode: HarnessMode) -> None:
    """Set the global harness execution mode."""
    global HARNESS_MODE
    HARNESS_MODE = mode
|
|
498
|
+
|
|
499
|
+
|
|
500
|
+
def get_harness_mode() -> HarnessMode:
|
|
501
|
+
"""Return the current harness execution mode."""
|
|
502
|
+
|
|
503
|
+
return HARNESS_MODE
|
|
504
|
+
|
|
505
|
+
|
|
506
|
+
def is_passthrough_enabled() -> bool:
|
|
507
|
+
"""Return whether passthrough output is enabled."""
|
|
508
|
+
|
|
509
|
+
return HARNESS_MODE is HarnessMode.PASSTHROUGH
|
|
510
|
+
|
|
511
|
+
|
|
512
|
+
def is_update_mode() -> bool:
|
|
513
|
+
"""Return whether the harness updates reference artifacts."""
|
|
514
|
+
|
|
515
|
+
return HARNESS_MODE is HarnessMode.UPDATE
|
|
516
|
+
|
|
517
|
+
|
|
518
|
+
def set_passthrough_enabled(enabled: bool) -> None:
    """Backward-compatible helper to toggle passthrough mode.

    Disabling only downgrades to COMPARE when passthrough is currently
    active, so an explicit UPDATE mode is never clobbered.
    """
    if enabled:
        set_harness_mode(HarnessMode.PASSTHROUGH)
        return
    if HARNESS_MODE is HarnessMode.PASSTHROUGH:
        set_harness_mode(HarnessMode.COMPARE)
|
|
525
|
+
|
|
526
|
+
|
|
527
|
+
@overload
def run_subprocess(
    args: Sequence[str],
    *,
    capture_output: bool,
    check: bool = False,
    text: Literal[False] = False,
    force_capture: bool = False,
    **kwargs: Any,
) -> subprocess.CompletedProcess[bytes]: ...


@overload
def run_subprocess(
    args: Sequence[str],
    *,
    capture_output: bool,
    check: bool = False,
    text: Literal[True],
    force_capture: bool = False,
    **kwargs: Any,
) -> subprocess.CompletedProcess[str]: ...


def run_subprocess(
    args: Sequence[str],
    *,
    capture_output: bool,
    check: bool = False,
    text: bool = False,
    force_capture: bool = False,
    **kwargs: Any,
) -> subprocess.CompletedProcess[bytes] | subprocess.CompletedProcess[str]:
    """Execute a subprocess honoring passthrough configuration.

    When passthrough is enabled the process inherits stdout/stderr so
    developers can observe output directly. Otherwise the helper captures
    both streams when `capture_output` is true, mirroring
    ``subprocess.run``'s behaviour.

    Runner authors should prefer this helper over direct ``subprocess`` calls
    so passthrough semantics remain consistent across implementations.
    """
    # Stream handling is owned by this helper; reject caller overrides.
    if {"stdout", "stderr", "capture_output"} & kwargs.keys():
        raise TypeError("run_subprocess manages stdout/stderr automatically")

    stream_output = is_passthrough_enabled() and not force_capture
    pipe = subprocess.PIPE if capture_output and not stream_output else None

    if _CLI_LOGGER.isEnabledFor(logging.DEBUG):
        cmd_display = shlex.join(str(arg) for arg in args)
        cwd_value = kwargs.get("cwd")
        cwd_segment = f" (cwd={cwd_value})" if cwd_value else ""
        _CLI_LOGGER.debug("exec %s%s", cmd_display, cwd_segment)

    return subprocess.run(
        args,
        check=check,
        stdout=pipe,
        stderr=pipe,
        text=text,
        **kwargs,
    )
|
|
595
|
+
|
|
596
|
+
|
|
597
|
+
def _resolve_tmp_base() -> Path:
    """Return (and register) the base directory for per-test tmp dirs.

    Prefers `<ROOT>/.tenzir-test/tmp`; falls back to the system temp dir
    when the project tree is not writable.
    """
    base = ROOT / _TMP_ROOT_NAME / _TMP_SUBDIR_NAME
    try:
        base.mkdir(parents=True, exist_ok=True)
    except OSError:
        # Project tree not writable: fall back to the system temp location.
        base = Path(tempfile.gettempdir()) / _TMP_ROOT_NAME.strip(".") / _TMP_SUBDIR_NAME
        base.mkdir(parents=True, exist_ok=True)
    _TMP_BASE_DIRS.add(base)
    return base
|
|
608
|
+
|
|
609
|
+
|
|
610
|
+
def _tmp_prefix_for(test: Path) -> str:
    """Derive a filesystem-safe tmp-dir prefix from a test path.

    Uses the path relative to ROOT (suffix stripped, parts joined with `-`)
    when possible, falling back to the bare stem for out-of-tree tests. The
    result is sanitized and capped at 32 characters; the previous
    `len(...) > 32` guard was redundant because slicing already handles
    shorter strings.
    """
    try:
        relative = test.relative_to(ROOT)
        candidate = "-".join(relative.with_suffix("").parts)
    except ValueError:
        candidate = test.stem
    sanitized = re.sub(r"[^A-Za-z0-9_.-]", "-", candidate).strip("-")
    # `or "test"` keeps a usable prefix even for fully-stripped names.
    return sanitized[:32] or "test"
|
|
620
|
+
|
|
621
|
+
|
|
622
|
+
def _create_test_tmp_dir(test: Path) -> Path:
    """Create and register a fresh temporary directory for `test`."""
    prefix = f"{_tmp_prefix_for(test)}-"
    with _TMP_DIR_LOCK:
        base = _resolve_tmp_base()
        # The base may have been pruned by a concurrent cleanup pass.
        base.mkdir(parents=True, exist_ok=True)
        created = Path(tempfile.mkdtemp(prefix=prefix, dir=str(base)))
        _ACTIVE_TMP_DIRS.add(created)
    return created
|
|
631
|
+
|
|
632
|
+
|
|
633
|
+
def set_keep_tmp_dirs(enabled: bool) -> None:
|
|
634
|
+
global KEEP_TMP_DIRS
|
|
635
|
+
KEEP_TMP_DIRS = enabled
|
|
636
|
+
|
|
637
|
+
|
|
638
|
+
def cleanup_test_tmp_dir(path: str | os.PathLike[str] | None) -> None:
    """Release a per-test tmp dir: run fixture cleanup, then remove it.

    Removal is skipped when KEEP_TMP_DIRS is set. Fixture cleanup errors are
    deliberately swallowed so teardown never fails a test run.
    """
    if not path:
        return
    tmp_path = Path(path)
    with _TMP_DIR_LOCK:
        _ACTIVE_TMP_DIRS.discard(tmp_path)
    try:
        fixtures_impl.invoke_tmp_dir_cleanup(tmp_path)
    except Exception:  # pragma: no cover - defensive logging
        pass
    if KEEP_TMP_DIRS:
        return
    with _TMP_DIR_LOCK:
        if tmp_path.exists():
            shutil.rmtree(tmp_path, ignore_errors=True)
        _cleanup_tmp_base_dirs()
|
|
654
|
+
|
|
655
|
+
|
|
656
|
+
def _cleanup_remaining_tmp_dirs() -> None:
    """atexit hook: release every registered tmp dir, then prune base dirs."""
    with _TMP_DIR_LOCK:
        leftovers = list(_ACTIVE_TMP_DIRS)
    for tmp_path in leftovers:
        cleanup_test_tmp_dir(tmp_path)
    with _TMP_DIR_LOCK:
        _cleanup_tmp_base_dirs()
|
|
663
|
+
|
|
664
|
+
|
|
665
|
+
def _cleanup_all_tmp_dirs() -> None:
    """Eagerly remove any temporary directories created by the harness.

    Honors KEEP_TMP_DIRS, then delegates to the atexit cleanup path: the
    previous body duplicated `_cleanup_remaining_tmp_dirs` verbatim, so
    delegation removes the copy without changing behavior.
    """
    if KEEP_TMP_DIRS:
        return
    _cleanup_remaining_tmp_dirs()
|
|
676
|
+
|
|
677
|
+
|
|
678
|
+
def _cleanup_tmp_base_dirs() -> None:
    """Best-effort removal of tmp base directories with no live tenants.

    A base directory is only removed when no active tmp dir still lives
    beneath it; its parent folder is then removed too if empty. All rmdir
    failures are tolerated.
    """

    def _is_under(candidate: Path, base: Path) -> bool:
        # Path.relative_to raises ValueError when candidate is outside base.
        try:
            candidate.relative_to(base)
        except ValueError:
            return False
        return True

    if KEEP_TMP_DIRS:
        return
    for base in tuple(_TMP_BASE_DIRS):
        if not base.exists():
            _TMP_BASE_DIRS.discard(base)
            continue
        if any(_is_under(active, base) for active in _ACTIVE_TMP_DIRS):
            # Still in use; leave this base directory alone.
            continue
        try:
            base.rmdir()
        except OSError:
            continue
        _TMP_BASE_DIRS.discard(base)
        with contextlib.suppress(OSError):
            base.parent.rmdir()
|
|
702
|
+
|
|
703
|
+
|
|
704
|
+
atexit.register(_cleanup_remaining_tmp_dirs)
|
|
705
|
+
|
|
706
|
+
_default_debug_logging = bool(os.environ.get("TENZIR_TEST_DEBUG"))
|
|
707
|
+
_debug_logging = _default_debug_logging
|
|
708
|
+
|
|
709
|
+
_runner_names: set[str] = set()
|
|
710
|
+
_allowed_extensions: set[str] = set()
|
|
711
|
+
_DEFAULT_RUNNER_BY_SUFFIX: dict[str, str] = {
|
|
712
|
+
".tql": "tenzir",
|
|
713
|
+
".py": "python",
|
|
714
|
+
".sh": "shell",
|
|
715
|
+
}
|
|
716
|
+
|
|
717
|
+
_CONFIG_FILE_NAME = "test.yaml"
|
|
718
|
+
_CONFIG_LOGGER = logging.getLogger("tenzir_test.config")
|
|
719
|
+
_CONFIG_LOGGER.setLevel(logging.INFO)
|
|
720
|
+
if not _CONFIG_LOGGER.handlers:
|
|
721
|
+
handler = logging.StreamHandler()
|
|
722
|
+
handler.setLevel(logging.INFO)
|
|
723
|
+
handler.setFormatter(logging.Formatter("%(message)s"))
|
|
724
|
+
_CONFIG_LOGGER.addHandler(handler)
|
|
725
|
+
_CONFIG_LOGGER.propagate = False
|
|
726
|
+
|
|
727
|
+
|
|
728
|
+
class _CliDebugHandler(logging.Handler):
    """Stream debug messages through stdout using the CLI formatting."""

    def emit(self, record: logging.LogRecord) -> None:  # pragma: no cover - thin wrapper
        """Format `record` and print it under the debug prefix, serialized."""
        line = f"{DEBUG_PREFIX} {self.format(record)}"
        with stdout_lock:
            builtins.print(line, flush=True)
|
|
735
|
+
|
|
736
|
+
|
|
737
|
+
_CLI_LOGGER = logging.getLogger("tenzir_test.cli")
|
|
738
|
+
if not _CLI_LOGGER.handlers:
|
|
739
|
+
cli_handler = _CliDebugHandler()
|
|
740
|
+
cli_handler.setLevel(logging.DEBUG)
|
|
741
|
+
cli_handler.setFormatter(logging.Formatter("%(message)s"))
|
|
742
|
+
_CLI_LOGGER.addHandler(cli_handler)
|
|
743
|
+
_CLI_LOGGER.propagate = False
|
|
744
|
+
_CLI_LOGGER.setLevel(logging.DEBUG if _debug_logging else logging.WARNING)
|
|
745
|
+
|
|
746
|
+
_DIRECTORY_CONFIG_CACHE: dict[Path, "_DirectoryConfig"] = {}
|
|
747
|
+
|
|
748
|
+
_DISCOVERY_ENABLED = False
|
|
749
|
+
|
|
750
|
+
|
|
751
|
+
def _set_discovery_logging(enabled: bool) -> None:
|
|
752
|
+
global _DISCOVERY_ENABLED
|
|
753
|
+
_DISCOVERY_ENABLED = enabled
|
|
754
|
+
|
|
755
|
+
|
|
756
|
+
def _print_discovery_message(message: str) -> None:
    """Emit a discovery message via the CLI debug logger or raw stdout."""
    if _CLI_LOGGER.isEnabledFor(logging.DEBUG):
        _CLI_LOGGER.debug(message)
        return
    with stdout_lock:
        builtins.print(f"{DEBUG_PREFIX} {message}", flush=True)
|
|
762
|
+
|
|
763
|
+
|
|
764
|
+
class ProjectMarker(enum.Enum):
    """Sentinel indicators that describe a project root."""

    PACKAGE_MANIFEST = "package_manifest"
    TESTS_DIRECTORY = "tests_directory"
    TEST_CONFIG = "test_config"
    TEST_SUITE_DIRECTORY = "test_suite_directory"
|
|
771
|
+
|
|
772
|
+
|
|
773
|
+
@dataclasses.dataclass(frozen=True, slots=True)
|
|
774
|
+
class ProjectSignature:
|
|
775
|
+
"""Description of markers that identify a project root."""
|
|
776
|
+
|
|
777
|
+
root: Path
|
|
778
|
+
markers: frozenset[ProjectMarker]
|
|
779
|
+
|
|
780
|
+
@property
|
|
781
|
+
def kind(self) -> Literal["package", "project"]:
|
|
782
|
+
return "package" if ProjectMarker.PACKAGE_MANIFEST in self.markers else "project"
|
|
783
|
+
|
|
784
|
+
def has(self, marker: ProjectMarker) -> bool:
|
|
785
|
+
return marker in self.markers
|
|
786
|
+
|
|
787
|
+
|
|
788
|
+
_PRIMARY_PROJECT_MARKERS = {
|
|
789
|
+
ProjectMarker.PACKAGE_MANIFEST,
|
|
790
|
+
ProjectMarker.TESTS_DIRECTORY,
|
|
791
|
+
ProjectMarker.TEST_CONFIG,
|
|
792
|
+
ProjectMarker.TEST_SUITE_DIRECTORY,
|
|
793
|
+
}
|
|
794
|
+
|
|
795
|
+
|
|
796
|
+
def _describe_project_root(path: Path) -> ProjectSignature | None:
    """Return a signature describing why a path qualifies as a project root."""
    try:
        resolved = path.resolve()
    except FileNotFoundError:
        return None
    if not (resolved.exists() and resolved.is_dir()):
        return None

    markers: set[ProjectMarker] = set()
    if packages.is_package_dir(resolved):
        markers.add(ProjectMarker.PACKAGE_MANIFEST)
    if (resolved / "tests").is_dir():
        markers.add(ProjectMarker.TESTS_DIRECTORY)
    if (resolved / "test.yaml").is_file():
        markers.add(ProjectMarker.TEST_CONFIG)
    if resolved.name == "tests" and resolved.is_dir():
        markers.add(ProjectMarker.TEST_SUITE_DIRECTORY)

    # Require at least one primary marker (an empty set trivially fails too).
    if not (markers & _PRIMARY_PROJECT_MARKERS):
        return None
    return ProjectSignature(root=resolved, markers=frozenset(markers))
|
|
829
|
+
|
|
830
|
+
|
|
831
|
+
def _discover_enclosed_projects(path: Path, *, base_root: Path) -> list[Path]:
    """Return project roots discovered directly underneath `path`."""
    try:
        resolved = path.resolve()
    except FileNotFoundError:
        return []
    if not (resolved.exists() and resolved.is_dir()):
        return []
    try:
        entries = sorted(resolved.iterdir())
    except OSError:
        return []

    discovered: list[Path] = []
    for entry in entries:
        if not entry.is_dir():
            continue
        project_root = _find_project_root(entry, base_root=base_root)
        if project_root is None:
            continue
        candidate = project_root.resolve()
        # Skip the base root itself and de-duplicate repeat discoveries.
        if candidate != base_root and candidate not in discovered:
            discovered.append(candidate)
    return discovered
|
|
861
|
+
|
|
862
|
+
|
|
863
|
+
def _set_project_root(path: Path) -> None:
    """Switch global project state to `path`.

    Rebinds the module-level ROOT, derives the inputs directory and the
    execution mode from the new root, and invalidates the per-directory
    config cache (cached entries were keyed relative to the old ROOT).
    """

    global ROOT, INPUTS_DIR, EXECUTION_MODE, _DETECTED_PACKAGE_ROOT
    ROOT = path
    INPUTS_DIR = _resolve_inputs_dir(path)
    EXECUTION_MODE, _DETECTED_PACKAGE_ROOT = detect_execution_mode(path)
    _clear_directory_config_cache()
|
|
871
|
+
|
|
872
|
+
|
|
873
|
+
def _is_project_root(path: Path) -> bool:
    """Return True if the directory looks like a tenzir-test project root."""

    # A directory is a project root exactly when it yields a signature.
    signature = _describe_project_root(path)
    return signature is not None
|
|
877
|
+
|
|
878
|
+
|
|
879
|
+
def _resolve_cli_path(argument: Path, *, base_root: Path) -> Path:
|
|
880
|
+
if argument.is_absolute():
|
|
881
|
+
return argument.resolve()
|
|
882
|
+
|
|
883
|
+
candidates = [Path.cwd() / argument, base_root / argument]
|
|
884
|
+
for candidate in candidates:
|
|
885
|
+
if candidate.exists():
|
|
886
|
+
return candidate.resolve()
|
|
887
|
+
|
|
888
|
+
# Neither candidate exists; prefer base-root resolution for error messages.
|
|
889
|
+
return (base_root / argument).resolve()
|
|
890
|
+
|
|
891
|
+
|
|
892
|
+
def _find_project_root(path: Path, *, base_root: Path) -> Path | None:
    """Map `path` to the project root responsible for it, or None.

    Resolution order: an enclosing package root wins outright; then a
    nested `test/` directory that is itself a project; then paths inside
    `base_root` default to `base_root` unless a sub-project (outside the
    root's own `tests/` tree) claims them; finally the parents of `path`
    are walked upward looking for a project root.
    """
    package_root = packages.find_package_root(path)
    if package_root is not None:
        return package_root.resolve()

    resolved = path.resolve()
    # A `test/` subdirectory that is itself a project stands in for `path`.
    nested_test_dir = resolved / "test"
    if _is_project_root(nested_test_dir):
        resolved = nested_test_dir
    try:
        resolved.relative_to(base_root)
    except ValueError:
        pass
    else:
        # `resolved` lives inside the base root.
        if resolved != base_root and _is_project_root(resolved):
            try:
                rel = resolved.relative_to(base_root)
            except ValueError:
                return resolved
            # Sub-projects under the root's own `tests/` tree belong to the
            # root project rather than forming satellites.
            if not rel.parts or rel.parts[0] != "tests":
                return resolved
        return base_root

    # Outside the base root: walk upward to find the nearest project root.
    for candidate in [resolved, *resolved.parents]:
        if candidate == base_root:
            return base_root
        if candidate.name == "tests":
            parent = candidate.parent
            if parent == base_root:
                return base_root
            if _is_project_root(parent):
                return parent
        candidate_test_dir = candidate / "test"
        if (
            _is_project_root(candidate_test_dir)
            and candidate_test_dir != base_root
            and candidate_test_dir.is_relative_to(resolved)
        ):
            # Prefer a project-shaped `test/` child when it covers `resolved`.
            candidate = candidate_test_dir
        if _is_project_root(candidate):
            try:
                rel = candidate.relative_to(base_root)
            except ValueError:
                if base_root.is_relative_to(candidate):
                    # `candidate` contains the base root; keep climbing.
                    continue
                return candidate
            if rel.parts and rel.parts[0] == "tests":
                # Inside the root's `tests/` tree: not a separate project.
                continue
            return candidate
    return None
|
|
942
|
+
|
|
943
|
+
|
|
944
|
+
def _build_execution_plan(
    base_root: Path,
    raw_args: Sequence[Path],
    *,
    root_explicit: bool,
    all_projects: bool = False,
) -> ExecutionPlan:
    """Translate CLI path arguments into an ExecutionPlan.

    Each argument is resolved and attributed either to the root project
    (as a selector or a run-everything flag) or to a satellite project.
    Arguments that enclose projects expand to those projects run in full.
    Raises SystemExit for selectors that do not exist on disk.
    """
    base_root_is_project = _is_project_root(base_root)
    root_selectors: list[Path] = []
    # With no arguments at all the root project runs in full.
    run_root_all = not raw_args

    # Satellite bookkeeping: discovery order, per-project selectors, and
    # whether a project should run all of its tests.
    satellite_order: list[Path] = []
    satellite_selectors: dict[Path, list[Path]] = {}
    satellite_run_all: dict[Path, bool] = {}

    for argument in raw_args:
        resolved = _resolve_cli_path(argument, base_root=base_root)
        project_root = _find_project_root(resolved, base_root=base_root)

        if project_root is None:
            if not resolved.exists():
                raise SystemExit(
                    f"error: path `{argument}` does not exist (resolved to {resolved})"
                )
            # The path is not inside any project; maybe it encloses some.
            enclosed_projects = _discover_enclosed_projects(resolved, base_root=base_root)
            if enclosed_projects:
                for nested_root in enclosed_projects:
                    selectors = satellite_selectors.setdefault(nested_root, [])
                    if nested_root not in satellite_order:
                        satellite_order.append(nested_root)
                    # Enclosed projects always run all of their tests.
                    satellite_run_all[nested_root] = True
                continue

            try:
                resolved.relative_to(base_root)
            except ValueError:
                # Outside the main tree and no project found: ignore loudly.
                if _DISCOVERY_ENABLED:
                    _print_discovery_message(
                        f"ignoring `{argument}` (resolved to {resolved}) - no tenzir-test project found"
                    )
                continue

            # Default to root project for existing paths inside the main project tree.
            project_root = base_root

        if project_root == base_root:
            if not base_root_is_project:
                # The base root is only a container; expand to enclosed projects.
                enclosed_projects = _discover_enclosed_projects(resolved, base_root=base_root)
                if enclosed_projects:
                    for nested_root in enclosed_projects:
                        selectors = satellite_selectors.setdefault(nested_root, [])
                        if nested_root not in satellite_order:
                            satellite_order.append(nested_root)
                        satellite_run_all[nested_root] = True
                    continue
            if resolved == base_root:
                run_root_all = True
                continue
            if not resolved.exists():
                raise SystemExit(
                    f"error: test path `{argument}` does not exist (resolved to {resolved})"
                )
            root_selectors.append(resolved)
            continue

        # Argument belongs to a satellite project.
        project_root = project_root.resolve()
        selectors = satellite_selectors.setdefault(project_root, [])
        if project_root not in satellite_order:
            satellite_order.append(project_root)
        if resolved == project_root:
            # Naming the project itself means "run everything in it".
            satellite_run_all[project_root] = True
            continue
        if not resolved.exists():
            raise SystemExit(
                f"error: test path `{argument}` does not exist (resolved to {resolved})"
            )
        selectors.append(resolved)

    if all_projects:
        run_root_all = True
    elif root_explicit and not raw_args:
        run_root_all = True

    root_selection = ProjectSelection(
        root=base_root,
        selectors=[path.resolve() for path in root_selectors],
        run_all=run_root_all,
        kind="root",
    )

    satellites: list[ProjectSelection] = []
    for project_root in satellite_order:
        selectors = [path.resolve() for path in satellite_selectors.get(project_root, [])]
        # A satellite with no specific selectors runs in full.
        run_all = satellite_run_all.get(project_root, False) or not selectors
        satellites.append(
            ProjectSelection(
                root=project_root,
                selectors=selectors,
                run_all=run_all,
                kind="satellite",
            )
        )

    return ExecutionPlan(root=root_selection, satellites=satellites)
|
|
1048
|
+
|
|
1049
|
+
|
|
1050
|
+
def _format_relative_path(path: Path, base: Path) -> str:
|
|
1051
|
+
try:
|
|
1052
|
+
relative = path.relative_to(base)
|
|
1053
|
+
except ValueError:
|
|
1054
|
+
try:
|
|
1055
|
+
relative_str = os.path.relpath(path, base)
|
|
1056
|
+
except ValueError:
|
|
1057
|
+
return path.as_posix()
|
|
1058
|
+
if relative_str == ".":
|
|
1059
|
+
return "."
|
|
1060
|
+
return relative_str.replace(os.sep, "/")
|
|
1061
|
+
if not relative.parts:
|
|
1062
|
+
return "."
|
|
1063
|
+
return relative.as_posix()
|
|
1064
|
+
|
|
1065
|
+
|
|
1066
|
+
def _marker_for_selection(selection: ProjectSelection) -> str:
    """Pick the bullet glyph used when listing a project selection."""

    if selection.kind == "root":
        return "■"
    # Satellites: hollow circle for packages, hollow square otherwise.
    return "○" if packages.is_package_dir(selection.root) else "□"
|
|
1072
|
+
|
|
1073
|
+
|
|
1074
|
+
def _print_execution_plan(plan: ExecutionPlan, *, display_base: Path) -> int:
    """Print the list of projects that will run; return how many were listed.

    Returns 0 (and prints nothing) when zero or one project is active, so
    single-project runs stay quiet.

    NOTE(review): `display_base` is currently unused in this body — confirm
    whether project names were meant to be shown relative to it.
    """
    active: list[tuple[str, ProjectSelection]] = []
    if plan.root.should_run():
        active.append((_marker_for_selection(plan.root), plan.root))
    for satellite in plan.satellites:
        if satellite.should_run():
            active.append((_marker_for_selection(satellite), satellite))

    if not active:
        return 0

    # A single active project needs no summary listing.
    if len(active) == 1:
        return 0

    print(f"{INFO} found {len(active)} projects")
    for marker, selection in active:
        # Fall back to the full path when the root has no final component.
        name = selection.root.name or selection.root.as_posix()
        print(f"{INFO} {marker} {name}")
    return len(active)
|
|
1093
|
+
|
|
1094
|
+
|
|
1095
|
+
_MISSING = object()
|
|
1096
|
+
|
|
1097
|
+
|
|
1098
|
+
def get_default_jobs() -> int:
    """Return the default number of worker threads for the CLI."""

    # Assume 16 cores when the CPU count cannot be determined, and
    # oversubscribe by 4x since test work is largely I/O bound.
    cores = os.cpu_count() or 16
    return cores * 4
|
|
1102
|
+
|
|
1103
|
+
|
|
1104
|
+
@dataclasses.dataclass(slots=True)
class _DirectoryConfig:
    """Effective test configuration for one directory, with provenance."""

    # Configuration values for the directory: built-in defaults merged with
    # every test.yaml from the project root down to this directory.
    values: TestConfig
    # For each key, the test.yaml file that last set it; used to report
    # when a deeper directory overrides an inherited value.
    sources: dict[str, Path]
|
|
1108
|
+
|
|
1109
|
+
|
|
1110
|
+
def _default_test_config() -> TestConfig:
|
|
1111
|
+
return {
|
|
1112
|
+
"error": False,
|
|
1113
|
+
"timeout": 30,
|
|
1114
|
+
"runner": None,
|
|
1115
|
+
"skip": None,
|
|
1116
|
+
"fixtures": tuple(),
|
|
1117
|
+
"inputs": None,
|
|
1118
|
+
"retry": 1,
|
|
1119
|
+
"suite": None,
|
|
1120
|
+
}
|
|
1121
|
+
|
|
1122
|
+
|
|
1123
|
+
def _canonical_config_key(key: str) -> str:
|
|
1124
|
+
if key == "fixture":
|
|
1125
|
+
return "fixtures"
|
|
1126
|
+
return key
|
|
1127
|
+
|
|
1128
|
+
|
|
1129
|
+
ConfigOrigin = Literal["directory", "frontmatter"]
|
|
1130
|
+
|
|
1131
|
+
|
|
1132
|
+
def _raise_config_error(location: Path | str, message: str, line_number: int | None = None) -> None:
|
|
1133
|
+
base = str(location)
|
|
1134
|
+
if line_number is not None:
|
|
1135
|
+
base = f"{base}:{line_number}"
|
|
1136
|
+
raise ValueError(f"Error in {base}: {message}")
|
|
1137
|
+
|
|
1138
|
+
|
|
1139
|
+
def _normalize_fixtures_value(
    value: typing.Any,
    *,
    location: Path | str,
    line_number: int | None = None,
) -> tuple[str, ...]:
    """Coerce a 'fixtures' config value into a tuple of fixture names.

    Accepts a list of strings, a string containing inline YAML that parses
    to a list, or a single bare string (treated as one fixture name).
    Raises ValueError via `_raise_config_error` for any other shape, for
    non-string entries, and for empty names.
    """
    raw: typing.Any
    if isinstance(value, list):
        raw = value
    elif isinstance(value, str):
        # The string itself may be inline YAML, e.g. "[node, http]".
        try:
            parsed = yaml.safe_load(value)
        except yaml.YAMLError:
            parsed = None
        if isinstance(parsed, list):
            raw = parsed
        else:
            raw = [value]
    else:
        _raise_config_error(
            location,
            f"Invalid value for 'fixtures', expected string or list, got '{value}'",
            line_number,
        )
        # Unreachable: _raise_config_error always raises; keeps the return
        # type total for static analysis.
        return tuple()

    fixtures: list[str] = []
    for entry in raw:
        if not isinstance(entry, str):
            _raise_config_error(
                location,
                f"Invalid fixture entry '{entry}', expected string",
                line_number,
            )
        name = entry.strip()
        if not name:
            _raise_config_error(
                location,
                "Fixture names must be non-empty strings",
                line_number,
            )
        fixtures.append(name)
    return tuple(fixtures)
|
|
1182
|
+
|
|
1183
|
+
|
|
1184
|
+
def _extract_location_path(location: Path | str) -> Path:
|
|
1185
|
+
if isinstance(location, Path):
|
|
1186
|
+
return location
|
|
1187
|
+
location_str = str(location)
|
|
1188
|
+
if ":" in location_str:
|
|
1189
|
+
location_str = location_str.split(":", 1)[0]
|
|
1190
|
+
return Path(location_str)
|
|
1191
|
+
|
|
1192
|
+
|
|
1193
|
+
def _normalize_inputs_value(
    value: typing.Any,
    *,
    location: Path | str,
    line_number: int | None = None,
) -> str | None:
    """Normalize an 'inputs' config value to an absolute path string.

    `None` passes through. Strings and path-like values are resolved
    relative to the directory containing `location` when not absolute.
    Empty or non-string values raise via `_raise_config_error`.
    """
    if value is None:
        return None
    if isinstance(value, os.PathLike) or isinstance(value, str):
        raw = os.fspath(value).strip()
        if not raw:
            _raise_config_error(
                location,
                "'inputs' value must be a non-empty string",
                line_number,
            )
        # Relative paths are anchored at the config file's directory.
        base_dir = _extract_location_path(location).parent
        path = Path(raw)
        if not path.is_absolute():
            path = base_dir / path
        try:
            normalized = path.resolve()
        except OSError:
            # Resolution can fail (e.g. permission issues); keep the raw path.
            normalized = path
        return str(normalized)

    _raise_config_error(
        location,
        f"Invalid value for 'inputs', expected string, got '{value}'",
        line_number,
    )
    # Unreachable: _raise_config_error always raises.
    return None
|
|
1225
|
+
|
|
1226
|
+
|
|
1227
|
+
def _assign_config_option(
    config: TestConfig,
    key: str,
    value: typing.Any,
    *,
    location: Path | str,
    line_number: int | None = None,
    origin: ConfigOrigin,
) -> None:
    """Validate `value` for configuration `key` and store it in `config`.

    `origin` says where the value came from: "directory" (a test.yaml file)
    or "frontmatter" (a test file's header). Some keys are origin-restricted:
    'suite' is directory-only, and 'fixtures'/'retry' may not be overridden
    in frontmatter once a suite is set. Invalid values raise ValueError via
    `_raise_config_error` (which always raises, so the `return` statements
    after it are for control-flow clarity and static analysis only).
    """
    canonical = _canonical_config_key(key)
    valid_keys: set[str] = {"error", "timeout", "runner", "skip", "fixtures", "inputs", "retry"}
    if origin == "directory":
        # 'suite' may only be declared at directory level.
        valid_keys.add("suite")
    if canonical not in valid_keys:
        _raise_config_error(location, f"Unknown configuration key '{key}'", line_number)

    if canonical == "suite":
        if origin != "directory":
            _raise_config_error(
                location,
                "'suite' can only be specified in directory-level test.yaml files",
                line_number,
            )
        if not isinstance(value, str) or not value.strip():
            _raise_config_error(
                location,
                "'suite' value must be a non-empty string",
                line_number,
            )
        config[canonical] = value.strip()
        return

    if canonical == "skip":
        # A skip reason must be a non-empty string; it is stored verbatim.
        if not isinstance(value, str) or not value.strip():
            _raise_config_error(
                location,
                "'skip' value must be a non-empty string",
                line_number,
            )
        config[canonical] = value
        return

    if canonical == "error":
        # Accept booleans and the literal strings "true"/"false" (any case).
        if isinstance(value, bool):
            config[canonical] = value
            return
        if isinstance(value, str):
            lowered = value.lower()
            if lowered in {"true", "false"}:
                config[canonical] = lowered == "true"
                return
        _raise_config_error(
            location,
            f"Invalid value for '{canonical}', expected 'true' or 'false', got '{value}'",
            line_number,
        )
        return

    if canonical == "timeout":
        # Accept ints or digit-only strings; must be strictly positive.
        if isinstance(value, int):
            timeout_value = value
        elif isinstance(value, str) and value.strip().isdigit():
            timeout_value = int(value)
        else:
            _raise_config_error(
                location,
                f"Invalid value for 'timeout', expected integer, got '{value}'",
                line_number,
            )
            return
        if timeout_value <= 0:
            _raise_config_error(
                location,
                f"Invalid value for 'timeout', expected positive integer, got '{value}'",
                line_number,
            )
        config[canonical] = timeout_value
        return

    if canonical == "fixtures":
        # Inside a suite, fixtures are fixed at the directory level.
        suite_value = config.get("suite")
        if origin == "frontmatter" and isinstance(suite_value, str) and suite_value.strip():
            _raise_config_error(
                location,
                "'fixtures' cannot be specified in test frontmatter within a suite; configure fixtures in test.yaml",
                line_number,
            )
        config[canonical] = _normalize_fixtures_value(
            value, location=location, line_number=line_number
        )
        return

    if canonical == "inputs":
        config[canonical] = _normalize_inputs_value(
            value, location=location, line_number=line_number
        )
        return
    if canonical == "retry":
        # Inside a suite, retry counts are fixed at the directory level.
        if origin == "frontmatter" and isinstance(config.get("suite"), str):
            _raise_config_error(
                location,
                "'retry' cannot be overridden in test frontmatter within a suite",
                line_number,
            )
        if isinstance(value, int):
            retry_value = value
        elif isinstance(value, str) and value.strip().isdigit():
            retry_value = int(value)
        else:
            _raise_config_error(
                location,
                f"Invalid value for 'retry', expected integer, got '{value}'",
                line_number,
            )
            return
        if retry_value <= 0:
            _raise_config_error(
                location,
                f"Invalid value for 'retry', expected positive integer, got '{value}'",
                line_number,
            )
        config[canonical] = retry_value
        return

    if canonical == "runner":
        if not isinstance(value, str):
            _raise_config_error(
                location,
                f"Invalid value for 'runner', expected string, got '{value}'",
                line_number,
            )
        # Unknown runner names are logged but still honored: the registry
        # snapshot may be stale relative to project-defined runners.
        runner_names = _runner_names or {runner.name for runner in runners_iter_runners()}
        if runner_names and value not in runner_names:
            _CONFIG_LOGGER.info(
                "Runner '%s' is not registered; proceeding with explicit selection.",
                value,
            )
        config[canonical] = value
        return

    # Remaining valid keys are stored without further validation.
    config[canonical] = value
|
|
1368
|
+
|
|
1369
|
+
|
|
1370
|
+
def _log_directory_override(
    *,
    path: Path,
    key: str,
    previous: object,
    new: object,
    previous_source: Path,
) -> None:
    """Log that `path` replaced an inherited configuration value for `key`."""

    _CONFIG_LOGGER.info(
        f"{path} overrides '{key}' from {previous!r} (defined in {previous_source}) to {new!r}"
    )
|
|
1382
|
+
|
|
1383
|
+
|
|
1384
|
+
def _load_directory_config(directory: Path) -> _DirectoryConfig:
    """Compute (and cache) the effective test configuration for `directory`.

    Configuration is inherited: directories inside ROOT start from their
    parent's effective config (computed recursively), everything else starts
    from the built-in defaults. A `test.yaml` in the directory is then
    merged on top, with overrides of inherited values logged. Results are
    memoized per resolved path in `_DIRECTORY_CONFIG_CACHE`.

    Raises ValueError when a test.yaml is not a mapping or contains an
    invalid option.
    """
    resolved = directory.resolve()
    cached = _DIRECTORY_CONFIG_CACHE.get(resolved)
    if cached is not None:
        return cached

    try:
        resolved.relative_to(ROOT)
        inside_root = True
    except ValueError:
        inside_root = False

    sources: dict[str, Path]

    if inside_root and resolved != ROOT:
        # Inherit from the parent directory's effective configuration.
        parent_config = _load_directory_config(resolved.parent)
        values = dict(parent_config.values)
        sources = dict(parent_config.sources)
    else:
        # ROOT itself (or a directory outside it) starts from the defaults.
        values = _default_test_config()
        sources = {}

    config_path = resolved / _CONFIG_FILE_NAME
    if config_path.is_file():
        with open(config_path, "r", encoding="utf-8") as handle:
            data = yaml.safe_load(handle) or {}
        if not isinstance(data, dict):
            raise ValueError(f"Error in {config_path}: configuration must define a mapping")
        for raw_key, raw_value in data.items():
            key = _canonical_config_key(str(raw_key))
            # Snapshot the inherited value/source to detect real overrides.
            previous_value = values.get(key, _MISSING)
            previous_source = sources.get(key)
            _assign_config_option(
                values,
                key,
                raw_value,
                location=config_path,
                origin="directory",
            )
            new_value = values.get(key)
            if (
                previous_source is not None
                and previous_value is not _MISSING
                and new_value != previous_value
            ):
                # Only log when an *explicitly set* inherited value changed.
                _log_directory_override(
                    path=config_path,
                    key=key,
                    previous=previous_value,
                    new=new_value,
                    previous_source=previous_source,
                )
            sources[key] = config_path

    directory_config = _DirectoryConfig(values=values, sources=sources)
    _DIRECTORY_CONFIG_CACHE[resolved] = directory_config
    return directory_config
|
|
1441
|
+
|
|
1442
|
+
|
|
1443
|
+
def _get_directory_defaults(directory: Path) -> TestConfig:
    """Return a mutable copy of the effective config for `directory`."""

    directory_config = _load_directory_config(directory)
    # Copy so callers can mutate without poisoning the cache.
    return dict(directory_config.values)
|
|
1446
|
+
|
|
1447
|
+
|
|
1448
|
+
def _resolve_suite_for_test(test: Path) -> SuiteInfo | None:
    """Return the suite that `test` belongs to, or None.

    A test is part of a suite when its directory's effective config carries
    a non-empty 'suite' value AND the test lives underneath the directory of
    the test.yaml that declared that suite.
    """
    directory_config = _load_directory_config(test.parent)
    suite_value = directory_config.values.get("suite")
    if not isinstance(suite_value, str) or not suite_value.strip():
        return None
    suite_source = directory_config.sources.get("suite")
    if suite_source is None:
        # Suite value has no recorded declaring file; cannot scope it.
        return None
    suite_dir = suite_source.parent
    try:
        resolved_dir = suite_dir.resolve()
    except OSError:
        resolved_dir = suite_dir
    try:
        test.resolve().relative_to(resolved_dir)
    except (OSError, ValueError):
        # Only treat as a suite when the test lives under the suite directory.
        return None
    return SuiteInfo(name=suite_value.strip(), directory=resolved_dir)
|
|
1467
|
+
|
|
1468
|
+
|
|
1469
|
+
def _clear_directory_config_cache() -> None:
    """Invalidate memoized per-directory configs (e.g. after ROOT changes)."""
    # Entries are rebuilt lazily by _load_directory_config on next use.
    _DIRECTORY_CONFIG_CACHE.clear()
|
|
1471
|
+
|
|
1472
|
+
|
|
1473
|
+
def _iter_project_test_directories(root: Path) -> Iterator[Path]:
    """Yield directories that contain tests for the current project.

    In PACKAGE mode, yields the package's `tests/` directory (or `root`
    itself when it already is one). Otherwise, packages discovered under
    `root` contribute their `tests/` directories; failing that, `root/tests`
    is used; failing that, every plain top-level directory except fixture,
    runner, hidden, and inputs directories is yielded.
    """

    if EXECUTION_MODE is ExecutionMode.PACKAGE:
        if root.name == "tests" and root.is_dir():
            yield root
            return
        package_root = _DETECTED_PACKAGE_ROOT
        if package_root is None:
            return
        tests_dir = package_root / "tests"
        if tests_dir.is_dir():
            yield tests_dir
        return

    package_dirs = list(packages.iter_package_dirs(root))
    if package_dirs:
        # Packages shadow everything else; yield their tests and stop.
        # (Merged the previously duplicated `if package_dirs:` guard.)
        for package_dir in package_dirs:
            tests_dir = package_dir / "tests"
            if tests_dir.is_dir():
                yield tests_dir
        return

    default_tests = root / "tests"
    if default_tests.is_dir():
        yield default_tests
        return

    # Bare project layout: every visible top-level directory holds tests,
    # except harness-internal and inputs directories.
    for dir_path in root.iterdir():
        if not dir_path.is_dir() or dir_path.name.startswith("."):
            continue
        if dir_path.name in {"fixtures", "runners"}:
            continue
        if _is_inputs_path(dir_path):
            continue
        yield dir_path
|
|
1510
|
+
|
|
1511
|
+
|
|
1512
|
+
def _is_inputs_path(path: Path) -> bool:
    """Return True when the path lives under an inputs directory."""
    # Work on components relative to ROOT when possible, so only the
    # project-internal structure is inspected.
    try:
        parts = path.relative_to(ROOT).parts
    except ValueError:
        parts = path.parts

    for index, part in enumerate(parts):
        if part != "inputs":
            continue
        # `inputs` at the top level counts, as does any `tests/inputs` pair.
        if index == 0:
            return True
        if index > 0 and parts[index - 1] == "tests":
            return True
    return False
|
|
1527
|
+
|
|
1528
|
+
|
|
1529
|
+
def _refresh_registry() -> None:
    """Re-snapshot runner names/extensions after runner modules (re)load."""
    global _runner_names, _allowed_extensions
    _runner_names = runner_names()
    _allowed_extensions = allowed_extensions()
|
|
1533
|
+
|
|
1534
|
+
|
|
1535
|
+
def update_registry_metadata(names: list[str], extensions: list[str]) -> None:
    """Replace the cached runner-name and file-extension registries."""

    global _runner_names, _allowed_extensions
    _runner_names, _allowed_extensions = set(names), set(extensions)
|
|
1539
|
+
|
|
1540
|
+
|
|
1541
|
+
def get_allowed_extensions() -> set[str]:
    """Return a defensive copy of the allowed test-file extensions."""
    return set(_allowed_extensions)
|
|
1543
|
+
|
|
1544
|
+
|
|
1545
|
+
def default_runner_for_suffix(suffix: str) -> str | None:
    """Return the built-in runner name for a file suffix, or None."""
    return _DEFAULT_RUNNER_BY_SUFFIX.get(suffix)
|
|
1547
|
+
|
|
1548
|
+
|
|
1549
|
+
def _resolve_inputs_dir(root: Path) -> Path:
|
|
1550
|
+
direct = root / "inputs"
|
|
1551
|
+
if direct.exists():
|
|
1552
|
+
return direct
|
|
1553
|
+
tests_inputs = root / "tests" / "inputs"
|
|
1554
|
+
if tests_inputs.exists():
|
|
1555
|
+
return tests_inputs
|
|
1556
|
+
return direct
|
|
1557
|
+
|
|
1558
|
+
|
|
1559
|
+
def _looks_like_project_root(path: Path) -> bool:
    """Return True when the path or one of its parents resembles a project root."""

    # Walk upward: a package directory, a `tests` directory itself, or a
    # directory containing `tests/` all qualify.
    candidates = [path, *path.parents]
    for candidate in candidates:
        if packages.is_package_dir(candidate):
            return True
        if candidate.name == "tests" and candidate.is_dir():
            return True
        tests_dir = candidate / "tests"
        if tests_dir.is_dir():
            return True
    return False
|
|
1572
|
+
|
|
1573
|
+
|
|
1574
|
+
def ensure_settings() -> Settings:
    """Return the active harness settings, discovering defaults on first use."""

    if _settings is None:
        # Lazily discover and install the defaults exactly once.
        apply_settings(discover_settings())
    # apply_settings always assigns _settings, so the cast is safe here.
    return cast(Settings, _settings)
|
|
1580
|
+
|
|
1581
|
+
|
|
1582
|
+
def apply_settings(settings: Settings) -> None:
    """Install `settings` as the active configuration for the harness.

    Updates the cached binary paths and re-anchors all project-root state
    (ROOT, inputs dir, execution mode, config cache) via _set_project_root.
    """
    global TENZIR_BINARY, TENZIR_NODE_BINARY
    global _settings
    _settings = settings
    TENZIR_BINARY = settings.tenzir_binary
    TENZIR_NODE_BINARY = settings.tenzir_node_binary
    _set_project_root(settings.root)
|
|
1589
|
+
|
|
1590
|
+
|
|
1591
|
+
def _import_module_from_path(module_name: str, path: Path, *, package: bool = False) -> ModuleType:
|
|
1592
|
+
if package:
|
|
1593
|
+
search_locations = [str(path.parent)]
|
|
1594
|
+
else:
|
|
1595
|
+
search_locations = None
|
|
1596
|
+
spec = importlib.util.spec_from_file_location(
|
|
1597
|
+
module_name,
|
|
1598
|
+
path,
|
|
1599
|
+
submodule_search_locations=search_locations,
|
|
1600
|
+
)
|
|
1601
|
+
if spec is None or spec.loader is None:
|
|
1602
|
+
raise ImportError(f"could not load fixture module from {path}")
|
|
1603
|
+
module = importlib.util.module_from_spec(spec)
|
|
1604
|
+
sys.modules[module_name] = module
|
|
1605
|
+
spec.loader.exec_module(module)
|
|
1606
|
+
return module
|
|
1607
|
+
|
|
1608
|
+
|
|
1609
|
+
_FIXTURE_LOAD_ROOTS: set[Path] = set()
|
|
1610
|
+
_RUNNER_LOAD_ROOTS: set[Path] = set()
|
|
1611
|
+
|
|
1612
|
+
|
|
1613
|
+
def _load_project_fixtures(root: Path, *, expose_namespace: bool) -> None:
    """Import fixture modules for project `root`, at most once per root.

    Accepts either a `fixtures/` directory (its `__init__.py` when present,
    otherwise every top-level `*.py` in sorted order) or a flat
    `fixtures.py`. When `expose_namespace` is true, the imported module is
    additionally registered as the top-level `fixtures` module unless one
    already exists. Any import or registration failure is re-raised as a
    RuntimeError naming the project root.
    """
    resolved_root = root.resolve()
    if resolved_root in _FIXTURE_LOAD_ROOTS:
        return

    fixtures_package = root / "fixtures"
    fixtures_file = root / "fixtures.py"

    try:
        alias_target = None
        if fixtures_package.is_dir():
            init_file = fixtures_package / "__init__.py"
            if init_file.exists():
                alias_target = _import_module_from_path(
                    "_tenzir_project_fixtures", init_file, package=True
                )
            else:
                # No package init: import each module file individually.
                for candidate in sorted(fixtures_package.glob("*.py")):
                    alias_target = _import_module_from_path(
                        f"_tenzir_project_fixture_{candidate.stem}", candidate
                    )
        elif fixtures_file.exists():
            alias_target = _import_module_from_path("_tenzir_project_fixtures", fixtures_file)
        if alias_target is not None and expose_namespace:
            # Never clobber an existing `fixtures` module.
            if "fixtures" not in sys.modules:
                sys.modules["fixtures"] = alias_target
    except ValueError as exc:  # registration error (e.g., duplicate fixture)
        raise RuntimeError(f"failed to load fixtures from {resolved_root}: {exc}") from exc
    except Exception as exc:  # pragma: no cover - defensive logging
        raise RuntimeError(f"failed to load fixtures from {resolved_root}: {exc}") from exc

    _FIXTURE_LOAD_ROOTS.add(resolved_root)
|
|
1645
|
+
|
|
1646
|
+
|
|
1647
|
+
def _load_project_runners(root: Path, *, expose_namespace: bool) -> None:
    """Import runner modules for project `root`, at most once per root.

    Mirrors `_load_project_fixtures` but for a `runners/` directory (there
    is no flat `runners.py` variant). After loading, the runner registry
    snapshot is refreshed. Failures are re-raised as RuntimeError naming
    the project root.
    """
    resolved_root = root.resolve()
    if resolved_root in _RUNNER_LOAD_ROOTS:
        return

    runners_package = root / "runners"

    alias_target = None

    try:
        if runners_package.is_dir():
            init_file = runners_package / "__init__.py"
            if init_file.exists():
                alias_target = _import_module_from_path(
                    "_tenzir_project_runners", init_file, package=True
                )
            else:
                # No package init: import each module file individually.
                for candidate in sorted(runners_package.glob("*.py")):
                    alias_target = _import_module_from_path(
                        f"_tenzir_project_runner_{candidate.stem}", candidate
                    )
        if alias_target is not None and expose_namespace:
            # Never clobber an existing `runners` module.
            if "runners" not in sys.modules:
                sys.modules["runners"] = alias_target
    except ValueError as exc:
        raise RuntimeError(f"failed to load runners from {resolved_root}: {exc}") from exc
    except Exception as exc:  # pragma: no cover - defensive logging
        raise RuntimeError(f"failed to load runners from {resolved_root}: {exc}") from exc

    _RUNNER_LOAD_ROOTS.add(resolved_root)
    # Newly imported runner modules may have registered runners/extensions.
    _refresh_registry()
|
|
1678
|
+
|
|
1679
|
+
|
|
1680
|
+
def get_test_env_and_config_args(
    test: Path,
    *,
    inputs: str | os.PathLike[str] | None = None,
) -> tuple[dict[str, str], list[str]]:
    """Build the subprocess environment and tenzir config args for `test`.

    Returns (env, config_args). `config_args` carries `--config=...` when a
    sibling `tenzir.yaml` exists. The environment is a copy of os.environ
    extended with TENZIR_INPUTS (explicit `inputs`, resolved against the
    test's directory when relative, else the project inputs dir),
    TENZIR_CONFIG / TENZIR_NODE_CONFIG for sibling config files, binary
    overrides, TENZIR_TEST_ROOT, and a fresh per-test tmp directory.
    """
    config_file = test.parent / "tenzir.yaml"
    node_config_file = test.parent / "tenzir-node.yaml"
    config_args = [f"--config={config_file}"] if config_file.exists() else []
    env = os.environ.copy()
    if inputs is None:
        inputs_path = str(_resolve_inputs_dir(ROOT).resolve())
    else:
        candidate = Path(os.fspath(inputs))
        if not candidate.is_absolute():
            # Relative inputs are anchored at the test's directory.
            candidate = test.parent / candidate
        inputs_path = str(candidate.resolve())
    env["TENZIR_INPUTS"] = inputs_path
    if config_file.exists():
        # setdefault: an explicitly exported TENZIR_CONFIG wins.
        env.setdefault("TENZIR_CONFIG", str(config_file))
    if node_config_file.exists():
        env["TENZIR_NODE_CONFIG"] = str(node_config_file)
    if TENZIR_BINARY:
        env["TENZIR_BINARY"] = TENZIR_BINARY
    if TENZIR_NODE_BINARY:
        env["TENZIR_NODE_BINARY"] = TENZIR_NODE_BINARY
    env["TENZIR_TEST_ROOT"] = str(ROOT)
    tmp_dir = _create_test_tmp_dir(test)
    env[TEST_TMP_ENV_VAR] = str(tmp_dir)
    return env, config_args
|
|
1709
|
+
|
|
1710
|
+
|
|
1711
|
+
def _apply_fixture_env(env: dict[str, str], fixtures: tuple[str, ...]) -> None:
|
|
1712
|
+
if fixtures:
|
|
1713
|
+
env["TENZIR_TEST_FIXTURES"] = ",".join(fixtures)
|
|
1714
|
+
else:
|
|
1715
|
+
env.pop("TENZIR_TEST_FIXTURES", None)
|
|
1716
|
+
|
|
1717
|
+
|
|
1718
|
+
def set_debug_logging(enabled: bool) -> None:
    """Toggle harness debug logging and adjust the CLI logger level to match."""
    global _debug_logging
    _debug_logging = enabled
    level = logging.DEBUG if enabled else logging.WARNING
    _CLI_LOGGER.setLevel(level)
|
|
1722
|
+
|
|
1723
|
+
|
|
1724
|
+
def is_debug_logging_enabled() -> bool:
    """Return whether harness debug logging is currently enabled."""
    return _debug_logging
|
|
1726
|
+
|
|
1727
|
+
|
|
1728
|
+
def log_comparison(test: Path, ref_path: Path, *, mode: str) -> None:
    """Log a debug line describing how *test* is compared against *ref_path*.

    No-op unless debug logging is enabled and failure output is not being
    suppressed. Paths are logged relative to the harness root for brevity.
    """
    if not _debug_logging or should_suppress_failure_output():
        return
    rel_test = _relativize_path(test)
    rel_ref = _relativize_path(ref_path)
    _CLI_LOGGER.debug("%s %s -> %s", mode, rel_test, rel_ref)
|
|
1734
|
+
|
|
1735
|
+
|
|
1736
|
+
def report_failure(test: Path, message: str) -> None:
    """Mark *test* as failed and print *message*, honoring output suppression."""
    if should_suppress_failure_output():
        return
    # Hold the stdout lock so the failure marker and its message stay
    # contiguous when multiple worker threads report concurrently.
    with stdout_lock:
        fail(test)
        if message:
            print(message)
|
|
1743
|
+
|
|
1744
|
+
|
|
1745
|
+
def report_interrupted_test(test: Path) -> None:
    """Emit a standardized message for user-triggered interrupts."""

    # Reuse the regular failure path so suppression rules apply consistently.
    report_failure(test, _INTERRUPTED_NOTICE)
|
|
1749
|
+
|
|
1750
|
+
|
|
1751
|
+
def parse_test_config(test_file: Path, coverage: bool = False) -> TestConfig:
    """Parse test configuration from frontmatter at the beginning of the file.

    Supports two frontmatter styles: YAML between ``---`` markers for ``.tql``
    files, and leading ``# key: value`` comment lines for ``.py``/``.sh``
    files. Directory-level defaults seed the config before frontmatter is
    applied. Raises ``ValueError`` on malformed frontmatter or when no runner
    can be determined for the file's extension.
    """
    config = _default_test_config()

    # Directory defaults apply first; frontmatter overrides them below.
    defaults = _get_directory_defaults(test_file.parent)
    for key, value in defaults.items():
        config[key] = value

    is_tql = test_file.suffix == ".tql"
    comment_frontmatter_suffixes = {".py", ".sh"}
    is_comment_frontmatter = test_file.suffix in comment_frontmatter_suffixes

    def _error(message: str, line_number: int | None = None) -> None:
        # Raise a ValueError with a file (and optional line) location prefix.
        location = f"{test_file}:{line_number}" if line_number is not None else f"{test_file}"
        raise ValueError(f"Error in {location}: {message}")

    with open(test_file, "r", encoding="utf-8", errors="ignore") as handle:
        lines = handle.readlines()

    consumed_frontmatter = False
    if is_tql:
        # Skip leading blank lines before the opening '---'.
        idx = 0
        while idx < len(lines) and not lines[idx].strip():
            idx += 1
        if idx < len(lines) and lines[idx].strip() == "---":
            idx += 1
            yaml_lines: list[str] = []
            # Collect everything until the closing '---'.
            while idx < len(lines):
                line = lines[idx]
                if line.strip() == "---":
                    idx += 1
                    consumed_frontmatter = True
                    break
                yaml_lines.append(line)
                idx += 1
            if not consumed_frontmatter:
                _error("YAML frontmatter must be terminated with '---'")
            yaml_data = yaml.safe_load("".join(yaml_lines)) or {}
            if not isinstance(yaml_data, dict):
                _error("YAML frontmatter must define a mapping")
            for key, value in yaml_data.items():
                _assign_config_option(
                    config,
                    str(key),
                    value,
                    location=test_file,
                    origin="frontmatter",
                )

    if not consumed_frontmatter and is_comment_frontmatter:
        # Comment-style frontmatter: contiguous '# key: value' lines at the top.
        line_number = 0
        for raw_line in lines:
            line_number += 1
            stripped = raw_line.strip()
            if line_number == 1 and stripped.startswith("#!"):
                # Skip shebangs so subsequent frontmatter comments still apply.
                continue
            if not stripped.startswith("#"):
                break
            content = stripped[1:].strip()
            parts = content.split(":", 1)
            if len(parts) != 2:
                # A non-key-value first comment is tolerated (e.g. a title);
                # after that, malformed frontmatter is an error.
                if line_number == 1:
                    break
                _error("Invalid frontmatter, expected 'key: value'", line_number)
            key = parts[0].strip()
            value = parts[1].strip()
            _assign_config_option(
                config,
                key,
                value,
                location=test_file,
                line_number=line_number,
                origin="frontmatter",
            )

    if coverage:
        # Instrumented binaries are much slower; scale the timeout.
        timeout_value = cast(int, config["timeout"])
        config["timeout"] = timeout_value * 5

    runner_value = config.get("runner")
    if not isinstance(runner_value, str) or not runner_value:
        # No explicit runner: derive one from the file extension, first via
        # the built-in mapping, then via registered runners' extensions.
        suffix = test_file.suffix.lower()
        default_runner = _DEFAULT_RUNNER_BY_SUFFIX.get(suffix)
        if default_runner is None:
            matching_names = [
                runner.name
                for runner in runners_iter_runners()
                if getattr(runner, "_ext", None) == suffix.lstrip(".")
            ]
            if not matching_names:
                raise ValueError(
                    f"No runner registered for '{test_file}' (extension '{suffix or '<none>'}')"
                    " and no 'runner' specified in frontmatter"
                )
            default_runner = matching_names[0]
        config["runner"] = default_runner
    if config.get("suite") is None:
        # Normalize: an explicit null suite is the same as no suite at all.
        config.pop("suite", None)
    return config
|
|
1851
|
+
|
|
1852
|
+
|
|
1853
|
+
def print(*args: object, **kwargs: Any) -> None:
    """Drop-in replacement for :func:`builtins.print` that flushes by default."""
    # TODO: Properly solve the synchronization below.
    kwargs.setdefault("flush", True)
    builtins.print(*args, **kwargs)
|
|
1858
|
+
|
|
1859
|
+
|
|
1860
|
+
@dataclasses.dataclass
class RunnerStats:
    """Per-runner tally of executed tests."""

    # Number of tests handled by this runner.
    total: int = 0
    # Number of those tests that failed.
    failed: int = 0
    # Number of those tests that were skipped.
    skipped: int = 0
|
|
1865
|
+
|
|
1866
|
+
|
|
1867
|
+
@dataclasses.dataclass
class FixtureStats:
    """Per-fixture tally of executed tests."""

    # Number of tests that used this fixture.
    total: int = 0
    # Number of those tests that failed.
    failed: int = 0
    # Number of those tests that were skipped.
    skipped: int = 0
|
|
1872
|
+
|
|
1873
|
+
|
|
1874
|
+
def _merge_runner_stats(
    left: dict[str, RunnerStats], right: dict[str, RunnerStats]
) -> dict[str, RunnerStats]:
    """Combine two per-runner stat mappings by summing each counter."""
    combined: dict[str, RunnerStats] = {}
    for name in {**left, **right}:
        totals = RunnerStats()
        for source in (left.get(name), right.get(name)):
            if source is None:
                continue
            totals.total += source.total
            totals.failed += source.failed
            totals.skipped += source.skipped
        combined[name] = totals
    return combined
|
|
1890
|
+
|
|
1891
|
+
|
|
1892
|
+
def _merge_fixture_stats(
    left: dict[str, FixtureStats], right: dict[str, FixtureStats]
) -> dict[str, FixtureStats]:
    """Combine two per-fixture stat mappings by summing each counter."""
    combined: dict[str, FixtureStats] = {}
    for name in {**left, **right}:
        totals = FixtureStats()
        for source in (left.get(name), right.get(name)):
            if source is None:
                continue
            totals.total += source.total
            totals.failed += source.failed
            totals.skipped += source.skipped
        combined[name] = totals
    return combined
|
|
1908
|
+
|
|
1909
|
+
|
|
1910
|
+
@dataclasses.dataclass
class Summary:
    """Aggregated outcome counters for a set of executed tests."""

    failed: int = 0
    total: int = 0
    skipped: int = 0
    failed_paths: list[Path] = dataclasses.field(default_factory=list)
    skipped_paths: list[Path] = dataclasses.field(default_factory=list)
    runner_stats: dict[str, RunnerStats] = dataclasses.field(default_factory=dict)
    fixture_stats: dict[str, FixtureStats] = dataclasses.field(default_factory=dict)

    def __add__(self, other: "Summary") -> "Summary":
        """Return a new summary combining the counters of both operands."""
        combined_failed = list(self.failed_paths) + list(other.failed_paths)
        combined_skipped = list(self.skipped_paths) + list(other.skipped_paths)
        return Summary(
            failed=self.failed + other.failed,
            total=self.total + other.total,
            skipped=self.skipped + other.skipped,
            failed_paths=combined_failed,
            skipped_paths=combined_skipped,
            runner_stats=_merge_runner_stats(self.runner_stats, other.runner_stats),
            fixture_stats=_merge_fixture_stats(self.fixture_stats, other.fixture_stats),
        )

    def record_runner_outcome(self, runner_name: str, outcome: bool | str) -> None:
        """Count one test result for *runner_name*.

        ``"skipped"`` increments the skip counter; any other falsy outcome
        counts as a failure.
        """
        entry = self.runner_stats.setdefault(runner_name, RunnerStats())
        entry.total += 1
        if outcome == "skipped":
            entry.skipped += 1
        elif not outcome:
            entry.failed += 1

    def record_fixture_outcome(self, fixtures: Iterable[str], outcome: bool | str) -> None:
        """Count one test result against every fixture the test used."""
        for fixture_name in fixtures:
            entry = self.fixture_stats.setdefault(fixture_name, FixtureStats())
            entry.total += 1
            if outcome == "skipped":
                entry.skipped += 1
            elif not outcome:
                entry.failed += 1
|
|
1947
|
+
|
|
1948
|
+
|
|
1949
|
+
@dataclasses.dataclass(slots=True)
class ProjectResult:
    """Outcome of executing one project's selected tests."""

    # The project selection that was executed.
    selection: ProjectSelection
    # Aggregated per-test outcomes for this project.
    summary: Summary
    # Number of queue entries that were scheduled for this project.
    queue_size: int
|
|
1954
|
+
|
|
1955
|
+
|
|
1956
|
+
@dataclasses.dataclass(slots=True)
class ExecutionResult:
    """Overall outcome of a harness invocation across all projects."""

    # Aggregated outcomes across every project.
    summary: Summary
    # Per-project results, in execution order.
    project_results: tuple[ProjectResult, ...]
    # Total number of queue entries scheduled.
    queue_size: int
    # Process exit code the harness should terminate with.
    exit_code: int
    # Whether execution was cut short by a user interrupt.
    interrupted: bool
|
|
1963
|
+
|
|
1964
|
+
|
|
1965
|
+
class HarnessError(RuntimeError):
    """Fatal harness error signalling invalid invocation or configuration."""

    def __init__(self, message: str, *, exit_code: int = 1, show_message: bool = True) -> None:
        """Store the exit status and whether *message* should be echoed."""
        super().__init__(message)
        # Whether the message should be printed before exiting.
        self.show_message = show_message
        # Exit status the harness process should terminate with.
        self.exit_code = exit_code
|
|
1972
|
+
|
|
1973
|
+
|
|
1974
|
+
def _format_percentage(count: int, total: int) -> str:
    """Render *count* as an integer percentage of *total*, e.g. ``"42%"``."""
    value = _percentage_value(count, total)
    return f"{value}%"
|
|
1976
|
+
|
|
1977
|
+
|
|
1978
|
+
def _percentage_value(count: int, total: int) -> int:
|
|
1979
|
+
if total <= 0:
|
|
1980
|
+
return 0
|
|
1981
|
+
return int(round((count / total) * 100))
|
|
1982
|
+
|
|
1983
|
+
|
|
1984
|
+
def _format_summary(summary: Summary) -> str:
    """Build the one-line human-readable test summary string."""
    total = summary.total
    if total <= 0:
        return "Test summary: No tests were discovered."

    passed = max(0, total - summary.failed - summary.skipped)
    segments = [
        f"{CHECKMARK} Passed {passed}/{total} ({_format_percentage(passed, total)})",
        f"{CROSS} Failed {summary.failed} ({_format_percentage(summary.failed, total)})",
        f"{SKIP} Skipped {summary.skipped} ({_format_percentage(summary.skipped, total)})",
    ]
    return "Test summary: " + " • ".join(segments)
|
|
1999
|
+
|
|
2000
|
+
|
|
2001
|
+
def _summarize_runner_plan(
    queue: Sequence[RunnerQueueItem],
    *,
    tenzir_version: str | None,
    runner_versions: Mapping[str, str] | None = None,
) -> str:
    """Describe the queued runners as a comma-separated count listing."""
    breakdown = _runner_breakdown(
        queue,
        tenzir_version=tenzir_version,
        runner_versions=runner_versions,
    )
    if not breakdown:
        return "no runners"
    rendered: list[str] = []
    for name, count, version in breakdown:
        label = f"{name} (v{version})" if version else name
        rendered.append(f"{count}× {label}")
    return ", ".join(rendered)
|
|
2021
|
+
|
|
2022
|
+
|
|
2023
|
+
def _iter_queue_tests(queue: Sequence[RunnerQueueItem]) -> Iterator[TestQueueItem]:
    """Yield every individual test item, flattening suite entries."""
    for entry in queue:
        if not isinstance(entry, SuiteQueueItem):
            yield entry
        else:
            yield from entry.tests
|
|
2029
|
+
|
|
2030
|
+
|
|
2031
|
+
def _suite_test_sort_key(directory: Path, path: Path) -> str:
|
|
2032
|
+
try:
|
|
2033
|
+
relative = path.relative_to(directory)
|
|
2034
|
+
except ValueError:
|
|
2035
|
+
return path.as_posix()
|
|
2036
|
+
return relative.as_posix()
|
|
2037
|
+
|
|
2038
|
+
|
|
2039
|
+
def _queue_sort_key(item: RunnerQueueItem) -> str:
    """Sort key for queue entries: first test path, suite directory, or path."""
    if not isinstance(item, SuiteQueueItem):
        return str(item.path)
    if item.tests:
        return str(item.tests[0].path)
    return str(item.suite.directory)
|
|
2045
|
+
|
|
2046
|
+
|
|
2047
|
+
def _path_is_within(path: Path, directory: Path) -> bool:
|
|
2048
|
+
try:
|
|
2049
|
+
path.relative_to(directory)
|
|
2050
|
+
return True
|
|
2051
|
+
except ValueError:
|
|
2052
|
+
return False
|
|
2053
|
+
|
|
2054
|
+
|
|
2055
|
+
def _build_queue_from_paths(
    paths: Iterable[Path],
    *,
    coverage: bool,
) -> list[RunnerQueueItem]:
    """Group resolved test paths into suite and individual queue entries.

    Tests belonging to a suite are bundled into a ``SuiteQueueItem`` when the
    suite is valid and all its tests declare identical fixtures; otherwise
    they fall back to individual entries. Raises ``HarnessError`` when no
    runner matches a test or when a suite's fixtures are inconsistent.
    """
    suite_groups: dict[SuiteInfo, SuiteCandidate] = {}
    individuals: dict[Path, TestQueueItem] = {}

    # Deduplicate and sort for deterministic queue ordering.
    for test_path in sorted({path.resolve() for path in paths}, key=lambda p: str(p)):
        try:
            runner = get_runner_for_test(test_path)
        except ValueError as error:
            raise HarnessError(f"error: {error}") from error

        suite_info = _resolve_suite_for_test(test_path)
        test_item = TestQueueItem(runner=runner, path=test_path)
        if suite_info is None:
            individuals[test_path] = test_item
            continue

        candidate = suite_groups.setdefault(suite_info, SuiteCandidate(tests=[]))
        candidate.tests.append(test_item)
        try:
            config = parse_test_config(test_path, coverage=coverage)
        except ValueError:
            # Unparseable tests invalidate the suite candidate but still run
            # individually (handled by is_valid() below).
            candidate.mark_parse_error()
            continue
        fixtures = cast(tuple[str, ...], config.get("fixtures", tuple()))
        candidate.record_fixtures(fixtures)

    queue: list[RunnerQueueItem] = []
    for suite_info, candidate in suite_groups.items():
        if candidate.fixture_mismatch:
            # Suites must share one fixture set; report the offending test.
            example = candidate.mismatch_example or tuple()
            expected = candidate.fixtures or tuple()
            config_path = suite_info.directory / _CONFIG_FILE_NAME
            expected_list = ", ".join(expected) or "<none>"
            example_list = ", ".join(example) or "<none>"
            mismatch_path = candidate.mismatch_path or (
                candidate.tests[-1].path if candidate.tests else None
            )
            location_detail = (
                f" ({_relativize_path(mismatch_path)})" if mismatch_path is not None else ""
            )
            raise HarnessError(
                f"error: suite '{suite_info.name}' defined in {config_path} must use identical fixtures "
                f"across tests (expected: {expected_list}, found: {example_list}{location_detail})"
            )
        if not candidate.is_valid() or not candidate.tests:
            # Invalid suite: demote its tests back to individual entries.
            for test_item in candidate.tests:
                individuals[test_item.path] = test_item
            continue
        fixtures = candidate.fixtures or tuple()
        sorted_tests = sorted(
            candidate.tests,
            key=lambda item: _suite_test_sort_key(suite_info.directory, item.path),
        )
        queue.append(SuiteQueueItem(suite=suite_info, tests=sorted_tests, fixtures=fixtures))

    queue.extend(individuals.values())
    return queue
|
|
2116
|
+
|
|
2117
|
+
|
|
2118
|
+
def _collect_runner_versions(
    queue: Sequence[RunnerQueueItem],
    *,
    tenzir_version: str | None,
) -> dict[str, str]:
    """Gather a runner-name → version mapping for everything in *queue*."""
    versions: dict[str, str] = {}
    if tenzir_version:
        versions["tenzir"] = tenzir_version

    # First occurrence wins; later duplicates are ignored via setdefault.
    for entry in _iter_queue_tests(queue):
        reported = getattr(entry.runner, "version", None)
        if isinstance(reported, str) and reported:
            versions.setdefault(entry.runner.name, reported)
    return versions
|
|
2132
|
+
|
|
2133
|
+
|
|
2134
|
+
def _runner_breakdown(
    queue: Sequence[RunnerQueueItem],
    *,
    tenzir_version: str | None,
    runner_versions: Mapping[str, str] | None = None,
) -> list[tuple[str, int, str | None]]:
    """Count queued tests per runner, attaching a version string when known."""
    counts: dict[str, int] = {}
    for entry in _iter_queue_tests(queue):
        runner_name = entry.runner.name
        counts[runner_name] = counts.get(runner_name, 0) + 1

    known_versions = runner_versions or {}
    result: list[tuple[str, int, str | None]] = []
    for runner_name in sorted(counts):
        version = known_versions.get(runner_name)
        if version is None and runner_name == "tenzir":
            # Fall back to the detected binary version for the default runner.
            version = tenzir_version
        result.append((runner_name, counts[runner_name], version))
    return result
|
|
2151
|
+
|
|
2152
|
+
|
|
2153
|
+
def _count_queue_tests(queue: Sequence[RunnerQueueItem]) -> int:
    """Return the number of individual tests in *queue*, expanding suites."""
    return sum(
        len(entry.tests) if isinstance(entry, SuiteQueueItem) else 1
        for entry in queue
    )
|
|
2161
|
+
|
|
2162
|
+
|
|
2163
|
+
def _print_aggregate_totals(project_count: int, summary: Summary) -> None:
    """Print a one-line colored roll-up of all projects' test results."""
    total = summary.total
    failed = summary.failed
    skipped = summary.skipped
    passed = total - failed - skipped
    # Percentages are computed against executed tests (skips excluded).
    executed = max(total - skipped, 0)
    project_noun = "project" if project_count == 1 else "projects"
    test_noun = "test" if total == 1 else "tests"
    if total <= 0:
        print(f"{INFO} ran 0 tests across {project_count} {project_noun}")
        return
    pass_rate = _percentage_value(passed, executed) if executed > 0 else 0
    fail_rate = _percentage_value(failed, executed) if executed > 0 else 0
    # Map the pass rate onto the color spectrum (one bucket per 10%).
    pass_index = min(pass_rate // 10, len(PASS_SPECTRUM) - 1)
    passed_percentage = f"{PASS_SPECTRUM[pass_index]}{pass_rate}%{RESET_COLOR}"
    if fail_rate > 0:
        failed_percentage = f"{FAIL_COLOR}{fail_rate}%{RESET_COLOR}"
    else:
        failed_percentage = f"{fail_rate}%"
    pass_segment = f"{passed} passed ({passed_percentage})"
    fail_segment = f"{failed} failed ({failed_percentage})"
    detail = f"{pass_segment} / {fail_segment}"
    if skipped:
        detail = f"{detail} • {skipped} skipped"
    print(f"{INFO} ran {total} {test_noun} across {project_count} {project_noun}: {detail}")
|
|
2188
|
+
|
|
2189
|
+
|
|
2190
|
+
def _summarize_harness_configuration(
    *,
    jobs: int,
    update: bool,
    coverage: bool,
    debug: bool,
    show_summary: bool,
    runner_summary: bool,
    fixture_summary: bool,
    passthrough: bool,
) -> tuple[int, str, str]:
    """Summarize CLI configuration as ``(jobs, enabled flag list, action verb)``."""
    toggles = (
        ("coverage", coverage),
        ("debug", debug),
        ("summary", show_summary),
        ("runner-summary", runner_summary),
        ("fixture-summary", fixture_summary),
        ("keep-tmp-dirs", KEEP_TMP_DIRS),
    )
    enabled = ", ".join(name for name, flag in toggles if flag)
    if update:
        verb = "updating"
    elif passthrough:
        verb = "showing"
    else:
        verb = "running"
    return jobs, enabled, verb
|
|
2220
|
+
|
|
2221
|
+
|
|
2222
|
+
def _relativize_path(path: Path) -> Path:
    """Express *path* relative to the harness root when possible."""
    try:
        return path.relative_to(ROOT)
    except ValueError:
        pass
    # relative_to only handles prefixes; os.path.relpath also handles paths
    # outside ROOT, but may itself raise (e.g. different drives on Windows).
    try:
        return Path(os.path.relpath(path, ROOT))
    except ValueError:
        return path
|
|
2231
|
+
|
|
2232
|
+
|
|
2233
|
+
def _get_test_fixtures(test: Path, *, coverage: bool) -> tuple[str, ...]:
    """Return the fixtures a test declares, or an empty tuple on parse errors."""
    try:
        config = parse_test_config(test, coverage=coverage)
    except ValueError:
        return tuple()
    raw = config.get("fixtures", tuple())
    if isinstance(raw, tuple):
        return typing.cast(tuple[str, ...], raw)
    # Normalize any other iterable into a tuple.
    return tuple(typing.cast(Iterable[str], raw))
|
|
2242
|
+
|
|
2243
|
+
|
|
2244
|
+
def _build_path_tree(paths: Iterable[Path]) -> dict[str, dict[str, Any]]:
|
|
2245
|
+
tree: dict[str, dict[str, Any]] = {}
|
|
2246
|
+
for path in sorted(paths, key=lambda p: p.parts):
|
|
2247
|
+
node = tree
|
|
2248
|
+
parts = path.parts
|
|
2249
|
+
if parts and parts[0] in {"..", "."}:
|
|
2250
|
+
parts = (path.as_posix(),)
|
|
2251
|
+
for part in parts:
|
|
2252
|
+
node = node.setdefault(part, {})
|
|
2253
|
+
return tree
|
|
2254
|
+
|
|
2255
|
+
|
|
2256
|
+
def _render_tree(tree: dict[str, dict[str, Any]], prefix: str = "") -> Iterator[str]:
|
|
2257
|
+
items = sorted(tree.items())
|
|
2258
|
+
for index, (name, subtree) in enumerate(items):
|
|
2259
|
+
is_last = index == len(items) - 1
|
|
2260
|
+
connector = "└── " if is_last else "├── "
|
|
2261
|
+
yield f"{prefix}{connector}{name}"
|
|
2262
|
+
if subtree:
|
|
2263
|
+
extension = " " if is_last else "│ "
|
|
2264
|
+
yield from _render_tree(subtree, prefix + extension)
|
|
2265
|
+
|
|
2266
|
+
|
|
2267
|
+
def _strip_ansi(value: str) -> str:
    """Remove ANSI escape sequences from *value* for width calculations."""
    return ANSI_ESCAPE.sub("", value)
|
|
2269
|
+
|
|
2270
|
+
|
|
2271
|
+
def _ljust_visible(value: str, width: int) -> str:
    """Left-justify *value* to *width* visible (non-ANSI) characters."""
    padding = width - len(_strip_ansi(value))
    if padding <= 0:
        return value
    return value + " " * padding
|
|
2276
|
+
|
|
2277
|
+
|
|
2278
|
+
def _rjust_visible(value: str, width: int) -> str:
    """Right-justify *value* to *width* visible (non-ANSI) characters."""
    padding = width - len(_strip_ansi(value))
    if padding <= 0:
        return value
    return " " * padding + value
|
|
2283
|
+
|
|
2284
|
+
|
|
2285
|
+
def _color_tree_glyphs(line: str, color: str) -> str:
|
|
2286
|
+
glyphs = {"│", "├", "└", "─"}
|
|
2287
|
+
parts = []
|
|
2288
|
+
for char in line:
|
|
2289
|
+
if char in glyphs and color:
|
|
2290
|
+
parts.append(f"{color}{char}{RESET_COLOR}")
|
|
2291
|
+
else:
|
|
2292
|
+
parts.append(char)
|
|
2293
|
+
return "".join(parts)
|
|
2294
|
+
|
|
2295
|
+
|
|
2296
|
+
def _render_runner_box(summary: Summary) -> list[str]:
    """Render a box-drawing table of per-runner outcome statistics.

    Returns the table as a list of lines, or an empty list when no runner
    statistics were collected. NOTE(review): this mirrors
    _render_fixture_box almost verbatim; a shared helper may be worth it.
    """
    if not summary.runner_stats:
        return []

    headers = ("Runner", "Passed", "Failed", "Skipped", "Total", "Share")
    rows: list[tuple[str, str, str, str, str, str]] = []
    for name in sorted(summary.runner_stats):
        stats = summary.runner_stats[name]
        # Passed is derived; clamp at zero in case counters are inconsistent.
        passed = max(0, stats.total - stats.failed - stats.skipped)
        rows.append(
            (
                name,
                str(passed),
                str(stats.failed),
                str(stats.skipped),
                str(stats.total),
                _format_percentage(stats.total, summary.total),
            )
        )

    # Column widths: max of header and cell widths, measured without ANSI
    # escapes so colored cells do not skew the layout.
    label_width = max(len(headers[0]), *(len(_strip_ansi(row[0])) for row in rows))
    passed_width = max(len(headers[1]), *(len(row[1]) for row in rows))
    failed_width = max(len(headers[2]), *(len(row[2]) for row in rows))
    skipped_width = max(len(headers[3]), *(len(row[3]) for row in rows))
    total_width = max(len(headers[4]), *(len(row[4]) for row in rows))
    share_width = max(len(headers[5]), *(len(row[5]) for row in rows))

    def frame(char: str, width: int) -> str:
        # Horizontal rule segment for one cell, including cell padding.
        return char * (width + 2)

    top = (
        f"┌{frame('─', label_width)}┬{frame('─', passed_width)}┬{frame('─', failed_width)}"
        f"┬{frame('─', skipped_width)}┬{frame('─', total_width)}┬{frame('─', share_width)}┐"
    )
    header = (
        f"│ {_ljust_visible(headers[0], label_width)} │ {headers[1].rjust(passed_width)} │ "
        f"{headers[2].rjust(failed_width)} │ {headers[3].rjust(skipped_width)} │ "
        f"{headers[4].rjust(total_width)} │ {headers[5].rjust(share_width)} │"
    )
    separator = (
        f"├{frame('─', label_width)}┼{frame('─', passed_width)}┼{frame('─', failed_width)}"
        f"┼{frame('─', skipped_width)}┼{frame('─', total_width)}┼{frame('─', share_width)}┤"
    )
    body = [
        f"│ {_ljust_visible(name, label_width)} │ {_rjust_visible(passed, passed_width)} │ "
        f"{_rjust_visible(failed, failed_width)} │ {_rjust_visible(skipped, skipped_width)} │ "
        f"{_rjust_visible(total, total_width)} │ {_rjust_visible(share, share_width)} │"
        for name, passed, failed, skipped, total, share in rows
    ]
    bottom = (
        f"└{frame('─', label_width)}┴{frame('─', passed_width)}┴{frame('─', failed_width)}"
        f"┴{frame('─', skipped_width)}┴{frame('─', total_width)}┴{frame('─', share_width)}┘"
    )
    return [top, header, separator, *body, bottom]
|
|
2350
|
+
|
|
2351
|
+
|
|
2352
|
+
def _render_fixture_box(summary: Summary) -> list[str]:
    """Render a box-drawing table of per-fixture outcome statistics.

    Returns the table as a list of lines, or an empty list when no fixture
    statistics were collected.
    """
    if not summary.fixture_stats:
        return []

    headers = ("Fixture", "Passed", "Failed", "Skipped", "Total", "Share")
    rows: list[tuple[str, str, str, str, str, str]] = []
    for name in sorted(summary.fixture_stats):
        stats = summary.fixture_stats[name]
        # Passed is derived; clamp at zero in case counters are inconsistent.
        passed = max(0, stats.total - stats.failed - stats.skipped)
        rows.append(
            (
                name,
                str(passed),
                str(stats.failed),
                str(stats.skipped),
                str(stats.total),
                _format_percentage(stats.total, summary.total),
            )
        )

    # Column widths: max of header and cell widths, measured without ANSI
    # escapes so colored cells do not skew the layout.
    label_width = max(len(headers[0]), *(len(_strip_ansi(row[0])) for row in rows))
    passed_width = max(len(headers[1]), *(len(row[1]) for row in rows))
    failed_width = max(len(headers[2]), *(len(row[2]) for row in rows))
    skipped_width = max(len(headers[3]), *(len(row[3]) for row in rows))
    total_width = max(len(headers[4]), *(len(row[4]) for row in rows))
    share_width = max(len(headers[5]), *(len(row[5]) for row in rows))

    def frame(char: str, width: int) -> str:
        # Horizontal rule segment for one cell, including cell padding.
        return char * (width + 2)

    top = (
        f"┌{frame('─', label_width)}┬{frame('─', passed_width)}┬{frame('─', failed_width)}"
        f"┬{frame('─', skipped_width)}┬{frame('─', total_width)}┬{frame('─', share_width)}┐"
    )
    header = (
        f"│ {_ljust_visible(headers[0], label_width)} │ {headers[1].rjust(passed_width)} │ "
        f"{headers[2].rjust(failed_width)} │ {headers[3].rjust(skipped_width)} │ "
        f"{headers[4].rjust(total_width)} │ {headers[5].rjust(share_width)} │"
    )
    separator = (
        f"├{frame('─', label_width)}┼{frame('─', passed_width)}┼{frame('─', failed_width)}"
        f"┼{frame('─', skipped_width)}┼{frame('─', total_width)}┼{frame('─', share_width)}┤"
    )
    body = [
        f"│ {_ljust_visible(name, label_width)} │ {_rjust_visible(passed, passed_width)} │ "
        f"{_rjust_visible(failed, failed_width)} │ {_rjust_visible(skipped, skipped_width)} │ "
        f"{_rjust_visible(total, total_width)} │ {_rjust_visible(share, share_width)} │"
        for name, passed, failed, skipped, total, share in rows
    ]
    bottom = (
        f"└{frame('─', label_width)}┴{frame('─', passed_width)}┴{frame('─', failed_width)}"
        f"┴{frame('─', skipped_width)}┴{frame('─', total_width)}┴{frame('─', share_width)}┘"
    )
    return [top, header, separator, *body, bottom]
|
|
2406
|
+
|
|
2407
|
+
|
|
2408
|
+
def _render_summary_box(summary: Summary) -> list[str]:
    """Render the overall outcome box (passed/failed/skipped/total) as lines."""
    total = summary.total
    if total <= 0:
        return [
            "┌──────────────────────────┐",
            "│ No tests were discovered │",
            "└──────────────────────────┘",
        ]

    passed = max(0, total - summary.failed - summary.skipped)
    # Pass/fail shares are computed against executed tests; the skip share
    # is computed against the grand total.
    executed = max(total - summary.skipped, 0)
    pass_share = _percentage_value(passed, executed) if executed > 0 else 0
    fail_share = _percentage_value(summary.failed, executed) if executed > 0 else 0
    skip_share = _percentage_value(summary.skipped, total)

    executed_rows = [
        (f"{CHECKMARK} Passed", str(passed), f"{pass_share}%"),
        (f"{CROSS} Failed", str(summary.failed), f"{fail_share}%"),
    ]
    skipped_row = (f"{SKIP} Skipped", str(summary.skipped), f"{skip_share}%")
    total_row = ("∑ Total", str(total), "")

    all_rows = [*executed_rows, skipped_row, total_row]
    headers = ("Outcome", "Count")
    # Widths are measured without ANSI escapes so glyph coloring cannot
    # break the box alignment.
    label_width = max(len(headers[0]), *(len(_strip_ansi(row[0])) for row in all_rows))
    count_lengths = [len(_strip_ansi(row[1])) for row in all_rows]
    count_width = max(count_lengths, default=0)
    share_lengths = [len(row[2]) for row in all_rows if row[2]]
    percent_width = max(share_lengths, default=0)
    # The count column embeds the percentage sub-column with two spaces of
    # separation when any percentage is present.
    spacing = 2 if percent_width else 0
    column_width = count_width + spacing + percent_width

    def frame(char: str, width: int) -> str:
        # Horizontal rule segment for one cell, including cell padding.
        return char * (width + 2)

    top = f"┌{frame('─', label_width)}┬{frame('─', column_width)}┐"
    header = f"│ {_ljust_visible(headers[0], label_width)} │ {_ljust_visible(headers[1], column_width)} │"
    separator = f"├{frame('─', label_width)}┼{frame('─', column_width)}┤"
    executed_lines = [
        f"│ {_ljust_visible(label, label_width)} │ "
        f"{_rjust_visible(count, count_width)}"
        f"{' ' * spacing if percent_width else ''}"
        f"{_rjust_visible(percent, percent_width) if percent_width else ''} │"
        for label, count, percent in executed_rows
    ]
    group_separator = f"├{frame('─', label_width)}┼{frame('─', column_width)}┤"
    skipped_line = (
        f"│ {_ljust_visible(skipped_row[0], label_width)} │ "
        f"{_rjust_visible(skipped_row[1], count_width)}"
        f"{' ' * spacing if percent_width else ''}"
        f"{_rjust_visible(skipped_row[2], percent_width) if percent_width else ''} │"
    )
    summary_separator = f"├{frame('─', label_width)}┼{frame('─', column_width)}┤"
    total_line = (
        f"│ {_ljust_visible(total_row[0], label_width)} │ "
        f"{_ljust_visible(total_row[1], column_width)} │"
    )
    bottom = f"└{frame('─', label_width)}┴{frame('─', column_width)}┘"
    return [
        top,
        header,
        separator,
        *executed_lines,
        group_separator,
        skipped_line,
        summary_separator,
        total_line,
        bottom,
    ]
|
|
2477
|
+
|
|
2478
|
+
|
|
2479
|
+
def _print_ascii_summary(summary: Summary, *, include_runner: bool, include_fixture: bool) -> None:
    """Print the runner, fixture, and outcome summary boxes.

    Boxes that render empty are omitted; the remaining boxes are printed in
    order with a blank line between consecutive boxes. Nothing is printed
    when every box is empty.
    """
    boxes = [
        _render_runner_box(summary) if include_runner else [],
        _render_fixture_box(summary) if include_fixture else [],
        _render_summary_box(summary),
    ]
    segments = [box for box in boxes if box]
    if not segments:
        return
    print()
    for position, block in enumerate(segments, start=1):
        for rendered in block:
            print(rendered)
        # Blank line between boxes, but not after the last one.
        if position < len(segments):
            print()
|
|
2498
|
+
|
|
2499
|
+
|
|
2500
|
+
def _print_compact_summary(summary: Summary) -> None:
    """Print a one-line overview of the run: counts plus colored pass/fail rates."""
    total = summary.total
    if total <= 0:
        print(f"{INFO} ran 0 tests")
        return
    passed = max(0, total - summary.failed - summary.skipped)
    executed = max(total - summary.skipped, 0)
    # Rates are computed against executed tests only; skipped tests do not count.
    pass_rate = _percentage_value(passed, executed) if executed > 0 else 0
    fail_rate = _percentage_value(summary.failed, executed) if executed > 0 else 0
    # Pick a spectrum color bucket per 10% of pass rate, clamped to the last entry.
    spectrum_index = min(pass_rate // 10, len(PASS_SPECTRUM) - 1)
    pass_percentage = f"{PASS_SPECTRUM[spectrum_index]}{pass_rate}%{RESET_COLOR}"
    if fail_rate > 0:
        fail_percentage = f"{FAIL_COLOR}{fail_rate}%{RESET_COLOR}"
    else:
        fail_percentage = f"{fail_rate}%"
    detail = (
        f"{passed} passed ({pass_percentage})"
        f" / {summary.failed} failed ({fail_percentage})"
    )
    if summary.skipped:
        detail = f"{detail} • {summary.skipped} skipped"
    noun = "test" if total == 1 else "tests"
    print(f"{INFO} ran {total} {noun}: {detail}")
|
|
2522
|
+
|
|
2523
|
+
|
|
2524
|
+
def _print_detailed_summary(summary: Summary) -> None:
    """Print path trees for skipped and failed tests, when any exist."""
    have_skipped = bool(summary.skipped_paths)
    have_failed = bool(summary.failed_paths)
    if not have_skipped and not have_failed:
        return

    print()
    if have_skipped:
        print(f"{SKIP} Skipped tests:")
        for rendered in _render_tree(_build_path_tree(summary.skipped_paths)):
            print(f" {rendered}")
        # Separate the two sections with a blank line when both are present.
        if have_failed:
            print()
    if have_failed:
        print(f"{CROSS} Failed tests:")
        for rendered in _render_tree(_build_path_tree(summary.failed_paths)):
            print(f" {_color_tree_glyphs(rendered, FAIL_COLOR)}")
|
|
2539
|
+
|
|
2540
|
+
|
|
2541
|
+
def get_version() -> str:
    """Return the Tenzir version string reported by the configured binary.

    Raises:
        FileNotFoundError: If ``TENZIR_BINARY`` has not been configured.
    """
    if not TENZIR_BINARY:
        raise FileNotFoundError("TENZIR_BINARY is not configured")
    command = [
        TENZIR_BINARY,
        "--bare-mode",
        "--console-verbosity=warning",
        "version | select version | write_lines",
    ]
    raw = subprocess.check_output(command)
    return raw.decode().strip()
|
|
2556
|
+
|
|
2557
|
+
|
|
2558
|
+
def success(test: Path) -> None:
    """Print a checkmark line announcing *test* as passed."""
    with stdout_lock:
        suffixes = f"{_format_suite_suffix()}{_format_attempt_suffix()}"
        print(f"{CHECKMARK} {_relativize_path(test)}{suffixes}")
|
|
2564
|
+
|
|
2565
|
+
|
|
2566
|
+
def fail(test: Path) -> None:
    """Print a cross line announcing *test* as failed."""
    with stdout_lock:
        suffixes = f"{_format_suite_suffix()}{_format_attempt_suffix()}"
        print(f"{CROSS} {_relativize_path(test)}{suffixes}")
|
|
2572
|
+
|
|
2573
|
+
|
|
2574
|
+
def last_and(items: Iterable[T]) -> Iterator[tuple[bool, T]]:
    """Yield ``(is_last, item)`` pairs for *items*.

    Every element is paired with ``False`` except the final one, which is
    paired with ``True``. An empty iterable yields nothing.
    """
    iterator = iter(items)
    try:
        previous = next(iterator)
    except StopIteration:
        # Empty input: nothing to yield.
        return
    for item in iterator:
        yield (False, previous)
        previous = item
    # Bug fix: the original returned here, silently dropping the final
    # element and never emitting a True flag — which defeats the whole
    # point of the helper. Emit the held-back last element with True.
    yield (True, previous)
|
|
2583
|
+
|
|
2584
|
+
|
|
2585
|
+
def _format_unary_symbols(count: int, symbols: dict[int, str]) -> str:
|
|
2586
|
+
if count <= 0:
|
|
2587
|
+
return ""
|
|
2588
|
+
hundreds, remainder = divmod(count, 100)
|
|
2589
|
+
tens, ones = divmod(remainder, 10)
|
|
2590
|
+
parts: list[str] = []
|
|
2591
|
+
if hundreds:
|
|
2592
|
+
parts.append(symbols[100] * hundreds)
|
|
2593
|
+
if tens:
|
|
2594
|
+
parts.append(symbols[10] * tens)
|
|
2595
|
+
if ones:
|
|
2596
|
+
parts.append(symbols[1] * ones)
|
|
2597
|
+
return "".join(parts)
|
|
2598
|
+
|
|
2599
|
+
|
|
2600
|
+
def _format_diff_counter(added: int, removed: int) -> str:
    """Return colored unary tallies for added (+) then removed (-) lines."""
    segments: list[str] = []
    plus = _format_unary_symbols(added, _PLUS_SYMBOLS)
    if plus:
        segments.append(colorize(plus, DIFF_ADD_COLOR))
    minus = _format_unary_symbols(removed, _MINUS_SYMBOLS)
    if minus:
        segments.append(colorize(minus, FAIL_COLOR))
    return "".join(segments)
|
|
2606
|
+
|
|
2607
|
+
|
|
2608
|
+
def _format_stat_header(path: os.PathLike[str] | str, added: int, removed: int) -> str:
    """Render the opening box line of a diff stat: path plus +/- counts."""
    counter = _format_diff_counter(added, removed)
    counter_segment = f" {counter}" if counter else ""
    plus_count = colorize(f"{added}(+)", DIFF_ADD_COLOR)
    minus_count = colorize(f"{removed}(-)", FAIL_COLOR)
    return f"{_BLOCK_INDENT}┌ {os.fspath(path)} {plus_count}/{minus_count}{counter_segment}"
|
|
2618
|
+
|
|
2619
|
+
|
|
2620
|
+
def _format_lines_changed(total: int) -> str:
    """Render the closing box line stating how many lines changed."""
    noun = "line" if total == 1 else "lines"
    return f"{_BLOCK_INDENT}└ {total} {noun} changed"
|
|
2623
|
+
|
|
2624
|
+
|
|
2625
|
+
def print_diff(expected: bytes, actual: bytes, path: Path) -> None:
    """Print a boxed unified diff between *expected* and *actual* bytes.

    Honors the global display toggles: failure-output suppression skips
    everything; the stat and diff toggles control which parts of the box
    (header counts, diff body, changed-line footer) are shown.

    Improvements over the previous version:
    - The no-output early exit is evaluated before computing the diff, so
      no work is done when neither stat nor diff output is enabled (the
      skipped computation is side-effect-free).
    - Reuses the bound ``show_diff`` flag instead of calling
      ``should_show_diff_output()`` a second time, removing the risk of
      the two reads disagreeing.
    """
    if should_suppress_failure_output():
        return
    show_stat = should_show_diff_stat()
    show_diff = should_show_diff_output()
    if not show_stat and not show_diff:
        # Nothing would be printed; skip the diff computation entirely.
        return
    diff = list(
        difflib.diff_bytes(
            difflib.unified_diff,
            expected.splitlines(keepends=True),
            actual.splitlines(keepends=True),
            n=2,
        )
    )
    # Count content lines only: skip the two ---/+++ header lines (index >= 2)
    # and the hunk markers.
    added = sum(
        1
        for index, line in enumerate(diff)
        if index >= 2 and line.startswith(b"+") and not line.startswith(b"+++")
    )
    removed = sum(
        1
        for index, line in enumerate(diff)
        if index >= 2 and line.startswith(b"-") and not line.startswith(b"---")
    )
    diff_lines: list[str] = []
    if show_diff:
        skip = 2  # drop the ---/+++ file header lines
        for raw_line in diff:
            if skip > 0:
                skip -= 1
                continue
            text = raw_line.decode("utf-8", "replace").rstrip("\r\n")
            if raw_line.startswith(b"+") and not raw_line.startswith(b"+++"):
                text = colorize(text, DIFF_ADD_COLOR)
            elif raw_line.startswith(b"-") and not raw_line.startswith(b"---"):
                text = colorize(text, FAIL_COLOR)
            diff_lines.append(text)
    rel_path = _relativize_path(path)
    rel_path_str = os.fspath(rel_path)
    total_changed = added + removed
    lines: list[str] = []
    header = (
        _format_stat_header(rel_path, added, removed)
        if show_stat
        else f"{_BLOCK_INDENT}┌ {rel_path_str}"
    )
    lines.append(header)
    if show_diff and diff_lines:
        for diff_line in diff_lines:
            lines.append(f"{_BLOCK_INDENT}│ {diff_line}")
    if show_stat or (show_diff and total_changed > 0):
        lines.append(_format_lines_changed(total_changed))
    # Hold the lock across the whole box so concurrent workers cannot
    # interleave their output inside it.
    with stdout_lock:
        for output_line in lines:
            print(output_line)
|
|
2681
|
+
|
|
2682
|
+
|
|
2683
|
+
def check_group_is_empty(pgid: int) -> None:
    """Raise ``ValueError`` if any process in group *pgid* is still alive."""
    try:
        # Signal 0 performs an existence check without delivering a signal.
        os.killpg(pgid, 0)
    except ProcessLookupError:
        # No processes left in the group — the expected, healthy outcome.
        return
    raise ValueError("leftover child processes!")
|
|
2689
|
+
|
|
2690
|
+
|
|
2691
|
+
def run_simple_test(
    test: Path,
    *,
    update: bool,
    args: Sequence[str] = (),
    output_ext: str,
    coverage: bool = False,
) -> bool:
    """Run a single tenzir test file and compare (or update) its reference output.

    Parses the test's frontmatter config, activates its fixtures, launches the
    tenzir binary on the test file, and then either updates the reference file
    (``update=True``) or diffs the captured stdout against it.

    Returns True on success (including an expected error when the test declares
    ``error: true``), False on any failure, timeout, or mismatch.
    """
    try:
        # Parse test configuration
        test_config = parse_test_config(test, coverage=coverage)
    except ValueError as e:
        report_failure(test, format_failure_message(str(e)))
        return False

    # Pull the knobs this runner needs out of the parsed config.
    inputs_override = typing.cast(str | None, test_config.get("inputs"))
    env, config_args = get_test_env_and_config_args(test, inputs=inputs_override)
    fixtures = cast(tuple[str, ...], test_config.get("fixtures", tuple()))
    timeout = cast(int, test_config["timeout"])
    # When the test declares `error: true`, a non-zero exit is the success case.
    expect_error = bool(test_config.get("error", False))
    passthrough_mode = is_passthrough_enabled()

    # Tests living inside a package get the package mounted and its inputs
    # directory exposed (unless the test overrides `inputs` itself).
    package_root = packages.find_package_root(test)
    package_args: list[str] = []
    if package_root is not None:
        env["TENZIR_PACKAGE_ROOT"] = str(package_root)
        package_tests_root = package_root / "tests"
        if inputs_override is None:
            env["TENZIR_INPUTS"] = str(package_tests_root / "inputs")
        package_args.append(f"--package-dirs={package_root}")

    # Make the test's context visible to fixtures for the duration of the run.
    context_token = fixtures_impl.push_context(
        fixtures_impl.FixtureContext(
            test=test,
            config=cast(dict[str, Any], test_config),
            coverage=coverage,
            env=env,
            config_args=tuple(config_args),
            tenzir_binary=TENZIR_BINARY,
            tenzir_node_binary=TENZIR_NODE_BINARY,
        )
    )
    try:
        with fixtures_impl.activate(fixtures) as fixture_env:
            # Fixture-provided variables take effect on top of the base env.
            env.update(fixture_env)
            _apply_fixture_env(env, fixtures)

            # Set up environment for code coverage if enabled
            if coverage:
                coverage_dir = os.environ.get(
                    "CMAKE_COVERAGE_OUTPUT_DIRECTORY", os.path.join(os.getcwd(), "coverage")
                )
                source_dir = os.environ.get("COVERAGE_SOURCE_DIR", os.getcwd())
                os.makedirs(coverage_dir, exist_ok=True)
                test_name = test.stem
                # %p expands to the PID so parallel runs don't clobber profiles.
                profile_path = os.path.join(coverage_dir, f"{test_name}-%p.profraw")
                env["LLVM_PROFILE_FILE"] = profile_path
                env["COVERAGE_SOURCE_DIR"] = source_dir

            # The node fixture publishes a client endpoint we must connect to.
            node_args: list[str] = []
            node_requested = "node" in fixtures
            if node_requested:
                endpoint = env.get("TENZIR_NODE_CLIENT_ENDPOINT")
                if not endpoint:
                    raise RuntimeError("node fixture did not provide TENZIR_NODE_CLIENT_ENDPOINT")
                node_args.append(f"--endpoint={endpoint}")

            if not TENZIR_BINARY:
                raise RuntimeError("TENZIR_BINARY must be configured before running tests")
            cmd: list[str] = [
                TENZIR_BINARY,
                "--bare-mode",
                "--console-verbosity=warning",
                "--multi",
                *config_args,
                *node_args,
                *package_args,
                *args,
                "-f",
                str(test),
            ]
            completed = run_subprocess(
                cmd,
                timeout=timeout,
                env=env,
                # Passthrough mode streams output directly to the terminal.
                capture_output=not passthrough_mode,
                cwd=str(ROOT),
            )
            good = completed.returncode == 0
            output = b""
            stderr_output = b""
            if not passthrough_mode:
                # Strip the absolute project root from captured output so
                # reference files stay machine-independent.
                root_bytes = str(ROOT).encode() + b"/"
                captured_stdout = completed.stdout or b""
                output = captured_stdout.replace(root_bytes, b"")
                captured_stderr = completed.stderr or b""
                stderr_output = captured_stderr.replace(root_bytes, b"")
    except subprocess.TimeoutExpired:
        report_failure(
            test,
            format_failure_message(f"subprocess hit {timeout}s timeout"),
        )
        return False
    except subprocess.CalledProcessError as e:
        report_failure(test, format_failure_message(f"subprocess error: {e}"))
        return False
    except Exception as e:
        report_failure(test, format_failure_message(f"unexpected exception: {e}"))
        return False
    finally:
        # Always unwind the fixture context and temp dir, even on failure.
        fixtures_impl.pop_context(context_token)
        cleanup_test_tmp_dir(env.get(TEST_TMP_ENV_VAR))

    # Exit-code check: `expect_error == good` means the outcome contradicts
    # the test's expectation (error expected but exit 0, or vice versa).
    if expect_error == good:
        interrupted = _is_interrupt_exit(completed.returncode) or interrupt_requested()
        if should_suppress_failure_output() and not interrupted:
            return False
        if interrupted:
            _request_interrupt()
        summary_line = (
            _INTERRUPTED_NOTICE
            if interrupted
            else format_failure_message(f"got unexpected exit code {completed.returncode}")
        )
        if passthrough_mode:
            report_failure(test, summary_line)
        else:
            with stdout_lock:
                fail(test)
                if not interrupted:
                    # Echo captured stdout (and stderr, when relevant) inside
                    # a box so failures are diagnosable from the log.
                    line_prefix = "│ ".encode()
                    for line in output.splitlines():
                        sys.stdout.buffer.write(line_prefix + line + b"\n")
                    if completed.returncode != 0 and stderr_output:
                        sys.stdout.write("├─▶ stderr\n")
                        detail_prefix = DETAIL_COLOR.encode()
                        reset_bytes = RESET_COLOR.encode()
                        for line in stderr_output.splitlines():
                            sys.stdout.buffer.write(
                                line_prefix + detail_prefix + line + reset_bytes + b"\n"
                            )
                if summary_line:
                    sys.stdout.write(summary_line + "\n")
        return False
    if passthrough_mode:
        # No captured output to compare in passthrough mode; the exit-code
        # check above is the only verdict.
        success(test)
        return True
    if not good:
        # Expected failures keep their diagnostics in a plain .txt reference.
        output_ext = "txt"
    ref_path = test.with_suffix(f".{output_ext}")
    if update:
        with ref_path.open("wb") as f:
            f.write(output)
    else:
        if not ref_path.exists():
            report_failure(test, format_failure_message(f'Failed to find ref file: "{ref_path}"'))
            return False
        log_comparison(test, ref_path, mode="comparing")
        expected = ref_path.read_bytes()
        if expected != output:
            if interrupt_requested():
                # A mismatch during shutdown is reported as interrupted, not failed.
                report_interrupted_test(test)
            else:
                report_failure(test, "")
                print_diff(expected, output, ref_path)
            return False
    success(test)
    return True
|
|
2859
|
+
|
|
2860
|
+
|
|
2861
|
+
def handle_skip(reason: str, test: Path, update: bool, output_ext: str) -> bool | str:
    """Announce a skipped test and keep its reference file empty.

    In update mode the reference file is truncated to empty bytes; otherwise
    an existing non-empty reference file is reported as a failure. Returns
    the sentinel string "skipped" on success, or False on failure.
    """
    suite_suffix = _format_suite_suffix()
    print(f"{SKIP} skipped {_relativize_path(test)}{suite_suffix}: {reason}")
    ref_path = test.with_suffix(f".{output_ext}")
    if update:
        ref_path.write_bytes(b"")
        return "skipped"
    if ref_path.exists() and ref_path.read_bytes() != b"":
        report_failure(
            test,
            format_failure_message(
                f'Reference file for skipped test must be empty: "{ref_path}"'
            ),
        )
        return False
    return "skipped"
|
|
2881
|
+
|
|
2882
|
+
|
|
2883
|
+
def refresh_runner_metadata() -> None:
    """Re-synchronize the module's runner registry metadata."""
    _refresh_registry()
|
|
2885
|
+
|
|
2886
|
+
|
|
2887
|
+
# Populate the runner registry metadata once at import time so later
# discovery helpers see a fully initialized registry.
refresh_runner_metadata()


# Module-level toggle for verbose suite setup/teardown logging; flipped via
# set_suite_debug_logging().
SUITE_DEBUG_LOGGING = False
|
|
2891
|
+
|
|
2892
|
+
|
|
2893
|
+
def set_suite_debug_logging(enabled: bool) -> None:
    """Enable or disable debug logging of suite setup/teardown events."""
    global SUITE_DEBUG_LOGGING
    SUITE_DEBUG_LOGGING = enabled
|
|
2896
|
+
|
|
2897
|
+
|
|
2898
|
+
def _log_suite_event(
    suite: SuiteInfo,
    *,
    event: Literal["setup", "teardown"],
    total: int,
) -> None:
    """Emit a debug line for a suite lifecycle event when debug logging is on."""
    if not SUITE_DEBUG_LOGGING:
        return
    verb = "setting up" if event == "setup" else "tearing down"
    _CLI_LOGGER.debug(
        "suite %s %s (%d tests) @ %s",
        verb,
        suite.name,
        total,
        _relativize_path(suite.directory),
    )
|
|
2909
|
+
|
|
2910
|
+
|
|
2911
|
+
class Worker:
    """A single test-executing worker backed by one thread.

    Workers share one queue (a plain list used as a LIFO stack) and pop
    items until the queue drains or an interrupt is requested. Each worker
    accumulates outcomes into its own ``Summary``, retrieved via ``join()``.
    """

    def __init__(
        self,
        queue: list[RunnerQueueItem],
        *,
        update: bool,
        coverage: bool = False,
        runner_versions: Mapping[str, str] | None = None,
        debug: bool = False,
    ) -> None:
        # Shared work queue; items are popped from the end.
        self._queue = queue
        # Set by _work(); stays None until the thread produces a Summary.
        self._result: Summary | None = None
        # First unexpected exception from _work(), re-raised by join().
        self._exception: BaseException | None = None
        self._update = update
        self._coverage = coverage
        self._runner_versions = dict(runner_versions or {})
        self._debug = debug
        self._thread = threading.Thread(target=self._work)

    def start(self) -> None:
        """Start the worker thread."""
        self._thread.start()

    def join(self) -> Summary:
        """Block until the worker finishes and return its summary.

        Re-raises any exception captured by the worker thread; raises
        ``RuntimeError`` if the thread exited without producing a result.
        """
        self._thread.join()
        if self._exception:
            raise self._exception
        if self._result is None:
            raise RuntimeError("worker finished without producing a result")
        return self._result

    def _work(self) -> Summary:
        """Thread main loop: drain the queue, dispatching suites and tests."""
        try:
            self._result = Summary()
            result = self._result
            while True:
                # Check for interruption both before taking work and after
                # finishing an item, so shutdown is prompt in both cases.
                if interrupt_requested():
                    break
                try:
                    queue_item = self._queue.pop()
                except IndexError:
                    # Queue drained; normal termination.
                    break

                if isinstance(queue_item, SuiteQueueItem):
                    self._run_suite(queue_item, result)
                else:
                    self._run_test_item(queue_item, result)
                if interrupt_requested():
                    break
            return result
        except Exception as exc:  # pragma: no cover - defensive logging
            # Capture for join(); still hand back a (possibly empty) Summary.
            self._exception = exc
            if self._result is None:
                self._result = Summary()
            return self._result

    def _run_suite(self, suite_item: SuiteQueueItem, summary: Summary) -> None:
        """Run all tests of a suite inside one shared fixture scope."""
        tests = suite_item.tests
        total = len(tests)
        if total == 0:
            return
        # The first test's config seeds the suite-wide environment/fixtures.
        primary_test = tests[0].path
        try:
            primary_config = parse_test_config(primary_test, coverage=self._coverage)
        except ValueError as exc:
            raise RuntimeError(f"failed to parse suite config for {primary_test}: {exc}") from exc
        inputs_override = typing.cast(str | None, primary_config.get("inputs"))
        env, config_args = get_test_env_and_config_args(primary_test, inputs=inputs_override)
        package_root = packages.find_package_root(primary_test)
        if package_root is not None:
            env["TENZIR_PACKAGE_ROOT"] = str(package_root)
            if inputs_override is None:
                env["TENZIR_INPUTS"] = str((package_root / "tests" / "inputs"))
        _apply_fixture_env(env, suite_item.fixtures)
        # Expose the suite context to fixtures for the duration of the suite.
        context_token = fixtures_impl.push_context(
            fixtures_impl.FixtureContext(
                test=primary_test,
                config=typing.cast(dict[str, Any], primary_config),
                coverage=self._coverage,
                env=env,
                config_args=tuple(config_args),
                tenzir_binary=TENZIR_BINARY,
                tenzir_node_binary=TENZIR_NODE_BINARY,
            )
        )
        _log_suite_event(suite_item.suite, event="setup", total=total)
        interrupted = False
        try:
            # Fixtures stay alive across every test in the suite.
            with fixtures_impl.suite_scope(suite_item.fixtures):
                for index, test_item in enumerate(tests, start=1):
                    if interrupt_requested():
                        interrupted = True
                        break
                    interrupted = self._run_test_item(
                        test_item,
                        summary,
                        suite_progress=(suite_item.suite.name, index, total),
                        suite_fixtures=suite_item.fixtures,
                    )
                    if interrupted:
                        break
        finally:
            # Tear down in reverse order of setup, even on failure.
            _log_suite_event(suite_item.suite, event="teardown", total=total)
            fixtures_impl.pop_context(context_token)
            cleanup_test_tmp_dir(env.get(TEST_TMP_ENV_VAR))
        if interrupted:
            _request_interrupt()

    def _run_test_item(
        self,
        test_item: TestQueueItem,
        summary: Summary,
        *,
        suite_progress: tuple[str, int, int] | None = None,
        suite_fixtures: tuple[str, ...] | None = None,
    ) -> bool:
        """Run one test (with retries) and record its outcome in *summary*.

        Returns True when the run was interrupted (directly or globally),
        signalling the caller to stop scheduling further work.
        """
        test_path = test_item.path
        runner = test_item.runner
        rel_path = _relativize_path(test_path)
        configured_fixtures = suite_fixtures or _get_test_fixtures(
            test_path, coverage=self._coverage
        )
        fixtures = configured_fixtures
        retry_limit = 1
        config: TestConfig | None = None
        parse_error: str | None = None
        try:
            config = parse_test_config(test_path, coverage=self._coverage)
        except ValueError as exc:
            # Defer reporting until after bookkeeping below.
            parse_error = str(exc)
            config = None
        if config is not None:
            raw_retry = config.get("retry", 0)
            if isinstance(raw_retry, int):
                retry_limit = max(1, raw_retry)
            config_fixtures = cast(tuple[str, ...], config.get("fixtures", tuple()))
            # Suite tests must agree with the suite-wide fixture set, since
            # fixtures were already activated at suite scope.
            if suite_fixtures is not None and config_fixtures != suite_fixtures:
                raise RuntimeError(
                    f"fixture mismatch for suite test {test_path}: "
                    f"expected {suite_fixtures}, got {config_fixtures}"
                )
            if suite_fixtures is None:
                configured_fixtures = config_fixtures
        if parse_error is not None:
            # Unparseable config counts as a failed test, not a crash.
            message = format_failure_message(parse_error)
            report_failure(test_path, message)
            summary.total += 1
            summary.record_runner_outcome(runner.name, False)
            if fixtures:
                summary.record_fixture_outcome(fixtures, False)
            summary.failed += 1
            summary.failed_paths.append(rel_path)
            return False
        detail_bits = [f"runner={runner.name}"]
        if fixtures:
            detail_bits.append(f"fixtures={', '.join(fixtures)}")
        if suite_progress:
            name, index, total = suite_progress
            detail_bits.append(f"suite={name} ({index}/{total})")
        detail_segment = f" ({', '.join(detail_bits)})" if detail_bits else ""
        if is_passthrough_enabled():
            with stdout_lock:
                print(f"{INFO} running {rel_path}{detail_segment} [passthrough]")
        elif self._debug:
            _CLI_LOGGER.debug("running %s%s", rel_path, detail_segment)
        max_attempts = retry_limit
        attempts = 0
        final_outcome: bool | str = False
        final_interrupted = False
        while attempts < max_attempts:
            if interrupt_requested():
                final_interrupted = True
                break
            attempts += 1
            with _push_retry_context(attempt=attempts, max_attempts=max_attempts):
                # Suite context is pushed per attempt so retry output carries
                # the suite progress annotation.
                attempt_context = contextlib.ExitStack()
                if suite_progress is not None:
                    name, index, total = suite_progress
                    attempt_context.enter_context(
                        _push_suite_context(name=name, index=index, total=total)
                    )
                interrupted = False
                try:
                    with attempt_context:
                        outcome = runner.run(test_path, self._update, self._coverage)
                except KeyboardInterrupt:  # pragma: no cover - defensive guard
                    _request_interrupt()
                    interrupted = True
                    outcome = False
                except Exception as exc:
                    # A runner exception is terminal: report it and do not retry.
                    error_message = format_failure_message(str(exc))
                    report_failure(test_path, error_message)
                    outcome = False
                    final_interrupted = False
                    final_outcome = outcome
                    break

                if interrupted:
                    report_failure(test_path, _INTERRUPTED_NOTICE)
                    final_interrupted = True
                final_outcome = outcome
                if final_interrupted or interrupt_requested():
                    break
                # Success or explicit skip ends the retry loop.
                if outcome == "skipped" or outcome:
                    break
                if attempts < max_attempts:
                    continue
        # Record the final outcome exactly once, regardless of attempts used.
        summary.total += 1
        summary.record_runner_outcome(runner.name, final_outcome)
        if fixtures:
            summary.record_fixture_outcome(fixtures, final_outcome)
        if final_outcome == "skipped":
            summary.skipped += 1
            summary.skipped_paths.append(rel_path)
        elif not final_outcome:
            summary.failed += 1
            summary.failed_paths.append(rel_path)
        return final_interrupted or interrupt_requested()
|
|
3128
|
+
|
|
3129
|
+
|
|
3130
|
+
def get_runner_for_test(test_path: Path) -> Runner:
    """Determine the appropriate runner for a test based on its configuration.

    Thin wrapper delegating to the runner registry's lookup.
    """
    return runners_get_runner(test_path)
|
|
3133
|
+
|
|
3134
|
+
|
|
3135
|
+
def collect_all_tests(directory: Path) -> Iterator[Path]:
    """Yield every candidate test file beneath *directory*.

    Directories named "fixtures" or "runners" are excluded outright, as are
    files living under inputs directories. Candidate extensions come from
    the allow-list when set, otherwise from the registered runners.
    """
    if directory.name in {"fixtures", "runners"}:
        return
    extensions = _allowed_extensions
    if not extensions:
        # Fall back to the extensions advertised by registered runners.
        discovered = (getattr(runner, "_ext", None) for runner in runners_iter_runners())
        extensions = {ext for ext in discovered if ext is not None}
    for ext in extensions:
        yield from (
            candidate
            for candidate in directory.glob(f"**/*.{ext}")
            if not _is_inputs_path(candidate)
        )
|
|
3148
|
+
|
|
3149
|
+
|
|
3150
|
+
def run_cli(
    *,
    root: Path | None,
    tenzir_binary: Path | None,
    tenzir_node_binary: Path | None,
    tests: Sequence[Path],
    update: bool,
    debug: bool,
    purge: bool,
    coverage: bool,
    coverage_source_dir: Path | None,
    runner_summary: bool,
    fixture_summary: bool,
    show_summary: bool,
    show_diff_output: bool,
    show_diff_stat: bool,
    jobs: int,
    keep_tmp_dirs: bool,
    passthrough: bool,
    jobs_overridden: bool = False,
    all_projects: bool = False,
) -> ExecutionResult:
    """Execute the harness and return a structured result for library consumers.

    High-level flow: configure logging and harness mode, discover settings,
    build an execution plan, then run each selected project's test queue on a
    pool of worker threads, accumulating per-project and overall summaries.
    Raises HarnessError for configuration/selection problems; translates a
    KeyboardInterrupt during worker joins into exit code 130.
    """
    from tenzir_test.engine import state as engine_state

    try:
        debug_enabled = bool(debug or _default_debug_logging)
        set_debug_logging(debug_enabled)

        fixture_logger = logging.getLogger("tenzir_test.fixtures")
        root_logger = logging.getLogger()

        _set_discovery_logging(debug_enabled)
        set_suite_debug_logging(debug_enabled)

        debug_formatter = logging.Formatter(f"{DEBUG_PREFIX} %(message)s")
        default_formatter = logging.Formatter("%(levelname)s:%(name)s:%(message)s")

        # Reconfigure both the root logger and the fixtures logger in place so
        # that toggling --debug between runs (library use) swaps formatters on
        # existing handlers instead of stacking new ones.
        if debug_enabled:
            if not root_logger.handlers:
                stream_handler = logging.StreamHandler()
                stream_handler.setFormatter(debug_formatter)
                root_logger.addHandler(stream_handler)
            else:
                for existing_handler in list(root_logger.handlers):
                    existing_handler.setFormatter(debug_formatter)
            root_logger.setLevel(logging.INFO)
            if not fixture_logger.handlers:
                fixture_logger.addHandler(logging.StreamHandler())
            for handler in list(fixture_logger.handlers):
                handler.setLevel(logging.DEBUG)
                handler.setFormatter(debug_formatter)
            fixture_logger.setLevel(logging.DEBUG)
            # Avoid duplicate emission through the root logger in debug mode.
            fixture_logger.propagate = False
        else:
            for existing_handler in list(root_logger.handlers):
                existing_handler.setFormatter(default_formatter)
            root_logger.setLevel(logging.WARNING)
            for handler in list(fixture_logger.handlers):
                handler.setLevel(logging.INFO)
                handler.setFormatter(default_formatter)
            fixture_logger.setLevel(logging.WARNING)
            fixture_logger.propagate = True

        set_keep_tmp_dirs(bool(os.environ.get(_TMP_KEEP_ENV_VAR)) or keep_tmp_dirs)
        set_show_diff_output(show_diff_output)
        set_show_diff_stat(show_diff_stat)
        # Passthrough wins over update; otherwise update wins over compare.
        if passthrough:
            harness_mode = HarnessMode.PASSTHROUGH
        elif update:
            harness_mode = HarnessMode.UPDATE
        else:
            harness_mode = HarnessMode.COMPARE
        set_harness_mode(harness_mode)
        passthrough_mode = harness_mode is HarnessMode.PASSTHROUGH
        # Passthrough streams test output directly, so parallelism would
        # interleave it; force a single job (announce only if user set --jobs).
        if passthrough_mode and jobs > 1:
            if jobs_overridden:
                print(f"{INFO} forcing --jobs=1 in passthrough mode to preserve output ordering")
            jobs = 1
        if passthrough_mode and update:
            print(f"{INFO} ignoring --update in passthrough mode")
            update = False

        settings = discover_settings(
            root=root,
            tenzir_binary=tenzir_binary,
            tenzir_node_binary=tenzir_node_binary,
        )
        apply_settings(settings)
        selected_tests = list(tests)

        plan: ExecutionPlan | None = None
        if selected_tests:
            plan = _build_execution_plan(
                ROOT,
                selected_tests,
                root_explicit=root is not None,
                all_projects=all_projects,
            )

        # Outside of a recognized project root we can still run explicitly
        # selected satellite projects; otherwise bail out with guidance.
        if not _is_project_root(ROOT):
            if all_projects:
                raise HarnessError(
                    "error: --all-projects requires a project root; specify one with --root"
                )
            if not selected_tests:
                message = (
                    f"{INFO} no tenzir-test project detected at {ROOT}.\n"
                    f"{INFO} run from your project root or provide --root."
                )
                print(message)
                raise HarnessError(message, show_message=False)
            assert plan is not None
            runnable_satellites = [item for item in plan.satellites if item.should_run()]
            if not runnable_satellites:
                message = (
                    f"{INFO} no tenzir-test project detected at {ROOT}.\n"
                    f"{INFO} run from your project root or provide --root."
                )
                print(message)
                # Show (at most three of) the selections we are discarding.
                sample = ", ".join(str(path) for path in selected_tests[:3])
                if len(selected_tests) > 3:
                    sample += ", ..."
                print(f"{INFO} ignoring provided selection(s): {sample}")
                raise HarnessError(
                    "no runnable tests selected outside of a project root", show_message=False
                )
        if plan is None:
            plan = _build_execution_plan(
                ROOT,
                selected_tests,
                root_explicit=root is not None,
                all_projects=all_projects,
            )
        display_base = Path.cwd().resolve()
        project_count = _print_execution_plan(plan, display_base=display_base)
        if project_count:
            print()

        with _install_interrupt_handler():
            engine_state.refresh()

            overall_summary = Summary()
            overall_queue_count = 0
            executed_projects: list[ProjectSelection] = []
            project_results: list[ProjectResult] = []
            printed_projects = 0
            interrupted = False

            for selection in plan.projects():
                if interrupt_requested():
                    break
                if not selection.should_run():
                    # Even when the root project itself is not run, its runner
                    # and fixture namespaces must be loaded so satellites can
                    # reference them; restore the root context afterwards.
                    if selection.kind == "root":
                        _set_project_root(selection.root)
                        engine_state.refresh()
                        try:
                            _load_project_runners(selection.root, expose_namespace=True)
                            _load_project_fixtures(selection.root, expose_namespace=True)
                        except RuntimeError as exc:
                            raise HarnessError(f"error: {exc}") from exc
                        refresh_runner_metadata()
                        _set_project_root(settings.root)
                        engine_state.refresh()
                    continue

                # Blank line between consecutive project sections.
                if printed_projects:
                    print()

                _set_project_root(selection.root)
                engine_state.refresh()
                expose_namespace = selection.kind == "root"
                try:
                    _load_project_runners(selection.root, expose_namespace=expose_namespace)
                    _load_project_fixtures(selection.root, expose_namespace=expose_namespace)
                except RuntimeError as exc:
                    raise HarnessError(f"error: {exc}") from exc
                refresh_runner_metadata()

                tests_to_run = selection.selectors if not selection.run_all else [selection.root]

                # Purge mode only needs the project context loaded; the actual
                # purge happens after the loop.
                if purge:
                    continue

                collected_paths: set[Path] = set()
                for test in tests_to_run:
                    # Selecting the project root means "all tests".
                    if test.resolve() == selection.root.resolve():
                        all_tests = []
                        for tests_dir in _iter_project_test_directories(selection.root):
                            all_tests.extend(list(collect_all_tests(tests_dir)))
                        for test_path in all_tests:
                            collected_paths.add(test_path.resolve())
                        continue

                    resolved = test.resolve()
                    if not resolved.exists():
                        raise HarnessError(f"error: test path `{test}` does not exist")

                    if resolved.is_dir():
                        if _is_inputs_path(resolved):
                            continue
                        tql_files = list(collect_all_tests(resolved))
                        if not tql_files:
                            raise HarnessError(
                                f"error: no {_allowed_extensions} files found in {resolved}"
                            )
                        # Reject directory selections that slice a suite:
                        # suites must run as a whole.
                        for file_path in tql_files:
                            suite_info = _resolve_suite_for_test(file_path)
                            if suite_info is None:
                                continue
                            suite_dir = suite_info.directory
                            if resolved == suite_dir:
                                continue
                            if _path_is_within(resolved, suite_dir):
                                rel_target = _relativize_path(resolved)
                                rel_suite = _relativize_path(suite_dir)
                                detail = (
                                    f"cannot select {rel_target} directly because it is inside the suite "
                                    f"'{suite_info.name}' defined in {rel_suite / _CONFIG_FILE_NAME}."
                                )
                                print(f"{CROSS} {detail}", file=sys.stderr)
                                print(
                                    f"{INFO} select the suite directory instead",
                                    file=sys.stderr,
                                )
                                raise HarnessError(
                                    f"invalid partial suite selection at {rel_target}",
                                    show_message=False,
                                )
                        for file_path in tql_files:
                            collected_paths.add(file_path.resolve())
                    elif resolved.is_file():
                        if _is_inputs_path(resolved):
                            continue
                        if resolved.suffix[1:] in _allowed_extensions:
                            # Individual files inside a suite cannot be run in
                            # isolation either.
                            suite_info = _resolve_suite_for_test(resolved)
                            if suite_info is not None and _path_is_within(
                                resolved, suite_info.directory
                            ):
                                rel_file = _relativize_path(resolved)
                                rel_suite = _relativize_path(suite_info.directory)
                                detail = (
                                    f"cannot select {rel_file} directly because it belongs to the suite "
                                    f"'{suite_info.name}' defined in {rel_suite / _CONFIG_FILE_NAME}."
                                )
                                print(f"{CROSS} {detail}", file=sys.stderr)
                                print(f"{INFO} select the suite directory instead", file=sys.stderr)
                                raise HarnessError(
                                    f"invalid suite selection for {rel_file}",
                                    show_message=False,
                                )
                            collected_paths.add(resolved.resolve())
                        else:
                            raise HarnessError(
                                f"error: unsupported file type {resolved.suffix} for {resolved} - only {_allowed_extensions} files are supported"
                            )
                    else:
                        raise HarnessError(f"error: `{test}` is neither a file nor a directory")

                if interrupt_requested():
                    break

                queue = _build_queue_from_paths(collected_paths, coverage=coverage)
                queue.sort(key=_queue_sort_key, reverse=True)
                project_queue_size = _count_queue_tests(queue)
                project_summary = Summary()
                job_count, enabled_flags, verb = _summarize_harness_configuration(
                    jobs=jobs,
                    update=update,
                    coverage=coverage,
                    debug=debug_enabled,
                    show_summary=show_summary,
                    runner_summary=runner_summary,
                    fixture_summary=fixture_summary,
                    passthrough=passthrough_mode,
                )

                # An empty queue still counts as an executed project (with an
                # empty summary), but skips workers and banners entirely.
                if not project_queue_size:
                    overall_queue_count += project_queue_size
                    executed_projects.append(selection)
                    project_results.append(
                        ProjectResult(
                            selection=selection,
                            summary=project_summary,
                            queue_size=project_queue_size,
                        )
                    )
                    continue

                os.environ["TENZIR_EXEC__DUMP_DIAGNOSTICS"] = "true"
                if not TENZIR_BINARY:
                    raise HarnessError(
                        f"error: could not find TENZIR_BINARY executable `{TENZIR_BINARY}`"
                    )
                try:
                    tenzir_version = get_version()
                except FileNotFoundError:
                    raise HarnessError(
                        f"error: could not find TENZIR_BINARY executable `{TENZIR_BINARY}`"
                    )

                runner_versions = _collect_runner_versions(queue, tenzir_version=tenzir_version)
                runner_breakdown = _runner_breakdown(
                    queue,
                    tenzir_version=tenzir_version,
                    runner_versions=runner_versions,
                )

                _print_project_start(
                    selection=selection,
                    display_base=display_base,
                    queue_size=project_queue_size,
                    job_count=job_count,
                    enabled_flags=enabled_flags,
                    verb=verb,
                )
                # Right-align the per-runner counts for the breakdown listing.
                count_width = max((len(str(count)) for _, count, _ in runner_breakdown), default=1)
                for name, count, version in runner_breakdown:
                    version_segment = f" (v{version})" if version else ""
                    print(f"{INFO} {count:>{count_width}}× {name}{version_segment}")
                printed_projects += 1

                # Workers drain the shared queue cooperatively; each returns a
                # partial summary on join.
                workers = [
                    Worker(
                        queue,
                        update=update,
                        coverage=coverage,
                        runner_versions=runner_versions,
                        debug=debug_enabled,
                    )
                    for _ in range(jobs)
                ]
                for worker in workers:
                    worker.start()
                try:
                    for worker in workers:
                        project_summary += worker.join()
                except KeyboardInterrupt:  # pragma: no cover - defensive guard
                    _request_interrupt()
                    for worker in workers:
                        worker.join()
                    interrupted = True
                    break

                _print_compact_summary(project_summary)
                summary_enabled = show_summary or runner_summary or fixture_summary
                if summary_enabled:
                    _print_detailed_summary(project_summary)
                    _print_ascii_summary(
                        project_summary,
                        include_runner=runner_summary,
                        include_fixture=fixture_summary,
                    )

                if coverage:
                    coverage_dir = os.environ.get(
                        "CMAKE_COVERAGE_OUTPUT_DIRECTORY", os.path.join(os.getcwd(), "coverage")
                    )
                    source_dir = str(coverage_source_dir) if coverage_source_dir else os.getcwd()
                    print(f"{INFO} Code coverage data collected in {coverage_dir}")
                    print(f"{INFO} Source directory for coverage mapping: {source_dir}")

                overall_summary += project_summary
                overall_queue_count += project_queue_size
                executed_projects.append(selection)
                project_results.append(
                    ProjectResult(
                        selection=selection,
                        summary=project_summary,
                        queue_size=project_queue_size,
                    )
                )

                if interrupt_requested():
                    break

            # Restore root project context for subsequent operations.
            _set_project_root(settings.root)
            engine_state.refresh()

            if purge:
                for runner in runners_iter_runners():
                    runner.purge()
                return ExecutionResult(
                    summary=overall_summary,
                    project_results=tuple(project_results),
                    queue_size=overall_queue_count,
                    exit_code=0,
                    interrupted=interrupted,
                )

            if overall_queue_count == 0:
                print(f"{INFO} no tests selected")
                return ExecutionResult(
                    summary=overall_summary,
                    project_results=tuple(project_results),
                    queue_size=overall_queue_count,
                    exit_code=0,
                    interrupted=interrupted,
                )

            if len(executed_projects) > 1:
                _print_aggregate_totals(len(executed_projects), overall_summary)

            # Conventional exit code for an interrupted run (SIGINT).
            if interrupted:
                return ExecutionResult(
                    summary=overall_summary,
                    project_results=tuple(project_results),
                    queue_size=overall_queue_count,
                    exit_code=130,
                    interrupted=True,
                )

            exit_code = 1 if overall_summary.failed > 0 else 0
            return ExecutionResult(
                summary=overall_summary,
                project_results=tuple(project_results),
                queue_size=overall_queue_count,
                exit_code=exit_code,
                interrupted=False,
            )

    finally:
        _cleanup_all_tmp_dirs()
|
|
3574
|
+
|
|
3575
|
+
|
|
3576
|
+
def execute(
    *,
    root: Path | None = None,
    tenzir_binary: Path | None = None,
    tenzir_node_binary: Path | None = None,
    tests: Sequence[Path] = (),
    update: bool = False,
    debug: bool = False,
    purge: bool = False,
    coverage: bool = False,
    coverage_source_dir: Path | None = None,
    runner_summary: bool = False,
    fixture_summary: bool = False,
    show_summary: bool = False,
    show_diff_output: bool = True,
    show_diff_stat: bool = True,
    jobs: int | None = None,
    keep_tmp_dirs: bool = False,
    passthrough: bool = False,
    jobs_overridden: bool = False,
    all_projects: bool = False,
) -> ExecutionResult:
    """Library-oriented wrapper around `run_cli` with defaulted parameters."""
    # `run_cli` requires a concrete job count; fill in the default here.
    effective_jobs = get_default_jobs() if jobs is None else jobs
    return run_cli(
        root=root,
        tenzir_binary=tenzir_binary,
        tenzir_node_binary=tenzir_node_binary,
        tests=list(tests),
        update=update,
        debug=debug,
        purge=purge,
        coverage=coverage,
        coverage_source_dir=coverage_source_dir,
        runner_summary=runner_summary,
        fixture_summary=fixture_summary,
        show_summary=show_summary,
        show_diff_output=show_diff_output,
        show_diff_stat=show_diff_stat,
        jobs=effective_jobs,
        keep_tmp_dirs=keep_tmp_dirs,
        passthrough=passthrough,
        jobs_overridden=jobs_overridden,
        all_projects=all_projects,
    )
|
|
3622
|
+
|
|
3623
|
+
|
|
3624
|
+
def main(argv: Sequence[str] | None = None) -> None:
    """Console entry point: run the click CLI and translate its outcome.

    Click exceptions are rendered to stderr and converted to SystemExit so
    that shells observe the conventional exit codes.
    """
    import click

    from . import cli as cli_module

    args = None if argv is None else list(argv)
    try:
        outcome = cli_module.cli.main(args=args, standalone_mode=False)
    except click.exceptions.ClickException as exc:
        # Render click's own error message before exiting.
        exc.show(file=sys.stderr)
        raise SystemExit(getattr(exc, "exit_code", 1)) from exc
    except click.exceptions.Exit as exc:
        raise SystemExit(exc.exit_code) from exc
    except click.exceptions.Abort as exc:
        raise SystemExit(1) from exc
    code = cli_module._normalize_exit_code(outcome)
    if code:
        raise SystemExit(code)
|
|
3645
|
+
|
|
3646
|
+
|
|
3647
|
+
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
|
|
3649
|
+
|
|
3650
|
+
|
|
3651
|
+
def _print_project_start(
    *,
    selection: ProjectSelection,
    display_base: Path,
    queue_size: int,
    job_count: int,
    enabled_flags: str,
    verb: str,
) -> None:
    """Print the one-line banner announcing a project's test run."""
    # Classify the project for the banner text.
    if selection.kind == "root":
        kind_label = "root project"
    elif packages.is_package_dir(selection.root):
        kind_label = "package project"
    else:
        kind_label = "satellite project"

    display_name = selection.root.name or selection.root.as_posix()

    # Make the location read as an explicit relative path ("./...") unless it
    # already is one (or is the current directory itself).
    rel_location = _format_relative_path(selection.root, display_base)
    needs_prefix = rel_location != "." and not rel_location.startswith(("./", "../"))
    shown_location = f"./{rel_location}" if needs_prefix else rel_location

    styled_name = f"{BOLD}{display_name}{RESET_COLOR}"
    flag_suffix = f"; {enabled_flags}" if enabled_flags else ""
    job_suffix = f" ({job_count} jobs)" if job_count else ""
    print(
        f"{INFO} {styled_name}: {verb} {queue_size} tests{job_suffix} from {kind_label} at {shown_location}{flag_suffix}"
    )