invar-tools 1.17.18__py3-none-any.whl → 1.17.20__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- invar/node_tools/eslint-plugin/cli.js +81 -42
- invar/node_tools/eslint-plugin/rules/require-schema-validation.js +80 -66
- invar/shell/commands/guard.py +11 -2
- invar/shell/config.py +38 -5
- invar/shell/git.py +10 -11
- invar/shell/guard_helpers.py +69 -41
- invar/shell/property_tests.py +85 -38
- invar/shell/prove/crosshair.py +19 -12
- invar/shell/subprocess_env.py +9 -5
- invar/shell/testing.py +57 -30
- {invar_tools-1.17.18.dist-info → invar_tools-1.17.20.dist-info}/METADATA +3 -3
- {invar_tools-1.17.18.dist-info → invar_tools-1.17.20.dist-info}/RECORD +17 -17
- {invar_tools-1.17.18.dist-info → invar_tools-1.17.20.dist-info}/WHEEL +0 -0
- {invar_tools-1.17.18.dist-info → invar_tools-1.17.20.dist-info}/entry_points.txt +0 -0
- {invar_tools-1.17.18.dist-info → invar_tools-1.17.20.dist-info}/licenses/LICENSE +0 -0
- {invar_tools-1.17.18.dist-info → invar_tools-1.17.20.dist-info}/licenses/LICENSE-GPL +0 -0
- {invar_tools-1.17.18.dist-info → invar_tools-1.17.20.dist-info}/licenses/NOTICE +0 -0
invar/shell/guard_helpers.py
CHANGED
@@ -36,24 +36,36 @@ def handle_changed_mode(
     if isinstance(changed_result, Failure):
         return Failure(changed_result.failure())

-
+    all_files = changed_result.unwrap()
+    only_files = {p for p in all_files if p.is_relative_to(path)}
     if not only_files:
-        return Failure("NO_CHANGES")
+        return Failure("NO_CHANGES")

     return Success((only_files, list(only_files)))


 # @shell_orchestration: Coordinates path classification and file collection
 # @shell_complexity: File collection with path normalization
-def collect_files_to_check(
-
-
-
-    from invar.shell.config import get_path_classification
+def collect_files_to_check(path: Path, checked_files: list[Path]) -> list[Path]:
+    """Collect Python files for runtime phases, honoring exclude_paths."""
+    from invar.shell.config import get_exclude_paths, get_path_classification
+    from invar.shell.fs import _is_excluded

     if checked_files:
         return checked_files

+    exclude_result = get_exclude_paths(path)
+    exclude_patterns = exclude_result.unwrap() if isinstance(exclude_result, Success) else []
+
+    def _add_py_files_under(root: Path) -> None:
+        for py_file in root.rglob("*.py"):
+            try:
+                rel = str(py_file.relative_to(path))
+            except ValueError:
+                rel = str(py_file)
+            if not _is_excluded(rel, exclude_patterns):
+                result_files.append(py_file)
+
     result_files: list[Path] = []

     path_result = get_path_classification(path)
@@ -62,26 +74,33 @@ def collect_files_to_check(
     else:
         core_paths, shell_paths = ["src/core"], ["src/shell"]

-    # Scan core/shell paths
     for core_path in core_paths:
         full_path = path / core_path
         if full_path.exists():
-
+            _add_py_files_under(full_path)

     for shell_path in shell_paths:
         full_path = path / shell_path
         if full_path.exists():
-
+            _add_py_files_under(full_path)

-    # Fallback: scan path directly
     if not result_files and path.exists():
-
+        _add_py_files_under(path)
+
+    seen: set[str] = set()
+    unique: list[Path] = []
+    for f in result_files:
+        key = str(f)
+        if key not in seen:
+            seen.add(key)
+            unique.append(f)

-    return
+    return unique


 # @shell_orchestration: Coordinates doctest execution via testing module
 def run_doctests_phase(
+    project_root: Path,
     checked_files: list[Path],
     explain: bool,
     timeout: int = 60,
@@ -103,12 +122,20 @@ def run_doctests_phase(
         return True, "", None

     doctest_result = run_doctests_on_files(
-        checked_files,
+        checked_files,
+        verbose=explain,
+        timeout=timeout,
+        collect_coverage=collect_coverage,
+        cwd=project_root,
     )
     if isinstance(doctest_result, Success):
         result_data = doctest_result.unwrap()
         passed = result_data.get("status") in ("passed", "skipped")
-
+        stdout = result_data.get("stdout", "")
+        stderr = result_data.get("stderr", "")
+        output = stdout
+        if not passed and stderr:
+            output = f"{stdout}\n{stderr}" if stdout else stderr
         # DX-37: Return coverage data if collected
         coverage_data = {"collected": result_data.get("coverage_collected", False)}
         return passed, output, coverage_data if collect_coverage else None
@@ -176,6 +203,7 @@ def run_crosshair_phase(
         cache=cache,
         timeout=timeout,
         per_condition_timeout=per_condition_timeout,
+        project_root=path,
     )

     if isinstance(crosshair_result, Success):
@@ -230,26 +258,17 @@ def output_verification_status(
         console.print(doctest_output)

     # CrossHair results
-    _output_crosshair_status(
-        static_exit_code, doctest_passed, crosshair_output
-    )
+    _output_crosshair_status(static_exit_code, doctest_passed, crosshair_output)

     # Property tests results
     if property_output:
-        _output_property_tests_status(
-            static_exit_code, doctest_passed, property_output
-        )
+        _output_property_tests_status(static_exit_code, doctest_passed, property_output)
     else:
         console.print("[dim]⊘ Runtime tests skipped (static errors)[/dim]")

     # DX-26: Combined conclusion after all phases
     console.print("-" * 40)
-    all_passed = (
-        static_exit_code == 0
-        and doctest_passed
-        and crosshair_passed
-        and property_passed
-    )
+    all_passed = static_exit_code == 0 and doctest_passed and crosshair_passed and property_passed
     # In strict mode, warnings also cause failure (but exit code already reflects this)
     status = "passed" if all_passed else "failed"
     color = "green" if all_passed else "red"
@@ -259,6 +278,7 @@ def output_verification_status(
 # @shell_orchestration: Coordinates shell module calls for property testing
 # @shell_complexity: Property tests with result aggregation
 def run_property_tests_phase(
+    project_root: Path,
     checked_files: list[Path],
     doctest_passed: bool,
     static_exit_code: int,
@@ -290,7 +310,12 @@ def run_property_tests_phase(
     if not core_files:
         return True, {"status": "skipped", "reason": "no core files"}, None

-    result = run_property_tests_on_files(
+    result = run_property_tests_on_files(
+        core_files,
+        max_examples,
+        collect_coverage=collect_coverage,
+        project_root=project_root,
+    )

     if isinstance(result, Success):
         report, coverage_data = result.unwrap()
@@ -305,15 +330,19 @@ def run_property_tests_phase(
             for r in report.results
             if not r.passed
         ]
-        return
-
-
-
-
-
-
-
-
+        return (
+            report.all_passed(),
+            {
+                "status": "passed" if report.all_passed() else "failed",
+                "functions_tested": report.functions_tested,
+                "functions_passed": report.functions_passed,
+                "functions_failed": report.functions_failed,
+                "total_examples": report.total_examples,
+                "failures": failures,  # DX-26: Structured failure info
+                "errors": report.errors,
+            },
+            coverage_data,
+        )

     return False, {"status": "error", "error": result.failure()}, None

@@ -366,8 +395,8 @@ def _output_property_tests_status(
         # Show reproduction command with seed
         if seed:
             console.print(
-                f
-                f
+                f' [dim]Reproduce: python -c "from hypothesis import reproduce_failure; '
+                f'import {func_name}" --seed={seed}[/dim]'
             )
     # Fallback for errors without structured failures
     for error in property_output.get("errors", [])[:5]:
@@ -406,8 +435,7 @@ def _output_crosshair_status(
         if workers > 1:
             stats += f", {workers} workers"
         console.print(
-            f"[green]✓ CrossHair verified[/green] "
-            f"[dim]({stats}, {time_sec:.1f}s)[/dim]"
+            f"[green]✓ CrossHair verified[/green] [dim]({stats}, {time_sec:.1f}s)[/dim]"
         )
     else:
         console.print("[green]✓ CrossHair verified[/green]")
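The main behavioral change in this file is that `collect_files_to_check` now filters collected files through the project's `exclude_paths` and de-duplicates the result while preserving order. Below is a minimal standalone sketch of that collect-filter-dedup pattern; the `is_excluded` matcher is a hypothetical fnmatch-based stand-in for `invar.shell.fs._is_excluded`, whose real semantics may differ.

```python
# Sketch only: hypothetical is_excluded stands in for invar.shell.fs._is_excluded.
from fnmatch import fnmatch
from pathlib import Path


def is_excluded(rel_path: str, patterns: list[str]) -> bool:
    """Assumed semantics: True if rel_path matches any exclude pattern."""
    return any(fnmatch(rel_path, pat) for pat in patterns)


def collect_py_files(root: Path, search_dirs: list[str], patterns: list[str]) -> list[Path]:
    """Gather *.py files under root/<dir>, skip excluded ones, dedup in order."""
    found: list[Path] = []
    for sub in search_dirs:
        base = root / sub
        if not base.exists():
            continue
        for py_file in base.rglob("*.py"):
            try:
                rel = str(py_file.relative_to(root))
            except ValueError:
                rel = str(py_file)
            if not is_excluded(rel, patterns):
                found.append(py_file)
    seen: set[str] = set()
    unique: list[Path] = []
    for f in found:  # order-preserving de-duplication, as in the diff above
        if str(f) not in seen:
            seen.add(str(f))
            unique.append(f)
    return unique
```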
invar/shell/property_tests.py
CHANGED
@@ -9,28 +9,57 @@ from __future__ import annotations

 import importlib.util
 import sys
+from contextlib import contextmanager, suppress
 from typing import TYPE_CHECKING

+if TYPE_CHECKING:
+    from pathlib import Path
+
 from returns.result import Failure, Result, Success
 from rich.console import Console

-from invar.core.property_gen import (
-
-    find_contracted_functions,
-    run_property_test,
-)
-
-if TYPE_CHECKING:
-    from pathlib import Path
+from invar.core.property_gen import PropertyTestReport, find_contracted_functions, run_property_test
+from invar.shell.subprocess_env import detect_project_venv, find_site_packages

 console = Console()


+# @shell_orchestration: Temporarily inject venv site-packages for module imports
+@contextmanager
+def _inject_project_site_packages(project_root: Path):
+    venv = detect_project_venv(project_root)
+    site_packages = find_site_packages(venv) if venv is not None else None
+
+    if site_packages is None:
+        yield
+        return
+
+    src_dir = project_root / "src"
+
+    added: list[str] = []
+    if src_dir.exists():
+        src_dir_str = str(src_dir)
+        sys.path.insert(0, src_dir_str)
+        added.append(src_dir_str)
+
+    site_packages_str = str(site_packages)
+    sys.path.insert(0, site_packages_str)
+    added.append(site_packages_str)
+
+    try:
+        yield
+    finally:
+        for p in added:
+            with suppress(ValueError):
+                sys.path.remove(p)
+
+
 # @shell_complexity: Property test orchestration with module import
 def run_property_tests_on_file(
     file_path: Path,
     max_examples: int = 100,
     verbose: bool = False,
+    project_root: Path | None = None,
 ) -> Result[PropertyTestReport, str]:
     """
     Run property tests on all contracted functions in a file.
@@ -66,8 +95,10 @@ def run_property_tests_on_file(
     if not contracted:
         return Success(PropertyTestReport())  # No contracted functions, skip

-
-
+    root = project_root or file_path.parent
+    with _inject_project_site_packages(root):
+        module = _import_module_from_path(file_path)
+
     if module is None:
         return Failure(f"Could not import module: {file_path}")

@@ -105,6 +136,7 @@ def run_property_tests_on_files(
     max_examples: int = 100,
     verbose: bool = False,
     collect_coverage: bool = False,
+    project_root: Path | None = None,
 ) -> Result[tuple[PropertyTestReport, dict | None], str]:
     """
     Run property tests on multiple files.
@@ -122,9 +154,9 @@ def run_property_tests_on_files(
     try:
         import hypothesis  # noqa: F401
     except ImportError:
-        return Success(
-            errors=["Hypothesis not installed (pip install hypothesis)"]
-        )
+        return Success(
+            (PropertyTestReport(errors=["Hypothesis not installed (pip install hypothesis)"]), None)
+        )

     combined_report = PropertyTestReport()
     coverage_data = None
@@ -138,7 +170,9 @@ def run_property_tests_on_files(
             source_dirs = list({f.parent for f in files})
             with cov_ctx(source_dirs) as cov:
                 for file_path in files:
-                    result = run_property_tests_on_file(
+                    result = run_property_tests_on_file(
+                        file_path, max_examples, verbose, project_root=project_root
+                    )
                     _accumulate_report(combined_report, result)

             # Extract coverage after all tests
@@ -151,11 +185,15 @@ def run_property_tests_on_files(
         except ImportError:
             # coverage not installed, run without it
             for file_path in files:
-                result = run_property_tests_on_file(
+                result = run_property_tests_on_file(
+                    file_path, max_examples, verbose, project_root=project_root
+                )
                 _accumulate_report(combined_report, result)
     else:
         for file_path in files:
-            result = run_property_tests_on_file(
+            result = run_property_tests_on_file(
+                file_path, max_examples, verbose, project_root=project_root
+            )
             _accumulate_report(combined_report, result)

     return Success((combined_report, coverage_data))
@@ -222,26 +260,29 @@ def format_property_test_report(
     import json

     if json_output:
-        return json.dumps(
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        return json.dumps(
+            {
+                "functions_tested": report.functions_tested,
+                "functions_passed": report.functions_passed,
+                "functions_failed": report.functions_failed,
+                "functions_skipped": report.functions_skipped,
+                "total_examples": report.total_examples,
+                "all_passed": report.all_passed(),
+                "results": [
+                    {
+                        "function": r.function_name,
+                        "passed": r.passed,
+                        "examples": r.examples_run,
+                        "error": r.error,
+                        "file_path": r.file_path,  # DX-26
+                        "seed": r.seed,  # DX-26
+                    }
+                    for r in report.results
+                ],
+                "errors": report.errors,
+            },
+            indent=2,
+        )

     # Human-readable format
     lines = []
@@ -263,10 +304,16 @@ def format_property_test_report(
     for result in report.results:
         if not result.passed:
             # DX-26: file::function format
-            location =
+            location = (
+                f"{result.file_path}::{result.function_name}"
+                if result.file_path
+                else result.function_name
+            )
             lines.append(f" [red]✗[/red] {location}")
             if result.error:
-                short_error =
+                short_error = (
+                    result.error[:100] + "..." if len(result.error) > 100 else result.error
+                )
                 lines.append(f" {short_error}")
             if result.seed:
                 lines.append(f" [dim]Seed: {result.seed}[/dim]")
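The new `_inject_project_site_packages` context manager temporarily prepends the target project's `src/` directory and venv `site-packages` to `sys.path` so the property-test module import can resolve the project's own dependencies. A generic sketch of that pattern, with the paths supplied explicitly rather than discovered via `detect_project_venv`/`find_site_packages`:

```python
# Sketch of the temporary sys.path injection pattern; paths are passed in
# directly here instead of being discovered from the project's venv.
import sys
from contextlib import contextmanager, suppress
from pathlib import Path


@contextmanager
def inject_paths(*paths: Path):
    """Prepend existing paths to sys.path for the duration of the block."""
    added: list[str] = []
    for p in paths:
        if p.exists():
            entry = str(p)
            sys.path.insert(0, entry)
            added.append(entry)
    try:
        yield
    finally:
        for entry in added:  # restore sys.path even if the body raised
            with suppress(ValueError):
                sys.path.remove(entry)


# Usage, roughly mirroring run_property_tests_on_file (names illustrative):
# with inject_paths(project_root / "src", site_packages):
#     module = import_module_from_path(file_path)
```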
invar/shell/prove/crosshair.py
CHANGED
@@ -12,7 +12,7 @@ import os
 import subprocess
 import sys
 from concurrent.futures import ProcessPoolExecutor, as_completed
-from pathlib import Path
+from pathlib import Path
 from typing import TYPE_CHECKING

 from returns.result import Failure, Result, Success
@@ -82,10 +82,7 @@ def has_verifiable_contracts(source: str) -> bool:
             if isinstance(func, ast.Name) and func.id in contract_decorators:
                 return True
             # @deal.pre(...) or @deal.post(...)
-            if (
-                isinstance(func, ast.Attribute)
-                and func.attr in contract_decorators
-            ):
+            if isinstance(func, ast.Attribute) and func.attr in contract_decorators:
                 return True

     return False
@@ -102,6 +99,7 @@ def _verify_single_file(
     max_iterations: int = 5,
     timeout: int = 300,
     per_condition_timeout: int = 30,
+    project_root: str | None = None,
 ) -> dict[str, Any]:
     """
     Verify a single file with CrossHair.
@@ -133,13 +131,14 @@ def _verify_single_file(
     ]

     try:
-
+        env_root = Path(project_root) if project_root else None
         result = subprocess.run(
             cmd,
             capture_output=True,
             text=True,
             timeout=timeout,
-
+            cwd=project_root,
+            env=build_subprocess_env(cwd=env_root),
         )

         elapsed_ms = int((time.time() - start_time) * 1000)
@@ -222,6 +221,7 @@ def run_crosshair_parallel(
     cache: ProveCache | None = None,
     timeout: int = 300,
     per_condition_timeout: int = 30,
+    project_root: Path | None = None,
 ) -> Result[dict, str]:
     """Run CrossHair on multiple files in parallel (DX-13).

@@ -331,7 +331,12 @@ def run_crosshair_parallel(
         with ProcessPoolExecutor(max_workers=max_workers) as executor:
             futures = {
                 executor.submit(
-                    _verify_single_file,
+                    _verify_single_file,
+                    str(f.resolve()),
+                    max_iterations,
+                    timeout,
+                    per_condition_timeout,
+                    str(project_root) if project_root else None,
                 ): f
                 for f in files_to_verify
             }
@@ -355,7 +360,11 @@ def run_crosshair_parallel(
        # Sequential execution (single file or max_workers=1)
        for py_file in files_to_verify:
            result = _verify_single_file(
-                str(py_file),
+                str(py_file.resolve()),
+                max_iterations,
+                timeout,
+                per_condition_timeout,
+                str(project_root) if project_root else None,
            )
            _process_verification_result(
                result,
@@ -368,9 +377,7 @@ def run_crosshair_parallel(
            total_time_ms += result.get("time_ms", 0)

     # Determine overall status
-    status = (
-        CrossHairStatus.VERIFIED if not failed_files else CrossHairStatus.COUNTEREXAMPLE
-    )
+    status = CrossHairStatus.VERIFIED if not failed_files else CrossHairStatus.COUNTEREXAMPLE

     return Success(
         {
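The thread running through these hunks is that `project_root` now flows from `run_crosshair_parallel` down to `_verify_single_file`, so the CrossHair subprocess runs with `cwd=project_root` and an environment produced by `build_subprocess_env`. A minimal sketch of that invocation shape is shown below; `run_in_project` is a hypothetical helper, and the real command construction, error handling, and output parsing are omitted.

```python
# Hypothetical helper illustrating the cwd/env threading from the diff above.
import subprocess
from pathlib import Path

from invar.shell.subprocess_env import build_subprocess_env


def run_in_project(cmd: list[str], project_root: Path | None, timeout: int = 300):
    """Run cmd with the project root as cwd and its venv visible on PYTHONPATH."""
    return subprocess.run(
        cmd,
        capture_output=True,
        text=True,
        timeout=timeout,
        cwd=str(project_root) if project_root else None,
        env=build_subprocess_env(cwd=project_root),
    )
```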
invar/shell/subprocess_env.py
CHANGED
@@ -135,13 +135,17 @@ def build_subprocess_env(cwd: Path | None = None) -> dict[str, str]:
     if site_packages is None:
         return env

-    # Prepend to PYTHONPATH (project packages have priority)
     current = env.get("PYTHONPATH", "")
     separator = ";" if os.name == "nt" else ":"
-
-
-
-
+
+    src_dir = project_root / "src"
+    prefix_parts: list[str] = []
+    if src_dir.exists():
+        prefix_parts.append(str(src_dir))
+    prefix_parts.append(str(site_packages))
+
+    prefix = separator.join(prefix_parts)
+    env["PYTHONPATH"] = f"{prefix}{separator}{current}" if current else prefix

     return env
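The PYTHONPATH logic now prepends the project's `src/` directory (when present) ahead of the venv's `site-packages`, and both ahead of any existing PYTHONPATH. A pure-function sketch of that composition with the inputs made explicit (the real `build_subprocess_env` derives `project_root` and `site_packages` itself):

```python
# Sketch: same composition as the diff, but over explicit arguments.
import os
from pathlib import Path


def compose_pythonpath(project_root: Path, site_packages: Path, current: str = "") -> str:
    separator = ";" if os.name == "nt" else ":"
    prefix_parts: list[str] = []
    src_dir = project_root / "src"
    if src_dir.exists():
        prefix_parts.append(str(src_dir))  # project packages take priority
    prefix_parts.append(str(site_packages))  # then the venv's site-packages
    prefix = separator.join(prefix_parts)
    return f"{prefix}{separator}{current}" if current else prefix
```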