invar-tools 1.17.12__py3-none-any.whl → 1.17.21__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- invar/mcp/handlers.py +58 -2
- invar/node_tools/eslint-plugin/cli.js +105 -31
- invar/node_tools/eslint-plugin/rules/require-schema-validation.js +80 -66
- invar/shell/commands/guard.py +46 -6
- invar/shell/config.py +38 -5
- invar/shell/git.py +10 -11
- invar/shell/guard_helpers.py +69 -41
- invar/shell/property_tests.py +85 -38
- invar/shell/prove/crosshair.py +19 -12
- invar/shell/prove/guard_ts.py +39 -17
- invar/shell/subprocess_env.py +58 -5
- invar/shell/testing.py +59 -31
- {invar_tools-1.17.12.dist-info → invar_tools-1.17.21.dist-info}/METADATA +3 -3
- {invar_tools-1.17.12.dist-info → invar_tools-1.17.21.dist-info}/RECORD +19 -19
- {invar_tools-1.17.12.dist-info → invar_tools-1.17.21.dist-info}/WHEEL +0 -0
- {invar_tools-1.17.12.dist-info → invar_tools-1.17.21.dist-info}/entry_points.txt +0 -0
- {invar_tools-1.17.12.dist-info → invar_tools-1.17.21.dist-info}/licenses/LICENSE +0 -0
- {invar_tools-1.17.12.dist-info → invar_tools-1.17.21.dist-info}/licenses/LICENSE-GPL +0 -0
- {invar_tools-1.17.12.dist-info → invar_tools-1.17.21.dist-info}/licenses/NOTICE +0 -0
invar/shell/guard_helpers.py
CHANGED
@@ -36,24 +36,36 @@ def handle_changed_mode(
     if isinstance(changed_result, Failure):
         return Failure(changed_result.failure())
 
+    all_files = changed_result.unwrap()
+    only_files = {p for p in all_files if p.is_relative_to(path)}
     if not only_files:
-        return Failure("NO_CHANGES")
+        return Failure("NO_CHANGES")
 
     return Success((only_files, list(only_files)))
 
 
 # @shell_orchestration: Coordinates path classification and file collection
 # @shell_complexity: File collection with path normalization
-def collect_files_to_check(
-    from invar.shell.config import get_path_classification
+def collect_files_to_check(path: Path, checked_files: list[Path]) -> list[Path]:
+    """Collect Python files for runtime phases, honoring exclude_paths."""
+    from invar.shell.config import get_exclude_paths, get_path_classification
+    from invar.shell.fs import _is_excluded
 
     if checked_files:
         return checked_files
 
+    exclude_result = get_exclude_paths(path)
+    exclude_patterns = exclude_result.unwrap() if isinstance(exclude_result, Success) else []
+
+    def _add_py_files_under(root: Path) -> None:
+        for py_file in root.rglob("*.py"):
+            try:
+                rel = str(py_file.relative_to(path))
+            except ValueError:
+                rel = str(py_file)
+            if not _is_excluded(rel, exclude_patterns):
+                result_files.append(py_file)
+
     result_files: list[Path] = []
 
     path_result = get_path_classification(path)
@@ -62,26 +74,33 @@ def collect_files_to_check(
     else:
         core_paths, shell_paths = ["src/core"], ["src/shell"]
 
-    # Scan core/shell paths
     for core_path in core_paths:
         full_path = path / core_path
         if full_path.exists():
+            _add_py_files_under(full_path)
 
     for shell_path in shell_paths:
         full_path = path / shell_path
         if full_path.exists():
+            _add_py_files_under(full_path)
 
-    # Fallback: scan path directly
     if not result_files and path.exists():
+        _add_py_files_under(path)
+
+    seen: set[str] = set()
+    unique: list[Path] = []
+    for f in result_files:
+        key = str(f)
+        if key not in seen:
+            seen.add(key)
+            unique.append(f)
 
-    return
+    return unique
 
 
 # @shell_orchestration: Coordinates doctest execution via testing module
 def run_doctests_phase(
+    project_root: Path,
     checked_files: list[Path],
     explain: bool,
     timeout: int = 60,
@@ -103,12 +122,20 @@ def run_doctests_phase(
         return True, "", None
 
     doctest_result = run_doctests_on_files(
-        checked_files,
+        checked_files,
+        verbose=explain,
+        timeout=timeout,
+        collect_coverage=collect_coverage,
+        cwd=project_root,
     )
     if isinstance(doctest_result, Success):
         result_data = doctest_result.unwrap()
         passed = result_data.get("status") in ("passed", "skipped")
+        stdout = result_data.get("stdout", "")
+        stderr = result_data.get("stderr", "")
+        output = stdout
+        if not passed and stderr:
+            output = f"{stdout}\n{stderr}" if stdout else stderr
         # DX-37: Return coverage data if collected
         coverage_data = {"collected": result_data.get("coverage_collected", False)}
         return passed, output, coverage_data if collect_coverage else None
@@ -176,6 +203,7 @@ def run_crosshair_phase(
         cache=cache,
         timeout=timeout,
         per_condition_timeout=per_condition_timeout,
+        project_root=path,
     )
 
     if isinstance(crosshair_result, Success):
@@ -230,26 +258,17 @@ def output_verification_status(
         console.print(doctest_output)
 
     # CrossHair results
-    _output_crosshair_status(
-        static_exit_code, doctest_passed, crosshair_output
-    )
+    _output_crosshair_status(static_exit_code, doctest_passed, crosshair_output)
 
     # Property tests results
     if property_output:
-        _output_property_tests_status(
-            static_exit_code, doctest_passed, property_output
-        )
+        _output_property_tests_status(static_exit_code, doctest_passed, property_output)
     else:
         console.print("[dim]⊘ Runtime tests skipped (static errors)[/dim]")
 
     # DX-26: Combined conclusion after all phases
     console.print("-" * 40)
-    all_passed =
-        static_exit_code == 0
-        and doctest_passed
-        and crosshair_passed
-        and property_passed
-    )
+    all_passed = static_exit_code == 0 and doctest_passed and crosshair_passed and property_passed
     # In strict mode, warnings also cause failure (but exit code already reflects this)
     status = "passed" if all_passed else "failed"
     color = "green" if all_passed else "red"
@@ -259,6 +278,7 @@ def output_verification_status(
 # @shell_orchestration: Coordinates shell module calls for property testing
 # @shell_complexity: Property tests with result aggregation
 def run_property_tests_phase(
+    project_root: Path,
     checked_files: list[Path],
     doctest_passed: bool,
     static_exit_code: int,
@@ -290,7 +310,12 @@ def run_property_tests_phase(
     if not core_files:
         return True, {"status": "skipped", "reason": "no core files"}, None
 
-    result = run_property_tests_on_files(
+    result = run_property_tests_on_files(
+        core_files,
+        max_examples,
+        collect_coverage=collect_coverage,
+        project_root=project_root,
+    )
 
     if isinstance(result, Success):
         report, coverage_data = result.unwrap()
@@ -305,15 +330,19 @@ def run_property_tests_phase(
             for r in report.results
             if not r.passed
         ]
-        return
+        return (
+            report.all_passed(),
+            {
+                "status": "passed" if report.all_passed() else "failed",
+                "functions_tested": report.functions_tested,
+                "functions_passed": report.functions_passed,
+                "functions_failed": report.functions_failed,
+                "total_examples": report.total_examples,
+                "failures": failures,  # DX-26: Structured failure info
+                "errors": report.errors,
+            },
+            coverage_data,
+        )
 
     return False, {"status": "error", "error": result.failure()}, None
 
@@ -366,8 +395,8 @@ def _output_property_tests_status(
             # Show reproduction command with seed
             if seed:
                 console.print(
-                    f
-                    f
+                    f' [dim]Reproduce: python -c "from hypothesis import reproduce_failure; '
+                    f'import {func_name}" --seed={seed}[/dim]'
                 )
         # Fallback for errors without structured failures
         for error in property_output.get("errors", [])[:5]:
@@ -406,8 +435,7 @@ def _output_crosshair_status(
         if workers > 1:
             stats += f", {workers} workers"
         console.print(
-            f"[green]✓ CrossHair verified[/green] "
-            f"[dim]({stats}, {time_sec:.1f}s)[/dim]"
+            f"[green]✓ CrossHair verified[/green] [dim]({stats}, {time_sec:.1f}s)[/dim]"
        )
     else:
        console.print("[green]✓ CrossHair verified[/green]")
invar/shell/property_tests.py
CHANGED
@@ -9,28 +9,57 @@ from __future__ import annotations
 
 import importlib.util
 import sys
+from contextlib import contextmanager, suppress
 from typing import TYPE_CHECKING
 
+if TYPE_CHECKING:
+    from pathlib import Path
+
 from returns.result import Failure, Result, Success
 from rich.console import Console
 
-from invar.core.property_gen import
-    find_contracted_functions,
-    run_property_test,
-)
-if TYPE_CHECKING:
-    from pathlib import Path
+from invar.core.property_gen import PropertyTestReport, find_contracted_functions, run_property_test
+from invar.shell.subprocess_env import detect_project_venv, find_site_packages
 
 console = Console()
 
 
+# @shell_orchestration: Temporarily inject venv site-packages for module imports
+@contextmanager
+def _inject_project_site_packages(project_root: Path):
+    venv = detect_project_venv(project_root)
+    site_packages = find_site_packages(venv) if venv is not None else None
+
+    if site_packages is None:
+        yield
+        return
+
+    src_dir = project_root / "src"
+
+    added: list[str] = []
+    if src_dir.exists():
+        src_dir_str = str(src_dir)
+        sys.path.insert(0, src_dir_str)
+        added.append(src_dir_str)
+
+    site_packages_str = str(site_packages)
+    sys.path.insert(0, site_packages_str)
+    added.append(site_packages_str)
+
+    try:
+        yield
+    finally:
+        for p in added:
+            with suppress(ValueError):
+                sys.path.remove(p)
+
+
 # @shell_complexity: Property test orchestration with module import
 def run_property_tests_on_file(
     file_path: Path,
     max_examples: int = 100,
     verbose: bool = False,
+    project_root: Path | None = None,
 ) -> Result[PropertyTestReport, str]:
     """
     Run property tests on all contracted functions in a file.
@@ -66,8 +95,10 @@ def run_property_tests_on_file(
     if not contracted:
         return Success(PropertyTestReport())  # No contracted functions, skip
 
+    root = project_root or file_path.parent
+    with _inject_project_site_packages(root):
+        module = _import_module_from_path(file_path)
+
     if module is None:
         return Failure(f"Could not import module: {file_path}")
 
@@ -105,6 +136,7 @@ def run_property_tests_on_files(
     max_examples: int = 100,
     verbose: bool = False,
     collect_coverage: bool = False,
+    project_root: Path | None = None,
 ) -> Result[tuple[PropertyTestReport, dict | None], str]:
     """
     Run property tests on multiple files.
@@ -122,9 +154,9 @@ def run_property_tests_on_files(
     try:
         import hypothesis  # noqa: F401
     except ImportError:
-        return Success(
-            errors=["Hypothesis not installed (pip install hypothesis)"]
-        )
+        return Success(
+            (PropertyTestReport(errors=["Hypothesis not installed (pip install hypothesis)"]), None)
+        )
 
     combined_report = PropertyTestReport()
     coverage_data = None
@@ -138,7 +170,9 @@ def run_property_tests_on_files(
             source_dirs = list({f.parent for f in files})
             with cov_ctx(source_dirs) as cov:
                 for file_path in files:
-                    result = run_property_tests_on_file(
+                    result = run_property_tests_on_file(
+                        file_path, max_examples, verbose, project_root=project_root
+                    )
                     _accumulate_report(combined_report, result)
 
             # Extract coverage after all tests
@@ -151,11 +185,15 @@ def run_property_tests_on_files(
         except ImportError:
             # coverage not installed, run without it
             for file_path in files:
-                result = run_property_tests_on_file(
+                result = run_property_tests_on_file(
+                    file_path, max_examples, verbose, project_root=project_root
+                )
                 _accumulate_report(combined_report, result)
     else:
         for file_path in files:
-            result = run_property_tests_on_file(
+            result = run_property_tests_on_file(
+                file_path, max_examples, verbose, project_root=project_root
+            )
             _accumulate_report(combined_report, result)
 
     return Success((combined_report, coverage_data))
@@ -222,26 +260,29 @@ def format_property_test_report(
     import json
 
     if json_output:
-        return json.dumps(
+        return json.dumps(
+            {
+                "functions_tested": report.functions_tested,
+                "functions_passed": report.functions_passed,
+                "functions_failed": report.functions_failed,
+                "functions_skipped": report.functions_skipped,
+                "total_examples": report.total_examples,
+                "all_passed": report.all_passed(),
+                "results": [
+                    {
+                        "function": r.function_name,
+                        "passed": r.passed,
+                        "examples": r.examples_run,
+                        "error": r.error,
+                        "file_path": r.file_path,  # DX-26
+                        "seed": r.seed,  # DX-26
+                    }
+                    for r in report.results
+                ],
+                "errors": report.errors,
+            },
+            indent=2,
+        )
 
     # Human-readable format
     lines = []
@@ -263,10 +304,16 @@ def format_property_test_report(
     for result in report.results:
         if not result.passed:
             # DX-26: file::function format
-            location =
+            location = (
+                f"{result.file_path}::{result.function_name}"
+                if result.file_path
+                else result.function_name
+            )
             lines.append(f"  [red]✗[/red] {location}")
             if result.error:
-                short_error =
+                short_error = (
+                    result.error[:100] + "..." if len(result.error) > 100 else result.error
+                )
                 lines.append(f"    {short_error}")
             if result.seed:
                 lines.append(f"    [dim]Seed: {result.seed}[/dim]")
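Note: a minimal sketch of the updated call shape for run_property_tests_on_files with the new project_root keyword; the file list and root shown are hypothetical, and the result is unwrapped following the Success pattern used throughout this module:

    from pathlib import Path

    from returns.result import Success

    from invar.shell.property_tests import run_property_tests_on_files

    result = run_property_tests_on_files(
        [Path("src/core/example.py")],       # hypothetical list of core files
        max_examples=100,
        collect_coverage=False,
        project_root=Path(".").resolve(),    # new in 1.17.21: enables venv site-packages injection
    )
    if isinstance(result, Success):
        report, coverage_data = result.unwrap()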
invar/shell/prove/crosshair.py
CHANGED
@@ -12,7 +12,7 @@ import os
 import subprocess
 import sys
 from concurrent.futures import ProcessPoolExecutor, as_completed
-from pathlib import Path
+from pathlib import Path
 from typing import TYPE_CHECKING
 
 from returns.result import Failure, Result, Success
@@ -82,10 +82,7 @@ def has_verifiable_contracts(source: str) -> bool:
             if isinstance(func, ast.Name) and func.id in contract_decorators:
                 return True
             # @deal.pre(...) or @deal.post(...)
-            if (
-                isinstance(func, ast.Attribute)
-                and func.attr in contract_decorators
-            ):
+            if isinstance(func, ast.Attribute) and func.attr in contract_decorators:
                 return True
 
     return False
@@ -102,6 +99,7 @@ def _verify_single_file(
     max_iterations: int = 5,
     timeout: int = 300,
     per_condition_timeout: int = 30,
+    project_root: str | None = None,
 ) -> dict[str, Any]:
     """
     Verify a single file with CrossHair.
@@ -133,13 +131,14 @@
     ]
 
     try:
+        env_root = Path(project_root) if project_root else None
         result = subprocess.run(
             cmd,
             capture_output=True,
             text=True,
             timeout=timeout,
+            cwd=project_root,
+            env=build_subprocess_env(cwd=env_root),
         )
 
         elapsed_ms = int((time.time() - start_time) * 1000)
@@ -222,6 +221,7 @@ def run_crosshair_parallel(
     cache: ProveCache | None = None,
     timeout: int = 300,
     per_condition_timeout: int = 30,
+    project_root: Path | None = None,
 ) -> Result[dict, str]:
     """Run CrossHair on multiple files in parallel (DX-13).
 
@@ -331,7 +331,12 @@
         with ProcessPoolExecutor(max_workers=max_workers) as executor:
             futures = {
                 executor.submit(
-                    _verify_single_file,
+                    _verify_single_file,
+                    str(f.resolve()),
+                    max_iterations,
+                    timeout,
+                    per_condition_timeout,
+                    str(project_root) if project_root else None,
                 ): f
                 for f in files_to_verify
             }
@@ -355,7 +360,11 @@
     # Sequential execution (single file or max_workers=1)
     for py_file in files_to_verify:
         result = _verify_single_file(
-            str(py_file),
+            str(py_file.resolve()),
+            max_iterations,
+            timeout,
+            per_condition_timeout,
+            str(project_root) if project_root else None,
        )
        _process_verification_result(
            result,
@@ -368,9 +377,7 @@
        total_time_ms += result.get("time_ms", 0)
 
    # Determine overall status
-    status =
-        CrossHairStatus.VERIFIED if not failed_files else CrossHairStatus.COUNTEREXAMPLE
-    )
+    status = CrossHairStatus.VERIFIED if not failed_files else CrossHairStatus.COUNTEREXAMPLE
 
    return Success(
        {
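Note: a minimal sketch of what the new project_root plumbing amounts to inside _verify_single_file — the subprocess now runs with the project as its working directory and with an environment derived from the project venv. The command, paths, and the import location of build_subprocess_env below are assumptions for illustration:

    import subprocess
    from pathlib import Path

    from invar.shell.subprocess_env import build_subprocess_env  # import location assumed

    project_root = "/path/to/project"                    # hypothetical
    env_root = Path(project_root)
    result = subprocess.run(
        ["crosshair", "check", "src/core/example.py"],   # hypothetical CrossHair invocation
        capture_output=True,
        text=True,
        timeout=300,
        cwd=project_root,
        env=build_subprocess_env(cwd=env_root),
    )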
invar/shell/prove/guard_ts.py
CHANGED
@@ -374,14 +374,28 @@ def _check_tool_available(tool: str, check_args: list[str]) -> bool:
 # =============================================================================
 
 
+def _is_invar_package_dir(package_dir: Path, package_name: str) -> bool:
+    package_json = package_dir / "package.json"
+    if not package_json.exists():
+        return False
+
+    try:
+        data = json.loads(package_json.read_text(encoding="utf-8"))
+    except (OSError, json.JSONDecodeError):
+        return False
+
+    return data.get("name") == f"@invar/{package_name}"
+
+
 # @shell_complexity: Path discovery with fallback logic
 def _get_invar_package_cmd(package_name: str, project_path: Path) -> list[str]:
     """Get command to run an @invar/* package.
 
     Priority order:
-    1.
-    2.
-    3.
+    1. Project-local override (typescript/packages/* or packages/*)
+    2. Embedded tools (pip install invar-tools includes these)
+    3. Local monorepo lookup (walk up)
+    4. npx fallback (if published to npm)
 
     Args:
         package_name: Package name without @invar/ prefix (e.g., "ts-analyzer")
@@ -390,7 +404,18 @@ def _get_invar_package_cmd(package_name: str, project_path: Path) -> list[str]:
     Returns:
         Command list for subprocess.run
     """
-    #
+    # Resolve to absolute path to avoid path doubling issues
+    resolved_path = project_path.resolve()
+
+    local_cli = resolved_path / "typescript" / "packages" / package_name / "dist" / "cli.js"
+    if local_cli.exists() and _is_invar_package_dir(local_cli.parent.parent, package_name):
+        return ["node", str(local_cli)]
+
+    local_cli = resolved_path / "packages" / package_name / "dist" / "cli.js"
+    if local_cli.exists() and _is_invar_package_dir(local_cli.parent.parent, package_name):
+        return ["node", str(local_cli)]
+
+    # Priority 2: Embedded tools (from pip install)
     try:
         from invar.node_tools import get_tool_path
 
@@ -399,16 +424,13 @@
     except ImportError:
         pass  # node_tools module not available
 
-    # Priority
-    # Priority 2b: Walk up to find the Invar root (monorepo setup)
-    check_path = project_path
+    # Priority 3b: Walk up to find the Invar root (monorepo setup)
+    # This is intentional for monorepo development - allows running from subdirectories
+    # Only searches up to 5 levels to limit exposure
+    check_path = resolved_path
     for _ in range(5):  # Max 5 levels up
         candidate = check_path / f"typescript/packages/{package_name}/dist/cli.js"
-        if candidate.exists():
+        if candidate.exists() and _is_invar_package_dir(candidate.parent.parent, package_name):
             return ["node", str(candidate)]
         parent = check_path.parent
         if parent == check_path:
@@ -439,7 +461,7 @@ def run_ts_analyzer(project_path: Path) -> Result[dict, str]:
     try:
         cmd = _get_invar_package_cmd("ts-analyzer", project_path)
         result = subprocess.run(
-            [*cmd, str(project_path), "--json"],
+            [*cmd, str(project_path.resolve()), "--json"],
             capture_output=True,
             text=True,
             timeout=60,
@@ -456,7 +478,7 @@ def run_ts_analyzer(project_path: Path) -> Result[dict, str]:
         # Fall back to running without --json flag for human-readable summary
         try:
             summary_result = subprocess.run(
-                [*cmd, str(project_path)],
+                [*cmd, str(project_path.resolve())],
                 capture_output=True,
                 text=True,
                 timeout=60,
@@ -553,7 +575,7 @@ def run_quick_check(project_path: Path) -> Result[dict, str]:
     try:
         cmd = _get_invar_package_cmd("quick-check", project_path)
         result = subprocess.run(
-            [*cmd, str(project_path), "--json"],
+            [*cmd, str(project_path.resolve()), "--json"],
             capture_output=True,
             text=True,
             timeout=30,  # Quick check should be fast
@@ -750,7 +772,8 @@ def run_eslint(project_path: Path) -> Result[list[TypeScriptViolation], str]:
     try:
         # Get command for @invar/eslint-plugin (embedded or local dev)
         cmd = _get_invar_package_cmd("eslint-plugin", project_path)
+        # Resolve path to absolute to avoid path doubling in subprocess
+        cmd.append(str(project_path.resolve()))  # Add project path as argument
 
         # Use temp file to avoid subprocess 64KB buffer limit
         # ESLint output can be large for big projects
@@ -805,7 +828,6 @@ def run_eslint(project_path: Path) -> Result[list[TypeScriptViolation], str]:
         if result.returncode != 0 and result.stderr:
             return Failure(f"ESLint error: {result.stderr[:200]}")
         return Failure("ESLint output parsing failed: JSON decode error")
-        return Failure("ESLint output parsing failed: JSON decode error")
 
     return Success(violations)
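Note: a minimal sketch of the resolution-plus-invocation pattern this release standardizes on — resolve the project path once, then pass the absolute path to the packaged CLI. The project path below is hypothetical, and _get_invar_package_cmd is a private helper shown purely for illustration:

    import subprocess
    from pathlib import Path

    from invar.shell.prove.guard_ts import _get_invar_package_cmd  # private helper

    project_path = Path("./my-ts-project")                      # hypothetical
    cmd = _get_invar_package_cmd("ts-analyzer", project_path)   # e.g. ["node", ".../dist/cli.js"]
    result = subprocess.run(
        [*cmd, str(project_path.resolve()), "--json"],
        capture_output=True,
        text=True,
        timeout=60,
    )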