invar-tools 1.17.12__py3-none-any.whl → 1.17.24__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -6,6 +6,8 @@ Shell module: handles user interaction and file I/O.
 
 from __future__ import annotations
 
+import os
+import sys
 from pathlib import Path
 
 import typer
@@ -17,7 +19,7 @@ from invar import __version__
 from invar.core.models import GuardReport, RuleConfig
 from invar.core.rules import check_all_rules
 from invar.core.utils import get_exit_code
-from invar.shell.config import find_project_root, load_config
+from invar.shell.config import find_project_root, find_pyproject_root, load_config
 from invar.shell.fs import scan_project
 from invar.shell.guard_output import output_agent, output_rich
 
@@ -190,12 +192,28 @@ def guard(
     ts_result = run_typescript_guard(path if path.is_dir() else find_project_root(path))
     match ts_result:
         case Success(result):
-            import json as json_mod
+            if human:
+                # Human-readable Rich output
+                from invar.shell.prove.guard_ts import format_typescript_guard_v2
+
+                output = format_typescript_guard_v2(result)
+                console.print(f"[bold]TypeScript Guard[/bold] ({project_language})")
+                if result.status == "passed":
+                    console.print("[green]✓ PASSED[/green]")
+                elif result.status == "skipped":
+                    console.print("[yellow]⚠ SKIPPED[/yellow] (no TypeScript tools available)")
+                else:
+                    console.print(f"[red]✗ FAILED[/red] ({result.error_count} errors)")
+                    for v in result.violations[:10]:  # Show first 10
+                        console.print(f" {v.file}:{v.line}: [{v.severity}] {v.message}")
+            else:
+                # JSON output for agents
+                import json as json_mod
 
-            from invar.shell.prove.guard_ts import format_typescript_guard_v2
+                from invar.shell.prove.guard_ts import format_typescript_guard_v2
 
-            output = format_typescript_guard_v2(result)
-            console.print(json_mod.dumps(output, indent=2))
+                output = format_typescript_guard_v2(result)
+                console.print(json_mod.dumps(output, indent=2))
             raise typer.Exit(0 if result.status == "passed" else 1)
         case Failure(err):
             console.print(f"[red]Error:[/red] {err}")
@@ -209,7 +227,27 @@ def guard(
         console.print(f"[red]Error:[/red] {path} is not a Python file")
         raise typer.Exit(1)
     single_file = path.resolve()
-    path = find_project_root(path)
+
+    pyproject_root = find_pyproject_root(single_file if single_file else path)
+    if pyproject_root is None:
+        console.print(
+            "[red]Error:[/red] pyproject.toml not found (searched upward from the target path)"
+        )
+        raise typer.Exit(1)
+    path = pyproject_root
+
+    from invar.shell.subprocess_env import get_uvx_respawn_command
+
+    cmd = get_uvx_respawn_command(
+        project_root=path,
+        argv=sys.argv[1:],
+        tool_name=Path(sys.argv[0]).name,
+        invar_tools_version=__version__,
+    )
+    if cmd is not None:
+        env = os.environ.copy()
+        env["INVAR_UVX_RESPAWNED"] = "1"
+        os.execvpe(cmd[0], cmd, env)
 
     # Load and configure
     config_result = load_config(path)
@@ -357,6 +395,7 @@ def guard(
 
     # Phase 1: Doctests (DX-37: with optional coverage)
     doctest_passed, doctest_output, doctest_coverage = run_doctests_phase(
+        path,
         checked_files,
         explain,
         timeout=config.timeout_doctest,
@@ -377,6 +416,7 @@ def guard(
 
     # Phase 3: Hypothesis property tests (DX-37: with optional coverage)
    property_passed, property_output, property_coverage = run_property_tests_phase(
+        path,
         checked_files,
         doctest_passed,
         static_exit_code,
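Two behavioral changes stand out in the hunks above: `guard` now fails fast when no `pyproject.toml` is found above the target path, and it may re-exec itself via `get_uvx_respawn_command`, setting `INVAR_UVX_RESPAWNED=1` so the child process knows not to respawn again. The sketch below only illustrates that exec-and-guard pattern; `build_respawn_command` is a hypothetical stand-in (the real `get_uvx_respawn_command` lives in `invar.shell.subprocess_env` and its internals are not part of this diff), and the `uvx --from` invocation is an assumption about how such a respawn command could be built.

```python
import os
import sys
from pathlib import Path


def build_respawn_command(project_root: Path, argv: list[str]) -> list[str] | None:
    """Hypothetical stand-in for get_uvx_respawn_command.

    Returns a full argv to re-exec with, or None when no respawn should happen
    (here: when the loop-guard variable is already set).
    """
    if os.environ.get("INVAR_UVX_RESPAWNED") == "1":
        return None  # already respawned once; never loop
    tool = Path(sys.argv[0]).name
    # Assumed shape of a uvx invocation of the same tool; illustration only.
    return ["uvx", "--from", "invar-tools", tool, *argv]


def maybe_respawn(project_root: Path) -> None:
    """Mirror of the exec pattern in the hunk above."""
    cmd = build_respawn_command(project_root, sys.argv[1:])
    if cmd is not None:
        env = os.environ.copy()
        env["INVAR_UVX_RESPAWNED"] = "1"  # read by the child via build_respawn_command
        os.execvpe(cmd[0], cmd, env)  # replaces the current process; no return on success
```

Because `os.execvpe` never returns on success, everything after the respawn block in `guard` runs only in the child, or when no respawn was needed.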
invar/shell/config.py CHANGED
@@ -39,11 +39,27 @@ class ModuleType(Enum):
 
 
 # I/O libraries that indicate Shell module (for AST import checking)
-_IO_LIBRARIES = frozenset([
-    "os", "sys", "subprocess", "pathlib", "shutil", "io", "socket",
-    "requests", "aiohttp", "httpx", "urllib", "sqlite3", "psycopg2",
-    "pymongo", "sqlalchemy", "typer", "click",
-])
+_IO_LIBRARIES = frozenset(
+    [
+        "os",
+        "sys",
+        "subprocess",
+        "pathlib",
+        "shutil",
+        "io",
+        "socket",
+        "requests",
+        "aiohttp",
+        "httpx",
+        "urllib",
+        "sqlite3",
+        "psycopg2",
+        "pymongo",
+        "sqlalchemy",
+        "typer",
+        "click",
+    ]
+)
 
 # Contract decorator names
 _CONTRACT_DECORATORS = frozenset(["pre", "post", "invariant"])
@@ -226,6 +242,7 @@ def auto_detect_module_type(source: str, file_path: str = "") -> ModuleType:
     # Unknown: neither clear pattern
     return ModuleType.UNKNOWN
 
+
 if TYPE_CHECKING:
     from pathlib import Path
 
@@ -268,6 +285,20 @@ def _find_config_source(project_root: Path) -> Result[tuple[Path | None, ConfigS
 
 
 # @shell_complexity: Project root discovery requires checking multiple markers
+def find_pyproject_root(start_path: "Path") -> "Path | None":  # noqa: UP037
+    from pathlib import Path
+
+    current = Path(start_path).resolve()
+    if current.is_file():
+        current = current.parent
+
+    for parent in [current, *current.parents]:
+        if (parent / "pyproject.toml").exists():
+            return parent
+
+    return None
+
+
 def find_project_root(start_path: "Path") -> "Path":  # noqa: UP037
     """
     Find project root by walking up from start_path looking for config files.
@@ -335,34 +366,44 @@ def load_config(project_root: Path) -> Result[RuleConfig, str]:
     3. .invar/config.toml [guard]
     4. Built-in defaults
 
+    If pyproject.toml exists but has no [tool.invar.guard] section,
+    continues to check other sources (fallback behavior).
+
     Args:
         project_root: Path to project root directory
 
     Returns:
         Result containing RuleConfig or error message
     """
-    find_result = _find_config_source(project_root)
-    if isinstance(find_result, Failure):
-        return find_result
-    config_path, source = find_result.unwrap()
+    # Try each config source in priority order
+    sources_to_try: list[tuple[Path, ConfigSource]] = []
 
-    if source == "default":
-        return Success(RuleConfig())
+    pyproject = project_root / "pyproject.toml"
+    if pyproject.exists():
+        sources_to_try.append((pyproject, "pyproject"))
 
-    assert config_path is not None  # source != "default" guarantees path exists
-    result = _read_toml(config_path)
+    invar_toml = project_root / "invar.toml"
+    if invar_toml.exists():
+        sources_to_try.append((invar_toml, "invar"))
 
-    if isinstance(result, Failure):
-        return result
+    invar_config = project_root / ".invar" / "config.toml"
+    if invar_config.exists():
+        sources_to_try.append((invar_config, "invar_dir"))
 
-    data = result.unwrap()
-    guard_config = extract_guard_section(data, source)
+    # Try each source, fallback if no guard config found
+    for config_path, source in sources_to_try:
+        result = _read_toml(config_path)
+        if isinstance(result, Failure):
+            continue  # Skip unreadable files
 
-    # For pyproject.toml, if no [tool.invar.guard] section, use defaults
-    if source == "pyproject" and not guard_config:
-        return Success(RuleConfig())
+        data = result.unwrap()
+        guard_config = extract_guard_section(data, source)
 
-    return Success(parse_guard_config(guard_config))
+        if guard_config:  # Found valid guard config
+            return Success(parse_guard_config(guard_config))
+
+    # No config found in any source, use defaults
+    return Success(RuleConfig())
 
 
 # Default paths for Core/Shell classification
@@ -492,6 +533,7 @@ def classify_file(
     else:
         # Log warning about config error, use defaults
         import logging
+
         logging.getLogger(__name__).debug(
             "Pattern classification failed: %s, using defaults", pattern_result.failure()
         )
@@ -503,6 +545,7 @@ def classify_file(
     else:
         # Log warning about config error, use defaults
         import logging
+
         logging.getLogger(__name__).debug(
             "Path classification failed: %s, using defaults", path_result.failure()
         )
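The practical effect of the `load_config` rewrite: a `pyproject.toml` without a `[tool.invar.guard]` table no longer short-circuits to built-in defaults; the loader now falls through to `invar.toml [guard]` and then `.invar/config.toml [guard]`. Below is a standalone sketch of just that lookup order using only the standard library; the key traversal and the demo option name are illustrative, not invar's schema or implementation.

```python
import tomllib
from pathlib import Path


def read_guard_section(project_root: Path) -> dict:
    """Illustrative fallback: the first readable source with a guard table wins."""
    candidates = [
        (project_root / "pyproject.toml", ("tool", "invar", "guard")),
        (project_root / "invar.toml", ("guard",)),
        (project_root / ".invar" / "config.toml", ("guard",)),
    ]
    for path, keys in candidates:
        if not path.exists():
            continue
        try:
            data = tomllib.loads(path.read_text())
        except (OSError, tomllib.TOMLDecodeError):
            continue  # unreadable file: try the next source, as the new loader does
        section = data
        for key in keys:
            section = section.get(key, {}) if isinstance(section, dict) else {}
        if isinstance(section, dict) and section:  # non-empty guard table found
            return section
    return {}  # no source had a guard table: built-in defaults apply


if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        root = Path(tmp)
        # pyproject.toml exists but carries no [tool.invar.guard] table ...
        (root / "pyproject.toml").write_text('[project]\nname = "demo"\n')
        # ... so the [guard] table from invar.toml is used instead of defaults.
        # "demo_option" is a made-up key, not a real invar setting.
        (root / "invar.toml").write_text("[guard]\ndemo_option = 1\n")
        print(read_guard_section(root))  # {'demo_option': 1}
```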
invar/shell/git.py CHANGED
@@ -7,13 +7,10 @@ Shell module: handles git I/O for changed file detection.
 from __future__ import annotations
 
 import subprocess
-from typing import TYPE_CHECKING
+from pathlib import Path
 
 from returns.result import Failure, Result, Success
 
-if TYPE_CHECKING:
-    from pathlib import Path
-
 
 def _run_git(args: list[str], cwd: Path) -> Result[str, str]:
     """Run a git command and return stdout."""
@@ -49,27 +46,29 @@ def get_changed_files(project_root: Path) -> Result[set[Path], str]:
     >>> isinstance(result, (Success, Failure))
     True
     """
-    # Verify git repo
     check = _run_git(["rev-parse", "--git-dir"], project_root)
     if isinstance(check, Failure):
         return Failure(f"Not a git repository: {project_root}")
 
+    repo_root_result = _run_git(["rev-parse", "--show-toplevel"], project_root)
+    if isinstance(repo_root_result, Failure):
+        return Failure(repo_root_result.failure())
+
+    repo_root = Path(repo_root_result.unwrap().strip())
+
     changed: set[Path] = set()
 
-    # Staged changes
     staged = _run_git(["diff", "--cached", "--name-only"], project_root)
     if isinstance(staged, Success):
-        changed.update(_parse_py_files(staged.unwrap(), project_root))
+        changed.update(_parse_py_files(staged.unwrap(), repo_root))
 
-    # Unstaged changes
     unstaged = _run_git(["diff", "--name-only"], project_root)
     if isinstance(unstaged, Success):
-        changed.update(_parse_py_files(unstaged.unwrap(), project_root))
+        changed.update(_parse_py_files(unstaged.unwrap(), repo_root))
 
-    # Untracked files
     untracked = _run_git(["ls-files", "--others", "--exclude-standard"], project_root)
     if isinstance(untracked, Success):
-        changed.update(_parse_py_files(untracked.unwrap(), project_root))
+        changed.update(_parse_py_files(untracked.unwrap(), repo_root))
 
     return Success(changed)
 
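Why `get_changed_files` gained the extra `rev-parse --show-toplevel` call: `git diff --name-only` and `git ls-files` print paths relative to the repository top level, not to the directory the command ran in, so joining them onto `project_root` breaks whenever the invar project is a subdirectory of a larger repository. A reduced standalone illustration of the corrected anchoring (plain exceptions instead of the `Result` type, and without the `_run_git`/`_parse_py_files` helpers from the module above):

```python
import subprocess
from pathlib import Path


def changed_python_files(project_root: Path) -> set[Path]:
    """Resolve git's relative paths against the repo top level, as the fix above does."""

    def git(*args: str) -> str:
        return subprocess.run(
            ["git", *args],
            cwd=project_root,
            capture_output=True,
            text=True,
            check=True,
        ).stdout

    # Paths printed by git are relative to the repository root, which may be
    # a parent of project_root (e.g. a monorepo subdirectory).
    repo_root = Path(git("rev-parse", "--show-toplevel").strip())

    names: set[str] = set()
    names.update(git("diff", "--cached", "--name-only").splitlines())  # staged
    names.update(git("diff", "--name-only").splitlines())  # unstaged
    names.update(git("ls-files", "--others", "--exclude-standard").splitlines())  # untracked

    return {repo_root / n for n in names if n.endswith(".py")}
```

The `handle_changed_mode` hunk that follows then filters this repo-wide set back down to files under the project path with `is_relative_to`.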
@@ -36,24 +36,36 @@ def handle_changed_mode(
     if isinstance(changed_result, Failure):
         return Failure(changed_result.failure())
 
-    only_files = changed_result.unwrap()
+    all_files = changed_result.unwrap()
+    only_files = {p for p in all_files if p.is_relative_to(path)}
     if not only_files:
-        return Failure("NO_CHANGES")  # Special marker for "no changes"
+        return Failure("NO_CHANGES")
 
     return Success((only_files, list(only_files)))
 
 
 # @shell_orchestration: Coordinates path classification and file collection
 # @shell_complexity: File collection with path normalization
-def collect_files_to_check(
-    path: Path, checked_files: list[Path]
-) -> list[Path]:
-    """Collect Python files to check when not in --changed mode."""
-    from invar.shell.config import get_path_classification
+def collect_files_to_check(path: Path, checked_files: list[Path]) -> list[Path]:
+    """Collect Python files for runtime phases, honoring exclude_paths."""
+    from invar.shell.config import get_exclude_paths, get_path_classification
+    from invar.shell.fs import _is_excluded
 
     if checked_files:
         return checked_files
 
+    exclude_result = get_exclude_paths(path)
+    exclude_patterns = exclude_result.unwrap() if isinstance(exclude_result, Success) else []
+
+    def _add_py_files_under(root: Path) -> None:
+        for py_file in root.rglob("*.py"):
+            try:
+                rel = str(py_file.relative_to(path))
+            except ValueError:
+                rel = str(py_file)
+            if not _is_excluded(rel, exclude_patterns):
+                result_files.append(py_file)
+
     result_files: list[Path] = []
 
     path_result = get_path_classification(path)
@@ -62,26 +74,33 @@ def collect_files_to_check(
     else:
         core_paths, shell_paths = ["src/core"], ["src/shell"]
 
-    # Scan core/shell paths
     for core_path in core_paths:
         full_path = path / core_path
         if full_path.exists():
-            result_files.extend(full_path.rglob("*.py"))
+            _add_py_files_under(full_path)
 
     for shell_path in shell_paths:
         full_path = path / shell_path
         if full_path.exists():
-            result_files.extend(full_path.rglob("*.py"))
+            _add_py_files_under(full_path)
 
-    # Fallback: scan path directly
     if not result_files and path.exists():
-        result_files.extend(path.rglob("*.py"))
+        _add_py_files_under(path)
 
-    return result_files
+    seen: set[str] = set()
+    unique: list[Path] = []
+    for f in result_files:
+        key = str(f)
+        if key not in seen:
+            seen.add(key)
+            unique.append(f)
+
+    return unique
 
 
 # @shell_orchestration: Coordinates doctest execution via testing module
 def run_doctests_phase(
+    project_root: Path,
     checked_files: list[Path],
     explain: bool,
     timeout: int = 60,
@@ -103,12 +122,20 @@ def run_doctests_phase(
         return True, "", None
 
     doctest_result = run_doctests_on_files(
-        checked_files, verbose=explain, timeout=timeout, collect_coverage=collect_coverage
+        checked_files,
+        verbose=explain,
+        timeout=timeout,
+        collect_coverage=collect_coverage,
+        cwd=project_root,
     )
     if isinstance(doctest_result, Success):
         result_data = doctest_result.unwrap()
         passed = result_data.get("status") in ("passed", "skipped")
-        output = result_data.get("stdout", "")
+        stdout = result_data.get("stdout", "")
+        stderr = result_data.get("stderr", "")
+        output = stdout
+        if not passed and stderr:
+            output = f"{stdout}\n{stderr}" if stdout else stderr
         # DX-37: Return coverage data if collected
         coverage_data = {"collected": result_data.get("coverage_collected", False)}
         return passed, output, coverage_data if collect_coverage else None
@@ -151,7 +178,24 @@ def run_crosshair_phase(
         return True, {"status": "skipped", "reason": "no files to verify"}
 
     # Only verify Core files (pure logic)
-    core_files = [f for f in checked_files if "core" in str(f)]
+    # BUG-57: Use config-based core detection instead of hardcoded "core" in path
+    from invar.core.utils import matches_path_prefix
+    from invar.shell.config import get_path_classification
+
+    path_result = get_path_classification(path)
+    if isinstance(path_result, Success):
+        core_paths, _ = path_result.unwrap()
+    else:
+        core_paths = ["src/core", "core"]
+
+    def is_core_file(f: Path) -> bool:
+        try:
+            rel = str(f.relative_to(path))
+        except ValueError:
+            rel = str(f)
+        return matches_path_prefix(rel, core_paths)
+
+    core_files = [f for f in checked_files if is_core_file(f)]
     if not core_files:
         return True, {"status": "skipped", "reason": "no core files found"}
 
@@ -176,6 +220,7 @@ def run_crosshair_phase(
         cache=cache,
         timeout=timeout,
         per_condition_timeout=per_condition_timeout,
+        project_root=path,
     )
 
     if isinstance(crosshair_result, Success):
@@ -230,26 +275,17 @@ def output_verification_status(
         console.print(doctest_output)
 
     # CrossHair results
-    _output_crosshair_status(
-        static_exit_code, doctest_passed, crosshair_output
-    )
+    _output_crosshair_status(static_exit_code, doctest_passed, crosshair_output)
 
     # Property tests results
     if property_output:
-        _output_property_tests_status(
-            static_exit_code, doctest_passed, property_output
-        )
+        _output_property_tests_status(static_exit_code, doctest_passed, property_output)
     else:
         console.print("[dim]⊘ Runtime tests skipped (static errors)[/dim]")
 
     # DX-26: Combined conclusion after all phases
     console.print("-" * 40)
-    all_passed = (
-        static_exit_code == 0
-        and doctest_passed
-        and crosshair_passed
-        and property_passed
-    )
+    all_passed = static_exit_code == 0 and doctest_passed and crosshair_passed and property_passed
     # In strict mode, warnings also cause failure (but exit code already reflects this)
     status = "passed" if all_passed else "failed"
     color = "green" if all_passed else "red"
@@ -259,6 +295,7 @@ def output_verification_status(
 # @shell_orchestration: Coordinates shell module calls for property testing
 # @shell_complexity: Property tests with result aggregation
 def run_property_tests_phase(
+    project_root: Path,
     checked_files: list[Path],
     doctest_passed: bool,
     static_exit_code: int,
@@ -286,11 +323,33 @@ def run_property_tests_phase(
         return True, {"status": "skipped", "reason": "no files"}, None
 
     # Only test Core files (with contracts)
-    core_files = [f for f in checked_files if "core" in str(f)]
+    # BUG-57: Use config-based core detection instead of hardcoded "core" in path
+    from invar.core.utils import matches_path_prefix
+    from invar.shell.config import get_path_classification
+
+    path_result = get_path_classification(project_root)
+    if isinstance(path_result, Success):
+        core_paths, _ = path_result.unwrap()
+    else:
+        core_paths = ["src/core", "core"]
+
+    def is_core_file(f: Path) -> bool:
+        try:
+            rel = str(f.relative_to(project_root))
+        except ValueError:
+            rel = str(f)
+        return matches_path_prefix(rel, core_paths)
+
+    core_files = [f for f in checked_files if is_core_file(f)]
     if not core_files:
         return True, {"status": "skipped", "reason": "no core files"}, None
 
-    result = run_property_tests_on_files(core_files, max_examples, collect_coverage=collect_coverage)
+    result = run_property_tests_on_files(
+        core_files,
+        max_examples,
+        collect_coverage=collect_coverage,
+        project_root=project_root,
+    )
 
     if isinstance(result, Success):
         report, coverage_data = result.unwrap()
@@ -305,15 +364,19 @@ def run_property_tests_phase(
             for r in report.results
             if not r.passed
         ]
-        return report.all_passed(), {
-            "status": "passed" if report.all_passed() else "failed",
-            "functions_tested": report.functions_tested,
-            "functions_passed": report.functions_passed,
-            "functions_failed": report.functions_failed,
-            "total_examples": report.total_examples,
-            "failures": failures,  # DX-26: Structured failure info
-            "errors": report.errors,
-        }, coverage_data
+        return (
+            report.all_passed(),
+            {
+                "status": "passed" if report.all_passed() else "failed",
+                "functions_tested": report.functions_tested,
+                "functions_passed": report.functions_passed,
+                "functions_failed": report.functions_failed,
+                "total_examples": report.total_examples,
+                "failures": failures,  # DX-26: Structured failure info
+                "errors": report.errors,
+            },
+            coverage_data,
+        )
 
     return False, {"status": "error", "error": result.failure()}, None
 
@@ -366,8 +429,8 @@ def _output_property_tests_status(
             # Show reproduction command with seed
             if seed:
                 console.print(
-                    f" [dim]Reproduce: python -c \"from hypothesis import reproduce_failure; "
-                    f"import {func_name}\" --seed={seed}[/dim]"
+                    f' [dim]Reproduce: python -c "from hypothesis import reproduce_failure; '
+                    f'import {func_name}" --seed={seed}[/dim]'
                 )
     # Fallback for errors without structured failures
     for error in property_output.get("errors", [])[:5]:
@@ -406,8 +469,7 @@ def _output_crosshair_status(
         if workers > 1:
             stats += f", {workers} workers"
         console.print(
-            f"[green]✓ CrossHair verified[/green] "
-            f"[dim]({stats}, {time_sec:.1f}s)[/dim]"
+            f"[green]✓ CrossHair verified[/green] [dim]({stats}, {time_sec:.1f}s)[/dim]"
         )
     else:
         console.print("[green]✓ CrossHair verified[/green]")
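Both runtime phases above replace the substring test `"core" in str(f)` with a prefix match against the configured core paths (BUG-57): a shell file such as `src/shell/scoreboard.py` previously counted as core because "core" happens to appear inside "scoreboard". The sketch below shows that prefix-based classification in isolation; `prefix_match` is a local stand-in for `invar.core.utils.matches_path_prefix`, whose exact semantics are not shown in this diff.

```python
from pathlib import Path, PurePosixPath


def prefix_match(rel_path: str, prefixes: list[str]) -> bool:
    """Stand-in for matches_path_prefix: true if rel_path sits under any prefix."""
    parts = PurePosixPath(rel_path.replace("\\", "/")).parts
    for prefix in prefixes:
        p = PurePosixPath(prefix).parts
        if parts[: len(p)] == p:
            return True
    return False


def select_core_files(files: list[Path], project_root: Path, core_paths: list[str]) -> list[Path]:
    """Filter to files under the configured core paths, as the new phases do."""

    def rel(f: Path) -> str:
        try:
            return str(f.relative_to(project_root))
        except ValueError:
            return str(f)  # outside the project root: fall back to the raw path

    return [f for f in files if prefix_match(rel(f), core_paths)]


# The old substring check wrongly treated the shell file below as core:
root = Path("/repo")
files = [root / "src" / "core" / "rules.py", root / "src" / "shell" / "scoreboard.py"]
print(select_core_files(files, root, ["src/core", "core"]))  # only src/core/rules.py
```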