invar-tools 1.0.0__py3-none-any.whl → 1.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (98) hide show
  1. invar/__init__.py +1 -0
  2. invar/core/contracts.py +80 -10
  3. invar/core/entry_points.py +367 -0
  4. invar/core/extraction.py +5 -6
  5. invar/core/format_specs.py +195 -0
  6. invar/core/format_strategies.py +197 -0
  7. invar/core/formatter.py +32 -10
  8. invar/core/hypothesis_strategies.py +50 -10
  9. invar/core/inspect.py +1 -1
  10. invar/core/lambda_helpers.py +3 -2
  11. invar/core/models.py +30 -18
  12. invar/core/must_use.py +2 -1
  13. invar/core/parser.py +13 -6
  14. invar/core/postcondition_scope.py +128 -0
  15. invar/core/property_gen.py +86 -42
  16. invar/core/purity.py +13 -7
  17. invar/core/purity_heuristics.py +5 -9
  18. invar/core/references.py +8 -6
  19. invar/core/review_trigger.py +370 -0
  20. invar/core/rule_meta.py +69 -2
  21. invar/core/rules.py +91 -28
  22. invar/core/shell_analysis.py +247 -0
  23. invar/core/shell_architecture.py +171 -0
  24. invar/core/strategies.py +7 -14
  25. invar/core/suggestions.py +92 -0
  26. invar/core/sync_helpers.py +238 -0
  27. invar/core/tautology.py +103 -37
  28. invar/core/template_parser.py +467 -0
  29. invar/core/timeout_inference.py +4 -7
  30. invar/core/utils.py +63 -18
  31. invar/core/verification_routing.py +155 -0
  32. invar/mcp/server.py +113 -13
  33. invar/shell/commands/__init__.py +11 -0
  34. invar/shell/{cli.py → commands/guard.py} +152 -44
  35. invar/shell/{init_cmd.py → commands/init.py} +200 -28
  36. invar/shell/commands/merge.py +256 -0
  37. invar/shell/commands/mutate.py +184 -0
  38. invar/shell/{perception.py → commands/perception.py} +2 -0
  39. invar/shell/commands/sync_self.py +113 -0
  40. invar/shell/commands/template_sync.py +366 -0
  41. invar/shell/{test_cmd.py → commands/test.py} +3 -1
  42. invar/shell/commands/update.py +48 -0
  43. invar/shell/config.py +247 -10
  44. invar/shell/coverage.py +351 -0
  45. invar/shell/fs.py +5 -2
  46. invar/shell/git.py +2 -0
  47. invar/shell/guard_helpers.py +116 -20
  48. invar/shell/guard_output.py +106 -24
  49. invar/shell/mcp_config.py +3 -0
  50. invar/shell/mutation.py +314 -0
  51. invar/shell/property_tests.py +75 -24
  52. invar/shell/prove/__init__.py +9 -0
  53. invar/shell/prove/accept.py +113 -0
  54. invar/shell/{prove.py → prove/crosshair.py} +69 -30
  55. invar/shell/prove/hypothesis.py +293 -0
  56. invar/shell/subprocess_env.py +393 -0
  57. invar/shell/template_engine.py +345 -0
  58. invar/shell/templates.py +53 -0
  59. invar/shell/testing.py +77 -37
  60. invar/templates/CLAUDE.md.template +86 -9
  61. invar/templates/aider.conf.yml.template +16 -14
  62. invar/templates/commands/audit.md +138 -0
  63. invar/templates/commands/guard.md +77 -0
  64. invar/templates/config/CLAUDE.md.jinja +206 -0
  65. invar/templates/config/context.md.jinja +92 -0
  66. invar/templates/config/pre-commit.yaml.jinja +44 -0
  67. invar/templates/context.md.template +33 -0
  68. invar/templates/cursorrules.template +25 -13
  69. invar/templates/examples/README.md +2 -0
  70. invar/templates/examples/conftest.py +3 -0
  71. invar/templates/examples/contracts.py +4 -2
  72. invar/templates/examples/core_shell.py +10 -4
  73. invar/templates/examples/workflow.md +81 -0
  74. invar/templates/manifest.toml +137 -0
  75. invar/templates/protocol/INVAR.md +210 -0
  76. invar/templates/skills/develop/SKILL.md.jinja +318 -0
  77. invar/templates/skills/investigate/SKILL.md.jinja +106 -0
  78. invar/templates/skills/propose/SKILL.md.jinja +104 -0
  79. invar/templates/skills/review/SKILL.md.jinja +125 -0
  80. invar_tools-1.3.0.dist-info/METADATA +377 -0
  81. invar_tools-1.3.0.dist-info/RECORD +95 -0
  82. invar_tools-1.3.0.dist-info/entry_points.txt +2 -0
  83. invar_tools-1.3.0.dist-info/licenses/LICENSE +190 -0
  84. invar_tools-1.3.0.dist-info/licenses/LICENSE-GPL +674 -0
  85. invar_tools-1.3.0.dist-info/licenses/NOTICE +63 -0
  86. invar/contracts.py +0 -152
  87. invar/decorators.py +0 -94
  88. invar/invariant.py +0 -57
  89. invar/resource.py +0 -99
  90. invar/shell/prove_fallback.py +0 -183
  91. invar/shell/update_cmd.py +0 -191
  92. invar/templates/INVAR.md +0 -134
  93. invar_tools-1.0.0.dist-info/METADATA +0 -321
  94. invar_tools-1.0.0.dist-info/RECORD +0 -64
  95. invar_tools-1.0.0.dist-info/entry_points.txt +0 -2
  96. invar_tools-1.0.0.dist-info/licenses/LICENSE +0 -21
  97. /invar/shell/{prove_cache.py → prove/cache.py} +0 -0
  98. {invar_tools-1.0.0.dist-info → invar_tools-1.3.0.dist-info}/WHEEL +0 -0
invar/shell/config.py CHANGED
@@ -8,11 +8,15 @@ Configuration sources (priority order):
8
8
  2. invar.toml [guard]
9
9
  3. .invar/config.toml [guard]
10
10
  4. Built-in defaults
11
+
12
+ DX-22: Added content-based auto-detection for Core/Shell classification.
11
13
  """
12
14
 
13
15
  from __future__ import annotations
14
16
 
17
+ import ast
15
18
  import tomllib
19
+ from enum import Enum
16
20
  from typing import TYPE_CHECKING, Any, Literal
17
21
 
18
22
  from returns.result import Failure, Result, Success
@@ -25,12 +29,210 @@ from invar.core.utils import (
25
29
  parse_guard_config,
26
30
  )
27
31
 
32
+
33
class ModuleType(Enum):
    """Classification result for DX-22 content-based module detection."""

    CORE = "core"  # pure logic (contracts, no I/O)
    SHELL = "shell"  # I/O / orchestration code
    UNKNOWN = "unknown"  # no clear signal either way
39
+
40
+
41
+ # I/O libraries that indicate Shell module (for AST import checking)
42
+ _IO_LIBRARIES = frozenset([
43
+ "os", "sys", "subprocess", "pathlib", "shutil", "io", "socket",
44
+ "requests", "aiohttp", "httpx", "urllib", "sqlite3", "psycopg2",
45
+ "pymongo", "sqlalchemy", "typer", "click",
46
+ ])
47
+
48
+ # Contract decorator names
49
+ _CONTRACT_DECORATORS = frozenset(["pre", "post", "invariant"])
50
+
51
+ # Result monad types
52
+ _RESULT_TYPES = frozenset(["Result", "Success", "Failure"])
53
+
54
+
55
# @shell_orchestration: AST analysis
# @shell_complexity: AST branches
def _has_contract_decorators(tree: ast.Module) -> bool:
    """
    Return True when any function in *tree* carries a @pre/@post/@invariant decorator.

    Operates on the AST so decorator-like text inside docstrings or comments
    can never produce a false positive.

    Examples:
        >>> import ast
        >>> tree = ast.parse("@pre(lambda x: x > 0)\\ndef foo(x): pass")
        >>> _has_contract_decorators(tree)
        True
        >>> tree = ast.parse("def foo():\\n    '''>>> @pre(x)'''\\n    pass")
        >>> _has_contract_decorators(tree)
        False
    """
    for node in ast.walk(tree):
        if not isinstance(node, ast.FunctionDef | ast.AsyncFunctionDef):
            continue
        for dec in node.decorator_list:
            if isinstance(dec, ast.Call):
                # @pre(...) / @deal.pre(...) — look at the called name.
                target = dec.func
                if isinstance(target, ast.Name) and target.id in _CONTRACT_DECORATORS:
                    return True
                if isinstance(target, ast.Attribute) and target.attr in _CONTRACT_DECORATORS:
                    return True
            elif isinstance(dec, ast.Name) and dec.id in _CONTRACT_DECORATORS:
                # Bare @pre without a call — rare but possible.
                return True
    return False
86
+
87
+
88
# @shell_orchestration: AST analysis
# @shell_complexity: AST branches
def _has_io_imports(tree: ast.Module) -> bool:
    """
    Return True when *tree* imports any known I/O library.

    Only real import statements count; library names appearing inside
    strings or docstrings are ignored.

    Examples:
        >>> import ast
        >>> tree = ast.parse("import os")
        >>> _has_io_imports(tree)
        True
        >>> tree = ast.parse("from pathlib import Path")
        >>> _has_io_imports(tree)
        True
        >>> tree = ast.parse("import json")
        >>> _has_io_imports(tree)
        False
        >>> tree = ast.parse("def foo():\\n    '''import os'''\\n    pass")
        >>> _has_io_imports(tree)
        False
    """
    for node in ast.walk(tree):
        # Collect the top-level package names this node imports, if any.
        if isinstance(node, ast.Import):
            roots = [alias.name.split(".")[0] for alias in node.names]
        elif isinstance(node, ast.ImportFrom) and node.module:
            roots = [node.module.split(".")[0]]
        else:
            continue
        if any(root in _IO_LIBRARIES for root in roots):
            return True
    return False
120
+
121
+
122
# @shell_orchestration: AST analysis
# @shell_complexity: AST branches
def _has_result_types(tree: ast.Module) -> bool:
    """
    Return True when *tree* uses the Result/Success/Failure monad family.

    Recognized forms:
    - Imports: ``from returns.result import Success``
    - Function calls: ``Success(...)``, ``Failure(...)``
    - Return annotations: ``-> Result[T, E]`` or a bare Result-family name

    Examples:
        >>> import ast
        >>> tree = ast.parse("from returns.result import Success")
        >>> _has_result_types(tree)
        True
        >>> tree = ast.parse("def foo() -> Result[int, str]: pass")
        >>> _has_result_types(tree)
        True
        >>> tree = ast.parse("return Success(42)")
        >>> _has_result_types(tree)
        True
        >>> tree = ast.parse("def foo():\\n    '''Success'''\\n    pass")
        >>> _has_result_types(tree)
        False
    """
    for node in ast.walk(tree):
        # Imports from the `returns` package.
        if isinstance(node, ast.ImportFrom) and node.module and "returns" in node.module:
            if any(alias.name in _RESULT_TYPES for alias in node.names):
                return True
            continue
        # Direct constructor calls: Success(...), Failure(...).
        if isinstance(node, ast.Call):
            if isinstance(node.func, ast.Name) and node.func.id in _RESULT_TYPES:
                return True
            continue
        # Return annotations: -> Result[T, E] or a bare Result-family name.
        if isinstance(node, ast.FunctionDef | ast.AsyncFunctionDef) and node.returns is not None:
            ann = node.returns
            subscripted_result = (
                isinstance(ann, ast.Subscript)
                and isinstance(ann.value, ast.Name)
                and ann.value.id == "Result"
            )
            bare_result = isinstance(ann, ast.Name) and ann.id in _RESULT_TYPES
            if subscripted_result or bare_result:
                return True
    return False
169
+
170
+
171
# @shell_complexity: Classification decision tree requires multiple conditions
def auto_detect_module_type(source: str, file_path: str = "") -> ModuleType:
    """
    Automatically detect module type from source content using AST.

    DX-22: Content-based classification when path-based is inconclusive.
    Uses AST parsing to avoid false positives from docstrings/comments.

    Priority:
    1. Path convention (**/core/** or **/shell/**)
    2. Content features via AST (contracts, Result types, I/O imports)

    Args:
        source: Python source code as string
        file_path: Optional file path for path-based hints; both "/" and
            Windows "\\" separators are accepted.

    Returns:
        ModuleType indicating Core, Shell, or Unknown

    Examples:
        >>> auto_detect_module_type("@pre(lambda x: x > 0)\\ndef foo(x): pass")
        <ModuleType.CORE: 'core'>
        >>> auto_detect_module_type("from returns.result import Success\\ndef load(): return Success('ok')")
        <ModuleType.SHELL: 'shell'>
        >>> auto_detect_module_type("def helper(): pass")
        <ModuleType.UNKNOWN: 'unknown'>
        >>> auto_detect_module_type("def foo():\\n    '''>>> @pre(x)'''\\n    pass")
        <ModuleType.UNKNOWN: 'unknown'>
    """
    # Priority 1: Path convention.  Normalize backslashes first so Windows
    # paths like "src\\core\\logic.py" classify the same as "src/core/logic.py"
    # (the previous segment check only matched forward slashes).
    if file_path:
        path_lower = file_path.lower().replace("\\", "/")
        if "/core/" in path_lower or path_lower.endswith("/core"):
            return ModuleType.CORE
        if "/shell/" in path_lower or path_lower.endswith("/shell"):
            return ModuleType.SHELL

    # Priority 2: Content features via AST.  Unparseable source gives no signal.
    try:
        tree = ast.parse(source)
    except SyntaxError:
        return ModuleType.UNKNOWN

    has_contracts = _has_contract_decorators(tree)
    has_io = _has_io_imports(tree)
    has_result = _has_result_types(tree)

    # Core: has contracts AND no I/O.
    if has_contracts and not has_io:
        return ModuleType.CORE

    # Shell: has I/O or Result types.
    if has_io or has_result:
        return ModuleType.SHELL

    # Unknown: neither clear pattern.
    return ModuleType.UNKNOWN
228
+
28
229
  if TYPE_CHECKING:
29
230
  from pathlib import Path
30
231
 
31
232
  ConfigSource = Literal["pyproject", "invar", "invar_dir", "default"]
32
233
 
33
234
 
235
+ # @shell_complexity: Config cascade checks multiple sources with fallback
34
236
  def _find_config_source(project_root: Path) -> Result[tuple[Path | None, ConfigSource], str]:
35
237
  """
36
238
  Find the first available config file.
@@ -76,6 +278,7 @@ def _read_toml(path: Path) -> Result[dict[str, Any], str]:
76
278
  return Failure(f"Failed to read {path.name}: {e}")
77
279
 
78
280
 
281
+ # @shell_complexity: Config loading with multiple sources and parse error handling
79
282
  def load_config(project_root: Path) -> Result[RuleConfig, str]:
80
283
  """
81
284
  Load Invar configuration from available sources.
@@ -139,6 +342,9 @@ _DEFAULT_EXCLUDE_PATHS = [
139
342
  "dist",
140
343
  "build",
141
344
  ".tox",
345
+ # Templates and examples are documentation, not enforced code
346
+ "templates",
347
+ ".invar/examples",
142
348
  ]
143
349
 
144
350
 
@@ -206,11 +412,20 @@ def get_exclude_paths(project_root: Path) -> Result[list[str], str]:
206
412
  return Success(guard_config.get("exclude_paths", _DEFAULT_EXCLUDE_PATHS.copy()))
207
413
 
208
414
 
209
- def classify_file(file_path: str, project_root: Path) -> Result[tuple[bool, bool], str]:
415
+ # @shell_complexity: Classification decision tree requires multiple config lookups and priority checks
416
+ # @invar:allow entry_point_too_thick: False positive - .get() matches router.get pattern
417
+ def classify_file(
418
+ file_path: str, project_root: Path, source: str = ""
419
+ ) -> Result[tuple[bool, bool], str]:
210
420
  """
211
421
  Classify a file as Core, Shell, or neither.
212
422
 
213
- Priority: patterns > paths > uncategorized.
423
+ DX-22 Part 5: Priority order is patterns > paths > content > uncategorized.
424
+
425
+ Args:
426
+ file_path: Relative path to the file
427
+ project_root: Project root directory
428
+ source: Optional source content for content-based detection
214
429
 
215
430
  Examples:
216
431
  >>> import tempfile
@@ -220,18 +435,32 @@ def classify_file(file_path: str, project_root: Path) -> Result[tuple[bool, bool
220
435
  ... result = classify_file("src/core/logic.py", root)
221
436
  ... result.unwrap()[0]
222
437
  True
438
+ >>> classify_file("lib/utils.py", Path("."), "@pre(lambda x: x > 0)\\ndef foo(x): pass").unwrap()
439
+ (True, False)
440
+ >>> classify_file("lib/io.py", Path("."), "def read() -> Result[str, str]: return Success('ok')").unwrap()
441
+ (False, True)
223
442
  """
224
443
  pattern_result = get_pattern_classification(project_root)
225
- core_patterns, shell_patterns = (
226
- pattern_result.unwrap() if isinstance(pattern_result, Success) else ([], [])
227
- )
444
+ if isinstance(pattern_result, Success):
445
+ core_patterns, shell_patterns = pattern_result.unwrap()
446
+ else:
447
+ # Log warning about config error, use defaults
448
+ import logging
449
+ logging.getLogger(__name__).debug(
450
+ "Pattern classification failed: %s, using defaults", pattern_result.failure()
451
+ )
452
+ core_patterns, shell_patterns = ([], [])
228
453
 
229
454
  path_result = get_path_classification(project_root)
230
- core_paths, shell_paths = (
231
- path_result.unwrap()
232
- if isinstance(path_result, Success)
233
- else (_DEFAULT_CORE_PATHS, _DEFAULT_SHELL_PATHS)
234
- )
455
+ if isinstance(path_result, Success):
456
+ core_paths, shell_paths = path_result.unwrap()
457
+ else:
458
+ # Log warning about config error, use defaults
459
+ import logging
460
+ logging.getLogger(__name__).debug(
461
+ "Path classification failed: %s, using defaults", path_result.failure()
462
+ )
463
+ core_paths, shell_paths = (_DEFAULT_CORE_PATHS, _DEFAULT_SHELL_PATHS)
235
464
 
236
465
  # Priority 1: Pattern-based classification
237
466
  if core_patterns and matches_pattern(file_path, core_patterns):
@@ -245,4 +474,12 @@ def classify_file(file_path: str, project_root: Path) -> Result[tuple[bool, bool
245
474
  if matches_path_prefix(file_path, shell_paths):
246
475
  return Success((False, True))
247
476
 
477
+ # Priority 3: Content-based auto-detection (DX-22 Part 5)
478
+ if source:
479
+ module_type = auto_detect_module_type(source, file_path)
480
+ if module_type == ModuleType.CORE:
481
+ return Success((True, False))
482
+ if module_type == ModuleType.SHELL:
483
+ return Success((False, True))
484
+
248
485
  return Success((False, False))
@@ -0,0 +1,351 @@
1
+ """
2
+ Coverage integration for Guard verification phases.
3
+
4
+ DX-37: Collect branch coverage from doctest + hypothesis phases.
5
+ Coverage.py is used for accurate tracking via sys.settrace().
6
+
7
+ Note: CrossHair uses symbolic execution (Z3 solver) in subprocess,
8
+ so coverage.py cannot track it. This is a fundamental limitation.
9
+ """
10
+
11
+ from __future__ import annotations
12
+
13
+ from contextlib import contextmanager
14
+ from dataclasses import dataclass, field
15
+ from typing import TYPE_CHECKING
16
+
17
+ from deal import post, pre
18
+ from returns.result import Failure, Result, Success
19
+
20
+ if TYPE_CHECKING:
21
+ from collections.abc import Iterator
22
+ from pathlib import Path
23
+
24
+ from coverage import Coverage
25
+
26
+
27
@dataclass
class UncoveredBranch:
    """A single branch arc that no test phase ever exercised.

    Examples:
        >>> branch = UncoveredBranch(line=127, branch_type="else", context="if x > 0:")
        >>> branch.line
        127
        >>> branch.branch_type
        'else'
    """

    line: int  # source line where the branch originates
    branch_type: str  # "if", "else", "elif", "except", "for", "while"
    context: str  # source line text, for display
42
+
43
+
44
@dataclass
class FileCoverage:
    """Branch-coverage summary for one source file.

    Examples:
        >>> fc = FileCoverage(path="src/foo.py", branch_coverage=94.5)
        >>> fc.branch_coverage
        94.5
        >>> len(fc.uncovered_branches)
        0
    """

    path: str  # file path as reported by coverage.py
    branch_coverage: float  # percentage, 0.0 to 100.0
    # Quoted so the forward reference stays lazy even without the module's
    # `from __future__ import annotations`.
    uncovered_branches: "list[UncoveredBranch]" = field(default_factory=list)
59
+
60
+
61
@dataclass
class CoverageReport:
    """Aggregated branch coverage from doctest + hypothesis phases.

    Examples:
        >>> report = CoverageReport(overall_branch_coverage=91.2)
        >>> report.phases_tracked
        []
        >>> report.phases_excluded
        ['crosshair']
    """

    overall_branch_coverage: float  # percentage, 0.0 to 100.0
    # Quoted so the forward reference stays lazy even without the module's
    # `from __future__ import annotations`.
    files: "dict[str, FileCoverage]" = field(default_factory=dict)
    phases_tracked: "list[str]" = field(default_factory=list)
    # CrossHair runs symbolically in a subprocess; coverage.py cannot see it.
    phases_excluded: "list[str]" = field(default_factory=lambda: ["crosshair"])
77
+
78
+
79
+ # @shell_orchestration: Import check utility for coverage.py dependency
80
+ def _is_coverage_available() -> bool:
81
+ """Check if coverage.py is installed.
82
+
83
+ Examples:
84
+ >>> result = _is_coverage_available()
85
+ >>> isinstance(result, bool)
86
+ True
87
+ """
88
+ try:
89
+ import coverage # noqa: F401
90
+
91
+ return True
92
+ except ImportError:
93
+ return False
94
+
95
+
96
# NOTE: the former @pre(lambda source_dirs: len(source_dirs) >= 0) contract
# was a tautology (len() can never be negative) and has been removed — it
# verified nothing and added a decorator layer for no benefit.
@contextmanager
def collect_coverage(source_dirs: list[Path]) -> Iterator[Coverage]:
    """Context manager that records branch coverage while its body runs.

    Args:
        source_dirs: Directories to track coverage for; an empty list means
            "track everything" (coverage.py's `source=None` default).

    Yields:
        Coverage object for data extraction; it is stopped and saved on exit
        even if the tracked code raises.

    Examples:
        >>> from pathlib import Path
        >>> # When coverage is available, yields a Coverage object
        >>> # with collect_coverage([Path("src")]) as cov:
        >>> #     pass  # Execute code to track
    """
    import coverage  # optional dependency; imported lazily

    cov = coverage.Coverage(
        branch=True,
        source=[str(d) for d in source_dirs] if source_dirs else None,
        omit=["**/test_*", "**/*_test.py", "**/conftest.py"],
    )

    cov.start()
    try:
        yield cov
    finally:
        # Always stop and persist, even on exceptions in the tracked code.
        cov.stop()
        cov.save()
127
+
128
+
129
# @shell_complexity: Coverage API interaction with multiple analysis branches
@pre(lambda cov, files: files is not None)
@post(lambda result: isinstance(result, CoverageReport))
def extract_coverage_report(cov: Coverage, files: list[Path], phase: str) -> CoverageReport:
    """Extract coverage report from Coverage object.

    Args:
        cov: Coverage object after data collection
        files: Files to extract coverage for
        phase: Name of the phase ("doctest" or "hypothesis")

    Returns:
        CoverageReport with branch coverage data

    Examples:
        >>> # After running with collect_coverage:
        >>> # report = extract_coverage_report(cov, [Path("src/foo.py")], "doctest")
        >>> # report.phases_tracked == ["doctest"]
    """
    file_coverages: dict[str, FileCoverage] = {}
    total_branches = 0
    covered_branches = 0

    for file_path in files:
        str_path = str(file_path)
        try:
            # Trigger coverage analysis for this file.
            _ = cov.analysis2(str_path)

            # NOTE(review): _analyze is a private coverage.py API and may
            # change between versions — the hasattr guards and broad except
            # below keep this best-effort.
            branch_stats = cov._analyze(str_path)
            if not hasattr(branch_stats, "numbers"):
                continue
            nums = branch_stats.numbers
            file_total = nums.n_branches
            file_covered = nums.n_branches - nums.n_missing_branches

            if file_total <= 0:
                continue
            total_branches += file_total
            covered_branches += file_covered
            branch_pct = (file_covered / file_total) * 100

            # Extract uncovered branches.
            # BUG FIX: missing_branch_arcs() returns {from_line: [to_lines]};
            # the old code iterated the dict directly, so each "arc" was an
            # int key, the pair-unpack always raised, and the except below
            # silently dropped every file's branch detail.
            uncovered = []
            if hasattr(branch_stats, "missing_branch_arcs"):
                for from_line, targets in branch_stats.missing_branch_arcs().items():
                    for to_line in targets:
                        uncovered.append(
                            UncoveredBranch(
                                line=from_line,
                                branch_type="branch",
                                context=f"line {from_line} -> {to_line}",
                            )
                        )

            file_coverages[str_path] = FileCoverage(
                path=str_path,
                branch_coverage=round(branch_pct, 1),
                uncovered_branches=uncovered[:5],  # Limit to 5 per file
            )
        except Exception:
            # Best-effort: file not measured or private-API mismatch.
            continue

    overall = (covered_branches / total_branches * 100) if total_branches > 0 else 0.0

    return CoverageReport(
        overall_branch_coverage=round(overall, 1),
        files=file_coverages,
        phases_tracked=[phase],
    )
200
+
201
+
202
# @shell_orchestration: Report merging coordinates data from multiple phases
# @shell_complexity: Report merging with multiple iteration paths
@pre(lambda reports: all(isinstance(r, CoverageReport) for r in reports if r is not None))
@post(lambda result: isinstance(result, CoverageReport))
def merge_coverage_reports(reports: list[CoverageReport | None]) -> CoverageReport:
    """Merge coverage from multiple phases.

    Union of covered lines/branches across all phases.
    Only branches uncovered in ALL phases are reported as uncovered.

    Args:
        reports: List of CoverageReport objects (None entries are skipped)

    Returns:
        Merged CoverageReport

    Examples:
        >>> r1 = CoverageReport(overall_branch_coverage=80.0, phases_tracked=["doctest"])
        >>> r2 = CoverageReport(overall_branch_coverage=70.0, phases_tracked=["hypothesis"])
        >>> merged = merge_coverage_reports([r1, r2])
        >>> "doctest" in merged.phases_tracked
        True
        >>> "hypothesis" in merged.phases_tracked
        True
    """
    valid = [r for r in reports if r is not None]

    # Trivial cases: nothing to merge.
    if not valid:
        return CoverageReport(overall_branch_coverage=0.0)
    if len(valid) == 1:
        return valid[0]

    # Union of phase names, in report order.
    all_phases = [phase for r in valid for phase in r.phases_tracked]

    # Per file, keep whichever phase achieved the highest branch coverage.
    merged_files: dict[str, FileCoverage] = {}
    for r in valid:
        for path, fc in r.files.items():
            best = merged_files.get(path)
            if best is None or fc.branch_coverage > best.branch_coverage:
                merged_files[path] = fc

    # Overall = unweighted mean of file coverages (a LOC-weighted mean would
    # be better, but line counts are not available here).
    overall = (
        sum(fc.branch_coverage for fc in merged_files.values()) / len(merged_files)
        if merged_files
        else 0.0
    )

    return CoverageReport(
        overall_branch_coverage=round(overall, 1),
        files=merged_files,
        phases_tracked=all_phases,
    )
258
+
259
+
260
# @shell_orchestration: Format report for Rich console output
@pre(lambda report: isinstance(report, CoverageReport))
@post(lambda result: isinstance(result, str))
def format_coverage_output(report: CoverageReport) -> str:
    """Format coverage report for CLI output.

    Args:
        report: CoverageReport to format

    Returns:
        Formatted string for terminal output

    Examples:
        >>> report = CoverageReport(overall_branch_coverage=91.2, phases_tracked=["doctest"])
        >>> output = format_coverage_output(report)
        >>> "91.2%" in output
        True
        >>> "doctest" in output
        True
    """
    phase_names = " + ".join(report.phases_tracked)
    out: list[str] = [f"Coverage Analysis ({phase_names}):"]

    # Lowest coverage first so problem files are at the top; cap at 10 files.
    worst_first = sorted(report.files.items(), key=lambda item: item[1].branch_coverage)
    for path, file_cov in worst_first[:10]:
        n_uncovered = len(file_cov.uncovered_branches)
        out.append(f" {path}: {file_cov.branch_coverage}% branch ({n_uncovered} uncovered)")
        # Show at most 3 uncovered branches per file.
        out.extend(
            f" Line {branch.line}: {branch.context}"
            for branch in file_cov.uncovered_branches[:3]
        )

    out.append("")
    out.append(f"Overall: {report.overall_branch_coverage}% branch coverage ({phase_names})")
    out.append("")
    out.append("Note: CrossHair uses symbolic execution; coverage not applicable.")

    return "\n".join(out)
299
+
300
+
301
# @shell_orchestration: Format report for JSON agent output
@post(lambda result: isinstance(result, dict))
def format_coverage_json(report: CoverageReport) -> dict:
    """Format coverage report for JSON output.

    Args:
        report: CoverageReport to format

    Returns:
        Dictionary for JSON serialization

    Examples:
        >>> report = CoverageReport(overall_branch_coverage=91.2, phases_tracked=["doctest"])
        >>> data = format_coverage_json(report)
        >>> data["enabled"]
        True
        >>> data["overall_branch_coverage"]
        91.2
    """
    serialized_files = []
    for fc in report.files.values():
        branches = [
            {"line": b.line, "type": b.branch_type, "context": b.context}
            for b in fc.uncovered_branches
        ]
        serialized_files.append(
            {
                "path": fc.path,
                "branch_coverage": fc.branch_coverage,
                "uncovered_branches": branches,
            }
        )

    return {
        "enabled": True,
        "phases_tracked": report.phases_tracked,
        "phases_excluded": report.phases_excluded,
        "overall_branch_coverage": report.overall_branch_coverage,
        "files": serialized_files,
    }
337
+
338
+
339
def check_coverage_available() -> Result[bool, str]:
    """Check if coverage.py is installed and return helpful error if not.

    Returns:
        Success(True) if available, Failure with install instructions if not

    Examples:
        >>> result = check_coverage_available()
        >>> # Either Success(True) or Failure("Install coverage...")
    """
    if not _is_coverage_available():
        return Failure("Install coverage for --coverage support: pip install coverage[toml]>=7.0")
    return Success(True)
invar/shell/fs.py CHANGED
@@ -19,6 +19,7 @@ if TYPE_CHECKING:
19
19
  from pathlib import Path
20
20
 
21
21
 
22
+ # @shell_complexity: Recursive file discovery with gitignore and exclusions
22
23
  def discover_python_files(
23
24
  project_root: Path,
24
25
  exclude_patterns: list[str] | None = None,
@@ -52,6 +53,7 @@ def discover_python_files(
52
53
  yield py_file
53
54
 
54
55
 
56
+ # @shell_complexity: File reading with AST parsing and error handling
55
57
  def read_and_parse_file(file_path: Path, project_root: Path) -> Result[FileInfo, str]:
56
58
  """
57
59
  Read a Python file and parse it into FileInfo.
@@ -79,8 +81,8 @@ def read_and_parse_file(file_path: Path, project_root: Path) -> Result[FileInfo,
79
81
  if file_info is None:
80
82
  return Failure(f"Syntax error in {file_path}")
81
83
 
82
- # Classify as Core or Shell based on patterns and paths
83
- classify_result = classify_file(relative_path, project_root)
84
+ # Classify as Core or Shell based on patterns, paths, and content (DX-22 Part 5)
85
+ classify_result = classify_file(relative_path, project_root, file_info.source)
84
86
  file_info.is_core, file_info.is_shell = (
85
87
  classify_result.unwrap() if isinstance(classify_result, Success) else (False, False)
86
88
  )
@@ -88,6 +90,7 @@ def read_and_parse_file(file_path: Path, project_root: Path) -> Result[FileInfo,
88
90
  return Success(file_info)
89
91
 
90
92
 
93
+ # @shell_complexity: Project scanning with exclusions and error handling
91
94
  def scan_project(
92
95
  project_root: Path,
93
96
  only_files: set[Path] | None = None,