specfact-cli 0.4.2__py3-none-any.whl → 0.6.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- specfact_cli/__init__.py +1 -1
- specfact_cli/agents/analyze_agent.py +2 -3
- specfact_cli/analyzers/__init__.py +2 -1
- specfact_cli/analyzers/ambiguity_scanner.py +601 -0
- specfact_cli/analyzers/code_analyzer.py +462 -30
- specfact_cli/analyzers/constitution_evidence_extractor.py +491 -0
- specfact_cli/analyzers/contract_extractor.py +419 -0
- specfact_cli/analyzers/control_flow_analyzer.py +281 -0
- specfact_cli/analyzers/requirement_extractor.py +337 -0
- specfact_cli/analyzers/test_pattern_extractor.py +330 -0
- specfact_cli/cli.py +151 -206
- specfact_cli/commands/constitution.py +281 -0
- specfact_cli/commands/enforce.py +42 -34
- specfact_cli/commands/import_cmd.py +481 -152
- specfact_cli/commands/init.py +224 -55
- specfact_cli/commands/plan.py +2133 -547
- specfact_cli/commands/repro.py +100 -78
- specfact_cli/commands/sync.py +701 -186
- specfact_cli/enrichers/constitution_enricher.py +765 -0
- specfact_cli/enrichers/plan_enricher.py +294 -0
- specfact_cli/importers/speckit_converter.py +364 -48
- specfact_cli/importers/speckit_scanner.py +65 -0
- specfact_cli/models/plan.py +42 -0
- specfact_cli/resources/mappings/node-async.yaml +49 -0
- specfact_cli/resources/mappings/python-async.yaml +47 -0
- specfact_cli/resources/mappings/speckit-default.yaml +82 -0
- specfact_cli/resources/prompts/specfact-enforce.md +185 -0
- specfact_cli/resources/prompts/specfact-import-from-code.md +626 -0
- specfact_cli/resources/prompts/specfact-plan-add-feature.md +188 -0
- specfact_cli/resources/prompts/specfact-plan-add-story.md +212 -0
- specfact_cli/resources/prompts/specfact-plan-compare.md +571 -0
- specfact_cli/resources/prompts/specfact-plan-init.md +531 -0
- specfact_cli/resources/prompts/specfact-plan-promote.md +352 -0
- specfact_cli/resources/prompts/specfact-plan-review.md +1276 -0
- specfact_cli/resources/prompts/specfact-plan-select.md +401 -0
- specfact_cli/resources/prompts/specfact-plan-update-feature.md +242 -0
- specfact_cli/resources/prompts/specfact-plan-update-idea.md +211 -0
- specfact_cli/resources/prompts/specfact-repro.md +268 -0
- specfact_cli/resources/prompts/specfact-sync.md +497 -0
- specfact_cli/resources/schemas/deviation.schema.json +61 -0
- specfact_cli/resources/schemas/plan.schema.json +204 -0
- specfact_cli/resources/schemas/protocol.schema.json +53 -0
- specfact_cli/resources/templates/github-action.yml.j2 +140 -0
- specfact_cli/resources/templates/plan.bundle.yaml.j2 +141 -0
- specfact_cli/resources/templates/pr-template.md.j2 +58 -0
- specfact_cli/resources/templates/protocol.yaml.j2 +24 -0
- specfact_cli/resources/templates/telemetry.yaml.example +35 -0
- specfact_cli/sync/__init__.py +10 -1
- specfact_cli/sync/watcher.py +268 -0
- specfact_cli/telemetry.py +440 -0
- specfact_cli/utils/acceptance_criteria.py +127 -0
- specfact_cli/utils/enrichment_parser.py +445 -0
- specfact_cli/utils/feature_keys.py +12 -3
- specfact_cli/utils/ide_setup.py +170 -0
- specfact_cli/utils/structure.py +179 -2
- specfact_cli/utils/yaml_utils.py +33 -0
- specfact_cli/validators/repro_checker.py +22 -1
- specfact_cli/validators/schema.py +15 -4
- specfact_cli-0.6.8.dist-info/METADATA +456 -0
- specfact_cli-0.6.8.dist-info/RECORD +99 -0
- {specfact_cli-0.4.2.dist-info → specfact_cli-0.6.8.dist-info}/entry_points.txt +1 -0
- specfact_cli-0.6.8.dist-info/licenses/LICENSE.md +202 -0
- specfact_cli-0.4.2.dist-info/METADATA +0 -370
- specfact_cli-0.4.2.dist-info/RECORD +0 -62
- specfact_cli-0.4.2.dist-info/licenses/LICENSE.md +0 -61
- {specfact_cli-0.4.2.dist-info → specfact_cli-0.6.8.dist-info}/WHEEL +0 -0
specfact_cli/analyzers/constitution_evidence_extractor.py (new file)
@@ -0,0 +1,491 @@
+"""Constitution evidence extractor for extracting evidence-based constitution checklist from code patterns.
+
+Extracts evidence from code patterns to determine PASS/FAIL status for Articles VII, VIII, and IX
+of the Spec-Kit constitution, generating rationale based on concrete evidence from the codebase.
+"""
+
+from __future__ import annotations
+
+import ast
+from pathlib import Path
+from typing import Any
+
+from beartype import beartype
+from icontract import ensure, require
+
+
+class ConstitutionEvidenceExtractor:
+    """
+    Extracts evidence-based constitution checklist from code patterns.
+
+    Analyzes code patterns to determine PASS/FAIL status for:
+    - Article VII (Simplicity): Project structure, directory depth, file organization
+    - Article VIII (Anti-Abstraction): Framework usage, abstraction layers
+    - Article IX (Integration-First): Contract patterns, API definitions, type hints
+
+    Generates evidence-based status (PASS/FAIL) with rationale, avoiding PENDING status.
+    """
+
+    # Framework detection patterns
+    FRAMEWORK_IMPORTS = {
+        "django": ["django", "django.db", "django.contrib"],
+        "flask": ["flask", "flask_sqlalchemy", "flask_restful"],
+        "fastapi": ["fastapi", "fastapi.routing", "fastapi.middleware"],
+        "sqlalchemy": ["sqlalchemy", "sqlalchemy.orm", "sqlalchemy.ext"],
+        "pydantic": ["pydantic", "pydantic.v1", "pydantic.v2"],
+        "tortoise": ["tortoise", "tortoise.models", "tortoise.fields"],
+        "peewee": ["peewee"],
+        "sqlmodel": ["sqlmodel"],
+    }
+
+    # Contract decorator patterns
+    CONTRACT_DECORATORS = ["@icontract", "@require", "@ensure", "@invariant", "@beartype"]
+
+    # Thresholds for Article VII (Simplicity)
+    MAX_DIRECTORY_DEPTH = 4  # PASS if depth <= 4, FAIL if depth > 4
+    MAX_FILES_PER_DIRECTORY = 20  # PASS if files <= 20, FAIL if files > 20
+
+    # Thresholds for Article VIII (Anti-Abstraction)
+    MAX_ABSTRACTION_LAYERS = 2  # PASS if layers <= 2, FAIL if layers > 2
+
+    # Thresholds for Article IX (Integration-First)
+    MIN_CONTRACT_COVERAGE = 0.1  # PASS if >= 10% of functions have contracts, FAIL if < 10%
+
+    @beartype
+    def __init__(self, repo_path: Path) -> None:
+        """
+        Initialize constitution evidence extractor.
+
+        Args:
+            repo_path: Path to repository root for analysis
+        """
+        self.repo_path = Path(repo_path)
+
+    @beartype
+    @require(lambda repo_path: repo_path is None or repo_path.exists(), "Repository path must exist if provided")
+    @ensure(lambda result: isinstance(result, dict), "Must return dict")
+    def extract_article_vii_evidence(self, repo_path: Path | None = None) -> dict[str, Any]:
+        """
+        Extract Article VII (Simplicity) evidence from project structure.
+
+        Analyzes:
+        - Directory depth (shallow = PASS, deep = FAIL)
+        - Files per directory (few = PASS, many = FAIL)
+        - File naming patterns (consistent = PASS, inconsistent = FAIL)
+
+        Args:
+            repo_path: Path to repository (default: self.repo_path)
+
+        Returns:
+            Dictionary with status, rationale, and evidence
+        """
+        if repo_path is None:
+            repo_path = self.repo_path
+
+        repo_path = Path(repo_path)
+        if not repo_path.exists():
+            return {
+                "status": "FAIL",
+                "rationale": "Repository path does not exist",
+                "evidence": [],
+            }
+
+        # Analyze directory structure
+        max_depth = 0
+        max_files_per_dir = 0
+        total_dirs = 0
+        total_files = 0
+        evidence: list[str] = []
+
+        def analyze_directory(path: Path, depth: int = 0) -> None:
+            """Recursively analyze directory structure."""
+            nonlocal max_depth, max_files_per_dir, total_dirs, total_files
+
+            if depth > max_depth:
+                max_depth = depth
+
+            # Count files in this directory (excluding hidden and common ignore patterns)
+            files = [
+                f
+                for f in path.iterdir()
+                if f.is_file()
+                and not f.name.startswith(".")
+                and f.suffix in (".py", ".md", ".yaml", ".yml", ".toml", ".json")
+            ]
+            file_count = len(files)
+
+            if file_count > max_files_per_dir:
+                max_files_per_dir = file_count
+                evidence.append(f"Directory {path.relative_to(repo_path)} has {file_count} files")
+
+            total_dirs += 1
+            total_files += file_count
+
+            # Recurse into subdirectories (limit depth to avoid infinite recursion)
+            if depth < 10:  # Safety limit
+                for subdir in path.iterdir():
+                    if (
+                        subdir.is_dir()
+                        and not subdir.name.startswith(".")
+                        and subdir.name not in ("__pycache__", "node_modules", ".git")
+                    ):
+                        analyze_directory(subdir, depth + 1)
+
+        # Start analysis from repo root
+        analyze_directory(repo_path, 0)
+
+        # Determine status based on thresholds
+        depth_pass = max_depth <= self.MAX_DIRECTORY_DEPTH
+        files_pass = max_files_per_dir <= self.MAX_FILES_PER_DIRECTORY
+
+        if depth_pass and files_pass:
+            status = "PASS"
+            rationale = (
+                f"Project has simple structure (max depth: {max_depth}, max files per directory: {max_files_per_dir})"
+            )
+        else:
+            status = "FAIL"
+            issues = []
+            if not depth_pass:
+                issues.append(
+                    f"deep directory structure (max depth: {max_depth}, threshold: {self.MAX_DIRECTORY_DEPTH})"
+                )
+            if not files_pass:
+                issues.append(
+                    f"many files per directory (max: {max_files_per_dir}, threshold: {self.MAX_FILES_PER_DIRECTORY})"
+                )
+            rationale = f"Project violates simplicity: {', '.join(issues)}"
+
+        return {
+            "status": status,
+            "rationale": rationale,
+            "evidence": evidence[:5],  # Limit to top 5 evidence items
+            "max_depth": max_depth,
+            "max_files_per_dir": max_files_per_dir,
+            "total_dirs": total_dirs,
+            "total_files": total_files,
+        }
+
+    @beartype
+    @require(lambda repo_path: repo_path is None or repo_path.exists(), "Repository path must exist if provided")
+    @ensure(lambda result: isinstance(result, dict), "Must return dict")
+    def extract_article_viii_evidence(self, repo_path: Path | None = None) -> dict[str, Any]:
+        """
+        Extract Article VIII (Anti-Abstraction) evidence from framework usage.
+
+        Analyzes:
+        - Framework imports (Django, Flask, FastAPI, etc.)
+        - Abstraction layers (ORM, middleware, wrappers)
+        - Framework-specific patterns
+
+        Args:
+            repo_path: Path to repository (default: self.repo_path)
+
+        Returns:
+            Dictionary with status, rationale, and evidence
+        """
+        if repo_path is None:
+            repo_path = self.repo_path
+
+        repo_path = Path(repo_path)
+        if not repo_path.exists():
+            return {
+                "status": "FAIL",
+                "rationale": "Repository path does not exist",
+                "evidence": [],
+            }
+
+        frameworks_detected: set[str] = set()
+        abstraction_layers = 0
+        evidence: list[str] = []
+        total_imports = 0
+
+        # Scan Python files for framework imports
+        for py_file in repo_path.rglob("*.py"):
+            if py_file.name.startswith(".") or "__pycache__" in str(py_file):
+                continue
+
+            try:
+                content = py_file.read_text(encoding="utf-8")
+                tree = ast.parse(content, filename=str(py_file))
+
+                for node in ast.walk(tree):
+                    if isinstance(node, ast.Import):
+                        for alias in node.names:
+                            import_name = alias.name.split(".")[0]
+                            total_imports += 1
+
+                            # Check for framework imports
+                            for framework, patterns in self.FRAMEWORK_IMPORTS.items():
+                                if any(pattern.startswith(import_name) for pattern in patterns):
+                                    frameworks_detected.add(framework)
+                                    evidence.append(
+                                        f"Framework '{framework}' detected in {py_file.relative_to(repo_path)}"
+                                    )
+
+                    elif isinstance(node, ast.ImportFrom) and node.module:
+                        module_name = node.module.split(".")[0]
+                        total_imports += 1
+
+                        # Check for framework imports
+                        for framework, patterns in self.FRAMEWORK_IMPORTS.items():
+                            if any(pattern.startswith(module_name) for pattern in patterns):
+                                frameworks_detected.add(framework)
+                                evidence.append(f"Framework '{framework}' detected in {py_file.relative_to(repo_path)}")
+
+                    # Detect abstraction layers (ORM usage, middleware, wrappers)
+                    if isinstance(node, ast.ClassDef):
+                        # Check for ORM patterns (Model classes, Base classes)
+                        for base in node.bases:
+                            if isinstance(base, ast.Name) and ("Model" in base.id or "Base" in base.id):
+                                abstraction_layers += 1
+                                evidence.append(f"ORM pattern detected in {py_file.relative_to(repo_path)}: {base.id}")
+
+            except (SyntaxError, UnicodeDecodeError):
+                # Skip files with syntax errors or encoding issues
+                continue
+
+        # Determine status
+        # PASS if no frameworks or minimal abstraction, FAIL if heavy framework usage
+        if not frameworks_detected and abstraction_layers <= self.MAX_ABSTRACTION_LAYERS:
+            status = "PASS"
+            rationale = "No framework abstractions detected (direct library usage)"
+        else:
+            status = "FAIL"
+            issues = []
+            if frameworks_detected:
+                issues.append(f"framework abstractions detected ({', '.join(frameworks_detected)})")
+            if abstraction_layers > self.MAX_ABSTRACTION_LAYERS:
+                issues.append(
+                    f"too many abstraction layers ({abstraction_layers}, threshold: {self.MAX_ABSTRACTION_LAYERS})"
+                )
+            rationale = f"Project violates anti-abstraction: {', '.join(issues)}"
+
+        return {
+            "status": status,
+            "rationale": rationale,
+            "evidence": evidence[:5],  # Limit to top 5 evidence items
+            "frameworks_detected": list(frameworks_detected),
+            "abstraction_layers": abstraction_layers,
+            "total_imports": total_imports,
+        }
+
+    @beartype
+    @require(lambda repo_path: repo_path is None or repo_path.exists(), "Repository path must exist if provided")
+    @ensure(lambda result: isinstance(result, dict), "Must return dict")
+    def extract_article_ix_evidence(self, repo_path: Path | None = None) -> dict[str, Any]:
+        """
+        Extract Article IX (Integration-First) evidence from contract patterns.
+
+        Analyzes:
+        - Contract decorators (@icontract, @require, @ensure)
+        - API definitions (OpenAPI, JSON Schema, Pydantic models)
+        - Type hints (comprehensive = PASS, minimal = FAIL)
+
+        Args:
+            repo_path: Path to repository (default: self.repo_path)
+
+        Returns:
+            Dictionary with status, rationale, and evidence
+        """
+        if repo_path is None:
+            repo_path = self.repo_path
+
+        repo_path = Path(repo_path)
+        if not repo_path.exists():
+            return {
+                "status": "FAIL",
+                "rationale": "Repository path does not exist",
+                "evidence": [],
+            }
+
+        contract_decorators_found = 0
+        functions_with_type_hints = 0
+        total_functions = 0
+        pydantic_models = 0
+        evidence: list[str] = []
+
+        # Scan Python files for contract patterns
+        for py_file in repo_path.rglob("*.py"):
+            if py_file.name.startswith(".") or "__pycache__" in str(py_file):
+                continue
+
+            try:
+                content = py_file.read_text(encoding="utf-8")
+                tree = ast.parse(content, filename=str(py_file))
+
+                for node in ast.walk(tree):
+                    if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
+                        total_functions += 1
+
+                        # Check for type hints
+                        if node.returns is not None:
+                            functions_with_type_hints += 1
+
+                        # Check for contract decorators in source code
+                        for decorator in node.decorator_list:
+                            if isinstance(decorator, ast.Name):
+                                decorator_name = decorator.id
+                                if decorator_name in ("require", "ensure", "invariant", "beartype"):
+                                    contract_decorators_found += 1
+                                    evidence.append(
+                                        f"Contract decorator '@{decorator_name}' found in {py_file.relative_to(repo_path)}:{node.lineno}"
+                                    )
+                            elif isinstance(decorator, ast.Attribute):
+                                if isinstance(decorator.value, ast.Name) and decorator.value.id == "icontract":
+                                    contract_decorators_found += 1
+                                    evidence.append(
+                                        f"Contract decorator '@icontract.{decorator.attr}' found in {py_file.relative_to(repo_path)}:{node.lineno}"
+                                    )
+
+                    # Check for Pydantic models
+                    if isinstance(node, ast.ClassDef):
+                        for base in node.bases:
+                            if (isinstance(base, ast.Name) and ("BaseModel" in base.id or "Pydantic" in base.id)) or (
+                                isinstance(base, ast.Attribute)
+                                and isinstance(base.value, ast.Name)
+                                and base.value.id == "pydantic"
+                            ):
+                                pydantic_models += 1
+                                evidence.append(
+                                    f"Pydantic model detected in {py_file.relative_to(repo_path)}: {node.name}"
+                                )
+
+            except (SyntaxError, UnicodeDecodeError):
+                # Skip files with syntax errors or encoding issues
+                continue
+
+        # Calculate contract coverage
+        contract_coverage = contract_decorators_found / total_functions if total_functions > 0 else 0.0
+        type_hint_coverage = functions_with_type_hints / total_functions if total_functions > 0 else 0.0
+
+        # Determine status
+        # PASS if contracts defined or good type hint coverage, FAIL if minimal contracts
+        if (
+            contract_decorators_found > 0
+            or contract_coverage >= self.MIN_CONTRACT_COVERAGE
+            or type_hint_coverage >= 0.5
+        ):
+            status = "PASS"
+            if contract_decorators_found > 0:
+                rationale = f"Contracts defined using decorators ({contract_decorators_found} functions with contracts)"
+            elif type_hint_coverage >= 0.5:
+                rationale = f"Good type hint coverage ({type_hint_coverage:.1%} of functions have type hints)"
+            else:
+                rationale = f"Contract coverage meets threshold ({contract_coverage:.1%})"
+        else:
+            status = "FAIL"
+            rationale = (
+                f"No contract definitions detected (0 contracts, {total_functions} functions, "
+                f"threshold: {self.MIN_CONTRACT_COVERAGE:.0%} coverage)"
+            )
+
+        return {
+            "status": status,
+            "rationale": rationale,
+            "evidence": evidence[:5],  # Limit to top 5 evidence items
+            "contract_decorators": contract_decorators_found,
+            "functions_with_type_hints": functions_with_type_hints,
+            "total_functions": total_functions,
+            "pydantic_models": pydantic_models,
+            "contract_coverage": contract_coverage,
+            "type_hint_coverage": type_hint_coverage,
+        }
+
+    @beartype
+    @ensure(lambda result: isinstance(result, dict), "Must return dict")
+    def extract_all_evidence(self, repo_path: Path | None = None) -> dict[str, Any]:
+        """
+        Extract evidence for all constitution articles.
+
+        Args:
+            repo_path: Path to repository (default: self.repo_path)
+
+        Returns:
+            Dictionary with evidence for all articles
+        """
+        if repo_path is None:
+            repo_path = self.repo_path
+
+        return {
+            "article_vii": self.extract_article_vii_evidence(repo_path),
+            "article_viii": self.extract_article_viii_evidence(repo_path),
+            "article_ix": self.extract_article_ix_evidence(repo_path),
+        }
+
+    @beartype
+    @require(lambda evidence: isinstance(evidence, dict), "Evidence must be dict")
+    @ensure(lambda result: isinstance(result, str), "Must return string")
+    def generate_constitution_check_section(self, evidence: dict[str, Any]) -> str:
+        """
+        Generate constitution check section markdown from evidence.
+
+        Args:
+            evidence: Dictionary with evidence for all articles (from extract_all_evidence)
+
+        Returns:
+            Markdown string for constitution check section
+        """
+        lines = ["## Constitution Check", ""]
+
+        # Article VII: Simplicity
+        article_vii = evidence.get("article_vii", {})
+        status_vii = article_vii.get("status", "FAIL")
+        rationale_vii = article_vii.get("rationale", "Evidence extraction failed")
+        evidence_vii = article_vii.get("evidence", [])
+
+        lines.append("**Article VII (Simplicity)**:")
+        if status_vii == "PASS":
+            lines.append(f"- [x] {rationale_vii}")
+        else:
+            lines.append(f"- [ ] {rationale_vii}")
+        if evidence_vii:
+            lines.append("")
+            lines.append("  **Evidence:**")
+            for ev in evidence_vii:
+                lines.append(f"  - {ev}")
+        lines.append("")
+
+        # Article VIII: Anti-Abstraction
+        article_viii = evidence.get("article_viii", {})
+        status_viii = article_viii.get("status", "FAIL")
+        rationale_viii = article_viii.get("rationale", "Evidence extraction failed")
+        evidence_viii = article_viii.get("evidence", [])
+
+        lines.append("**Article VIII (Anti-Abstraction)**:")
+        if status_viii == "PASS":
+            lines.append(f"- [x] {rationale_viii}")
+        else:
+            lines.append(f"- [ ] {rationale_viii}")
+        if evidence_viii:
+            lines.append("")
+            lines.append("  **Evidence:**")
+            for ev in evidence_viii:
+                lines.append(f"  - {ev}")
+        lines.append("")
+
+        # Article IX: Integration-First
+        article_ix = evidence.get("article_ix", {})
+        status_ix = article_ix.get("status", "FAIL")
+        rationale_ix = article_ix.get("rationale", "Evidence extraction failed")
+        evidence_ix = article_ix.get("evidence", [])
+
+        lines.append("**Article IX (Integration-First)**:")
+        if status_ix == "PASS":
+            lines.append(f"- [x] {rationale_ix}")
+        else:
+            lines.append(f"- [ ] {rationale_ix}")
+        if evidence_ix:
+            lines.append("")
+            lines.append("  **Evidence:**")
+            for ev in evidence_ix:
+                lines.append(f"  - {ev}")
+        lines.append("")
+
+        # Overall status (PASS if all articles PASS, otherwise FAIL)
+        all_pass = all(evidence.get(f"article_{roman}", {}).get("status") == "PASS" for roman in ["vii", "viii", "ix"])
+        overall_status = "PASS" if all_pass else "FAIL"
+        lines.append(f"**Status**: {overall_status}")
+        lines.append("")
+
+        return "\n".join(lines)
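For context, here is a minimal usage sketch of the new extractor, assuming the module is importable from the installed wheel as `specfact_cli.analyzers.constitution_evidence_extractor` (the import path is inferred from the file listing above; the repository path is a hypothetical example):

```python
from pathlib import Path

from specfact_cli.analyzers.constitution_evidence_extractor import ConstitutionEvidenceExtractor

# Point the extractor at a repository root (example path).
extractor = ConstitutionEvidenceExtractor(Path("."))

# Collect PASS/FAIL evidence for Articles VII, VIII, and IX in one call.
evidence = extractor.extract_all_evidence()

# Render the markdown "Constitution Check" section from that evidence.
print(extractor.generate_constitution_check_section(evidence))
```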