specfact-cli 0.4.2__py3-none-any.whl → 0.6.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- specfact_cli/__init__.py +1 -1
- specfact_cli/agents/analyze_agent.py +2 -3
- specfact_cli/analyzers/__init__.py +2 -1
- specfact_cli/analyzers/ambiguity_scanner.py +601 -0
- specfact_cli/analyzers/code_analyzer.py +462 -30
- specfact_cli/analyzers/constitution_evidence_extractor.py +491 -0
- specfact_cli/analyzers/contract_extractor.py +419 -0
- specfact_cli/analyzers/control_flow_analyzer.py +281 -0
- specfact_cli/analyzers/requirement_extractor.py +337 -0
- specfact_cli/analyzers/test_pattern_extractor.py +330 -0
- specfact_cli/cli.py +151 -206
- specfact_cli/commands/constitution.py +281 -0
- specfact_cli/commands/enforce.py +42 -34
- specfact_cli/commands/import_cmd.py +481 -152
- specfact_cli/commands/init.py +224 -55
- specfact_cli/commands/plan.py +2133 -547
- specfact_cli/commands/repro.py +100 -78
- specfact_cli/commands/sync.py +701 -186
- specfact_cli/enrichers/constitution_enricher.py +765 -0
- specfact_cli/enrichers/plan_enricher.py +294 -0
- specfact_cli/importers/speckit_converter.py +364 -48
- specfact_cli/importers/speckit_scanner.py +65 -0
- specfact_cli/models/plan.py +42 -0
- specfact_cli/resources/mappings/node-async.yaml +49 -0
- specfact_cli/resources/mappings/python-async.yaml +47 -0
- specfact_cli/resources/mappings/speckit-default.yaml +82 -0
- specfact_cli/resources/prompts/specfact-enforce.md +185 -0
- specfact_cli/resources/prompts/specfact-import-from-code.md +626 -0
- specfact_cli/resources/prompts/specfact-plan-add-feature.md +188 -0
- specfact_cli/resources/prompts/specfact-plan-add-story.md +212 -0
- specfact_cli/resources/prompts/specfact-plan-compare.md +571 -0
- specfact_cli/resources/prompts/specfact-plan-init.md +531 -0
- specfact_cli/resources/prompts/specfact-plan-promote.md +352 -0
- specfact_cli/resources/prompts/specfact-plan-review.md +1276 -0
- specfact_cli/resources/prompts/specfact-plan-select.md +401 -0
- specfact_cli/resources/prompts/specfact-plan-update-feature.md +242 -0
- specfact_cli/resources/prompts/specfact-plan-update-idea.md +211 -0
- specfact_cli/resources/prompts/specfact-repro.md +268 -0
- specfact_cli/resources/prompts/specfact-sync.md +497 -0
- specfact_cli/resources/schemas/deviation.schema.json +61 -0
- specfact_cli/resources/schemas/plan.schema.json +204 -0
- specfact_cli/resources/schemas/protocol.schema.json +53 -0
- specfact_cli/resources/templates/github-action.yml.j2 +140 -0
- specfact_cli/resources/templates/plan.bundle.yaml.j2 +141 -0
- specfact_cli/resources/templates/pr-template.md.j2 +58 -0
- specfact_cli/resources/templates/protocol.yaml.j2 +24 -0
- specfact_cli/resources/templates/telemetry.yaml.example +35 -0
- specfact_cli/sync/__init__.py +10 -1
- specfact_cli/sync/watcher.py +268 -0
- specfact_cli/telemetry.py +440 -0
- specfact_cli/utils/acceptance_criteria.py +127 -0
- specfact_cli/utils/enrichment_parser.py +445 -0
- specfact_cli/utils/feature_keys.py +12 -3
- specfact_cli/utils/ide_setup.py +170 -0
- specfact_cli/utils/structure.py +179 -2
- specfact_cli/utils/yaml_utils.py +33 -0
- specfact_cli/validators/repro_checker.py +22 -1
- specfact_cli/validators/schema.py +15 -4
- specfact_cli-0.6.8.dist-info/METADATA +456 -0
- specfact_cli-0.6.8.dist-info/RECORD +99 -0
- {specfact_cli-0.4.2.dist-info → specfact_cli-0.6.8.dist-info}/entry_points.txt +1 -0
- specfact_cli-0.6.8.dist-info/licenses/LICENSE.md +202 -0
- specfact_cli-0.4.2.dist-info/METADATA +0 -370
- specfact_cli-0.4.2.dist-info/RECORD +0 -62
- specfact_cli-0.4.2.dist-info/licenses/LICENSE.md +0 -61
- {specfact_cli-0.4.2.dist-info → specfact_cli-0.6.8.dist-info}/WHEEL +0 -0
|
@@ -0,0 +1,765 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Constitution enricher for automatic bootstrap and enrichment of project constitutions.
|
|
3
|
+
|
|
4
|
+
This module provides automatic constitution generation and enrichment capabilities
|
|
5
|
+
that analyze repository context to create bootstrap templates for review and adjustment.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
|
|
10
|
+
import re
|
|
11
|
+
from datetime import date
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
from typing import Any
|
|
14
|
+
|
|
15
|
+
from beartype import beartype
|
|
16
|
+
from icontract import ensure, require
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class ConstitutionEnricher:
|
|
20
|
+
"""
|
|
21
|
+
Enricher for automatically generating and enriching project constitutions.
|
|
22
|
+
|
|
23
|
+
Analyzes repository context (README, pyproject.toml, .cursor/rules/, docs/rules/)
|
|
24
|
+
to extract project metadata, development principles, and quality standards,
|
|
25
|
+
then generates a bootstrap constitution template ready for review and adjustment.
|
|
26
|
+
"""
|
|
27
|
+
|
|
28
|
+
@beartype
|
|
29
|
+
@require(lambda repo_path: isinstance(repo_path, Path), "Repository path must be Path")
|
|
30
|
+
@require(lambda repo_path: repo_path.exists(), "Repository path must exist")
|
|
31
|
+
@ensure(lambda result: isinstance(result, dict), "Must return dict with analysis results")
|
|
32
|
+
def analyze_repository(self, repo_path: Path) -> dict[str, Any]:
|
|
33
|
+
"""
|
|
34
|
+
Analyze repository and extract constitution metadata.
|
|
35
|
+
|
|
36
|
+
Args:
|
|
37
|
+
repo_path: Path to repository root
|
|
38
|
+
|
|
39
|
+
Returns:
|
|
40
|
+
Dictionary with analysis results (project_name, description, principles, etc.)
|
|
41
|
+
"""
|
|
42
|
+
analysis: dict[str, Any] = {
|
|
43
|
+
"project_name": "",
|
|
44
|
+
"description": "",
|
|
45
|
+
"target_users": [],
|
|
46
|
+
"technology_stack": [],
|
|
47
|
+
"principles": [],
|
|
48
|
+
"quality_standards": [],
|
|
49
|
+
"development_workflow": [],
|
|
50
|
+
"project_type": "auto-detect",
|
|
51
|
+
}
|
|
52
|
+
|
|
53
|
+
# Analyze pyproject.toml or package.json
|
|
54
|
+
pyproject_path = repo_path / "pyproject.toml"
|
|
55
|
+
package_json_path = repo_path / "package.json"
|
|
56
|
+
|
|
57
|
+
if pyproject_path.exists():
|
|
58
|
+
analysis.update(self._analyze_pyproject(pyproject_path))
|
|
59
|
+
elif package_json_path.exists():
|
|
60
|
+
analysis.update(self._analyze_package_json(package_json_path))
|
|
61
|
+
|
|
62
|
+
# Analyze README.md
|
|
63
|
+
readme_path = repo_path / "README.md"
|
|
64
|
+
if readme_path.exists():
|
|
65
|
+
analysis.update(self._analyze_readme(readme_path))
|
|
66
|
+
|
|
67
|
+
# Analyze .cursor/rules/ for development principles
|
|
68
|
+
cursor_rules_dir = repo_path / ".cursor" / "rules"
|
|
69
|
+
if cursor_rules_dir.exists():
|
|
70
|
+
analysis["principles"].extend(self._analyze_cursor_rules(cursor_rules_dir))
|
|
71
|
+
|
|
72
|
+
# Analyze docs/rules/ for quality gates and standards
|
|
73
|
+
docs_rules_dir = repo_path / "docs" / "rules"
|
|
74
|
+
if docs_rules_dir.exists():
|
|
75
|
+
analysis["quality_standards"].extend(self._analyze_docs_rules(docs_rules_dir))
|
|
76
|
+
|
|
77
|
+
# Detect project type
|
|
78
|
+
analysis["project_type"] = self._detect_project_type(repo_path, analysis)
|
|
79
|
+
|
|
80
|
+
return analysis
|
|
81
|
+
|
|
82
|
+
@beartype
|
|
83
|
+
@require(lambda pyproject_path: isinstance(pyproject_path, Path), "Path must be Path")
|
|
84
|
+
@require(lambda pyproject_path: pyproject_path.exists(), "Path must exist")
|
|
85
|
+
@ensure(lambda result: isinstance(result, dict), "Must return dict")
|
|
86
|
+
def _analyze_pyproject(self, pyproject_path: Path) -> dict[str, Any]:
|
|
87
|
+
"""Analyze pyproject.toml for project metadata."""
|
|
88
|
+
result: dict[str, Any] = {
|
|
89
|
+
"project_name": "",
|
|
90
|
+
"description": "",
|
|
91
|
+
"technology_stack": [],
|
|
92
|
+
"python_version": "",
|
|
93
|
+
}
|
|
94
|
+
|
|
95
|
+
try:
|
|
96
|
+
content = pyproject_path.read_text(encoding="utf-8")
|
|
97
|
+
|
|
98
|
+
# Extract project name
|
|
99
|
+
name_match = re.search(r'name\s*=\s*["\']([^"\']+)["\']', content)
|
|
100
|
+
if name_match:
|
|
101
|
+
result["project_name"] = name_match.group(1)
|
|
102
|
+
|
|
103
|
+
# Extract description
|
|
104
|
+
desc_match = re.search(r'description\s*=\s*["\']([^"\']+)["\']', content)
|
|
105
|
+
if desc_match:
|
|
106
|
+
result["description"] = desc_match.group(1)
|
|
107
|
+
|
|
108
|
+
# Extract Python version requirement
|
|
109
|
+
python_match = re.search(r'requires-python\s*=\s*["\']([^"\']+)["\']', content)
|
|
110
|
+
if python_match:
|
|
111
|
+
result["python_version"] = python_match.group(1)
|
|
112
|
+
result["technology_stack"].append(f"Python {python_match.group(1)}")
|
|
113
|
+
|
|
114
|
+
# Extract key dependencies (top 5)
|
|
115
|
+
deps_match = re.search(r"dependencies\s*=\s*\[(.*?)\]", content, re.DOTALL)
|
|
116
|
+
if deps_match:
|
|
117
|
+
deps_content = deps_match.group(1)
|
|
118
|
+
# Extract dependency names
|
|
119
|
+
dep_matches = re.findall(r'["\']([^"\']+)["\']', deps_content)
|
|
120
|
+
# Map common dependencies to technology stack
|
|
121
|
+
tech_mapping = {
|
|
122
|
+
"typer": "Typer (CLI framework)",
|
|
123
|
+
"fastapi": "FastAPI (Web framework)",
|
|
124
|
+
"django": "Django (Web framework)",
|
|
125
|
+
"flask": "Flask (Web framework)",
|
|
126
|
+
"pydantic": "Pydantic (Data validation)",
|
|
127
|
+
"sqlalchemy": "SQLAlchemy (ORM)",
|
|
128
|
+
"icontract": "icontract (Runtime contracts)",
|
|
129
|
+
"beartype": "beartype (Type checking)",
|
|
130
|
+
"crosshair": "CrossHair (Symbolic execution)",
|
|
131
|
+
}
|
|
132
|
+
for dep in dep_matches[:5]:
|
|
133
|
+
if dep in tech_mapping:
|
|
134
|
+
result["technology_stack"].append(tech_mapping[dep])
|
|
135
|
+
elif not any(char in dep for char in [">", "<", "=", "~"]):
|
|
136
|
+
# Simple dependency name without version constraints
|
|
137
|
+
result["technology_stack"].append(dep)
|
|
138
|
+
|
|
139
|
+
except Exception:
|
|
140
|
+
pass # If parsing fails, return empty result
|
|
141
|
+
|
|
142
|
+
return result
|
|
143
|
+
|
|
144
|
+
@beartype
|
|
145
|
+
@require(lambda package_json_path: isinstance(package_json_path, Path), "Path must be Path")
|
|
146
|
+
@require(lambda package_json_path: package_json_path.exists(), "Path must exist")
|
|
147
|
+
@ensure(lambda result: isinstance(result, dict), "Must return dict")
|
|
148
|
+
def _analyze_package_json(self, package_json_path: Path) -> dict[str, Any]:
|
|
149
|
+
"""Analyze package.json for project metadata."""
|
|
150
|
+
result: dict[str, Any] = {
|
|
151
|
+
"project_name": "",
|
|
152
|
+
"description": "",
|
|
153
|
+
"technology_stack": [],
|
|
154
|
+
}
|
|
155
|
+
|
|
156
|
+
try:
|
|
157
|
+
import json
|
|
158
|
+
|
|
159
|
+
content = json.loads(package_json_path.read_text(encoding="utf-8"))
|
|
160
|
+
|
|
161
|
+
result["project_name"] = content.get("name", "")
|
|
162
|
+
result["description"] = content.get("description", "")
|
|
163
|
+
|
|
164
|
+
# Extract dependencies
|
|
165
|
+
deps = content.get("dependencies", {})
|
|
166
|
+
dev_deps = content.get("devDependencies", {})
|
|
167
|
+
all_deps = {**deps, **dev_deps}
|
|
168
|
+
|
|
169
|
+
# Map common dependencies
|
|
170
|
+
tech_mapping = {
|
|
171
|
+
"react": "React",
|
|
172
|
+
"vue": "Vue.js",
|
|
173
|
+
"typescript": "TypeScript",
|
|
174
|
+
"vite": "Vite",
|
|
175
|
+
"next": "Next.js",
|
|
176
|
+
}
|
|
177
|
+
|
|
178
|
+
for dep in list(all_deps.keys())[:5]:
|
|
179
|
+
if dep in tech_mapping:
|
|
180
|
+
result["technology_stack"].append(tech_mapping[dep])
|
|
181
|
+
else:
|
|
182
|
+
result["technology_stack"].append(dep)
|
|
183
|
+
|
|
184
|
+
except Exception:
|
|
185
|
+
pass
|
|
186
|
+
|
|
187
|
+
return result
|
|
188
|
+
|
|
189
|
+
@beartype
|
|
190
|
+
@require(lambda readme_path: isinstance(readme_path, Path), "Path must be Path")
|
|
191
|
+
@require(lambda readme_path: readme_path.exists(), "Path must exist")
|
|
192
|
+
@ensure(lambda result: isinstance(result, dict), "Must return dict")
|
|
193
|
+
def _analyze_readme(self, readme_path: Path) -> dict[str, Any]:
|
|
194
|
+
"""Analyze README.md for project description and target users."""
|
|
195
|
+
result: dict[str, Any] = {
|
|
196
|
+
"description": "",
|
|
197
|
+
"target_users": [],
|
|
198
|
+
}
|
|
199
|
+
|
|
200
|
+
try:
|
|
201
|
+
content = readme_path.read_text(encoding="utf-8")
|
|
202
|
+
|
|
203
|
+
# Extract first paragraph after title as description
|
|
204
|
+
lines = content.split("\n")
|
|
205
|
+
description_lines = []
|
|
206
|
+
in_description = False
|
|
207
|
+
|
|
208
|
+
for line in lines:
|
|
209
|
+
# Skip title and empty lines
|
|
210
|
+
if line.startswith("# "):
|
|
211
|
+
in_description = True
|
|
212
|
+
continue
|
|
213
|
+
if in_description and line.strip() and not line.startswith("#"):
|
|
214
|
+
description_lines.append(line.strip())
|
|
215
|
+
if len(description_lines) >= 3: # Get first 3 lines
|
|
216
|
+
break
|
|
217
|
+
elif line.startswith("#") and description_lines:
|
|
218
|
+
break
|
|
219
|
+
|
|
220
|
+
if description_lines:
|
|
221
|
+
result["description"] = " ".join(description_lines)
|
|
222
|
+
|
|
223
|
+
# Extract target users from "Perfect for:" or similar patterns
|
|
224
|
+
perfect_for_match = re.search(r"(?:Perfect for|Target users?|For):\s*(.+?)(?:\n|$)", content, re.IGNORECASE)
|
|
225
|
+
if perfect_for_match:
|
|
226
|
+
users_text = perfect_for_match.group(1)
|
|
227
|
+
# Split by commas or semicolons
|
|
228
|
+
users = [u.strip() for u in re.split(r"[,;]", users_text)]
|
|
229
|
+
result["target_users"] = users[:5] # Limit to 5
|
|
230
|
+
|
|
231
|
+
except Exception:
|
|
232
|
+
pass
|
|
233
|
+
|
|
234
|
+
return result
|
|
235
|
+
|
|
236
|
+
@beartype
|
|
237
|
+
@require(lambda rules_dir: isinstance(rules_dir, Path), "Rules directory must be Path")
|
|
238
|
+
@require(lambda rules_dir: rules_dir.exists(), "Rules directory must exist")
|
|
239
|
+
@require(lambda rules_dir: rules_dir.is_dir(), "Rules directory must be directory")
|
|
240
|
+
@ensure(lambda result: isinstance(result, list), "Must return list of principles")
|
|
241
|
+
def _analyze_cursor_rules(self, rules_dir: Path) -> list[dict[str, str]]:
|
|
242
|
+
"""Analyze .cursor/rules/ for development principles."""
|
|
243
|
+
principles: list[dict[str, str]] = []
|
|
244
|
+
|
|
245
|
+
# Common rule files that contain principles
|
|
246
|
+
rule_files = [
|
|
247
|
+
"python-github-rules.md",
|
|
248
|
+
"coding-factory-rules.md",
|
|
249
|
+
"spec-fact-cli-rules.md",
|
|
250
|
+
"modern-javascript-typescript-guidelines.md",
|
|
251
|
+
]
|
|
252
|
+
|
|
253
|
+
for rule_file in rule_files:
|
|
254
|
+
rule_path = rules_dir / rule_file
|
|
255
|
+
if rule_path.exists():
|
|
256
|
+
try:
|
|
257
|
+
content = rule_path.read_text(encoding="utf-8")
|
|
258
|
+
# Extract principles from headings and key sections
|
|
259
|
+
extracted = self._extract_principles_from_markdown(content, rule_file)
|
|
260
|
+
principles.extend(extracted)
|
|
261
|
+
except Exception:
|
|
262
|
+
pass
|
|
263
|
+
|
|
264
|
+
return principles
|
|
265
|
+
|
|
266
|
+
@beartype
|
|
267
|
+
@require(lambda rules_dir: isinstance(rules_dir, Path), "Rules directory must be Path")
|
|
268
|
+
@require(lambda rules_dir: rules_dir.exists(), "Rules directory must exist")
|
|
269
|
+
@require(lambda rules_dir: rules_dir.is_dir(), "Rules directory must be directory")
|
|
270
|
+
@ensure(lambda result: isinstance(result, list), "Must return list of standards")
|
|
271
|
+
def _analyze_docs_rules(self, rules_dir: Path) -> list[str]:
|
|
272
|
+
"""Analyze docs/rules/ for quality standards and testing requirements."""
|
|
273
|
+
standards: list[str] = []
|
|
274
|
+
|
|
275
|
+
# Look for testing and quality gate files
|
|
276
|
+
test_files = [
|
|
277
|
+
"testing-and-build-guide.md",
|
|
278
|
+
"python-github-rules.md",
|
|
279
|
+
]
|
|
280
|
+
|
|
281
|
+
for test_file in test_files:
|
|
282
|
+
test_path = rules_dir / test_file
|
|
283
|
+
if test_path.exists():
|
|
284
|
+
try:
|
|
285
|
+
content = test_path.read_text(encoding="utf-8")
|
|
286
|
+
# Extract quality standards
|
|
287
|
+
extracted = self._extract_quality_standards(content)
|
|
288
|
+
standards.extend(extracted)
|
|
289
|
+
except Exception:
|
|
290
|
+
pass
|
|
291
|
+
|
|
292
|
+
return standards
|
|
293
|
+
|
|
294
|
+
@beartype
|
|
295
|
+
@require(lambda content: isinstance(content, str), "Content must be string")
|
|
296
|
+
@require(lambda source: isinstance(source, str), "Source must be string")
|
|
297
|
+
@ensure(lambda result: isinstance(result, list), "Must return list")
|
|
298
|
+
def _extract_principles_from_markdown(self, content: str, source: str) -> list[dict[str, str]]:
|
|
299
|
+
"""Extract principles from markdown content."""
|
|
300
|
+
principles: list[dict[str, str]] = []
|
|
301
|
+
|
|
302
|
+
# Look for headings that indicate principles
|
|
303
|
+
principle_patterns = [
|
|
304
|
+
(r"##\s+(?:Core\s+)?Principles?", r"###\s+(.+?)\n(.*?)(?=###|\n##|\Z)", re.DOTALL),
|
|
305
|
+
(r"###\s+(?:I\.|1\.|Principle\s+\d+)\s+(.+?)\n(.*?)(?=###|\n##|\Z)", re.DOTALL),
|
|
306
|
+
]
|
|
307
|
+
|
|
308
|
+
for pattern, extract_pattern, flags in principle_patterns:
|
|
309
|
+
if re.search(pattern, content, re.IGNORECASE):
|
|
310
|
+
# Extract principle sections
|
|
311
|
+
matches = re.finditer(extract_pattern, content, flags)
|
|
312
|
+
for match in matches:
|
|
313
|
+
if len(match.groups()) >= 2:
|
|
314
|
+
name = match.group(1).strip()
|
|
315
|
+
description = match.group(2).strip()
|
|
316
|
+
# Clean up description (remove markdown formatting, limit length)
|
|
317
|
+
description = re.sub(r"\[([^\]]+)\]\([^\)]+\)", r"\1", description) # Remove links
|
|
318
|
+
description = re.sub(r"\*\*([^\*]+)\*\*", r"\1", description) # Remove bold
|
|
319
|
+
description = description[:200] # Limit length
|
|
320
|
+
# Take first sentence or first 150 chars
|
|
321
|
+
if len(description) > 150:
|
|
322
|
+
description = description[:150].rsplit(".", 1)[0] + "."
|
|
323
|
+
|
|
324
|
+
principles.append({"name": name, "description": description, "source": source})
|
|
325
|
+
|
|
326
|
+
# If no structured principles found, look for key phrases
|
|
327
|
+
if not principles:
|
|
328
|
+
key_phrases = [
|
|
329
|
+
("CLI-First", "All functionality exposed via CLI; CLI is the primary interface"),
|
|
330
|
+
("Contract-Driven", "Runtime contracts mandatory; Contract exploration with CrossHair"),
|
|
331
|
+
("Test-First", "TDD mandatory; Tests written before implementation"),
|
|
332
|
+
("Quality Gates", "All code changes must pass linting, formatting, type checking, test coverage"),
|
|
333
|
+
]
|
|
334
|
+
|
|
335
|
+
for phrase, default_desc in key_phrases:
|
|
336
|
+
if phrase.lower() in content.lower():
|
|
337
|
+
principles.append({"name": phrase, "description": default_desc, "source": source})
|
|
338
|
+
|
|
339
|
+
return principles[:5] # Limit to 5 principles
|
|
340
|
+
|
|
341
|
+
@beartype
|
|
342
|
+
@require(lambda content: isinstance(content, str), "Content must be string")
|
|
343
|
+
@ensure(lambda result: isinstance(result, list), "Must return list")
|
|
344
|
+
def _extract_quality_standards(self, content: str) -> list[str]:
|
|
345
|
+
"""Extract quality standards from markdown content."""
|
|
346
|
+
standards: list[str] = []
|
|
347
|
+
|
|
348
|
+
# Look for testing requirements
|
|
349
|
+
if re.search(r"test.*coverage|coverage.*requirement", content, re.IGNORECASE):
|
|
350
|
+
coverage_match = re.search(r"(\d+)%", content)
|
|
351
|
+
if coverage_match:
|
|
352
|
+
standards.append(f"Test coverage: ≥{coverage_match.group(1)}% required")
|
|
353
|
+
|
|
354
|
+
# Look for linting requirements
|
|
355
|
+
if re.search(r"lint|linting", content, re.IGNORECASE):
|
|
356
|
+
standards.append("Linting: black, isort, mypy, pylint required")
|
|
357
|
+
|
|
358
|
+
# Look for formatting requirements
|
|
359
|
+
if re.search(r"format|formatting", content, re.IGNORECASE):
|
|
360
|
+
standards.append("Formatting: black, isort required")
|
|
361
|
+
|
|
362
|
+
# Look for type checking
|
|
363
|
+
if re.search(r"type.*check|mypy|basedpyright", content, re.IGNORECASE):
|
|
364
|
+
standards.append("Type checking: mypy or basedpyright required")
|
|
365
|
+
|
|
366
|
+
return standards
|
|
367
|
+
|
|
368
|
+
@beartype
|
|
369
|
+
@require(lambda repo_path: isinstance(repo_path, Path), "Repository path must be Path")
|
|
370
|
+
@require(lambda analysis: isinstance(analysis, dict), "Analysis must be dict")
|
|
371
|
+
@ensure(lambda result: isinstance(result, str), "Must return string")
|
|
372
|
+
def _detect_project_type(self, repo_path: Path, analysis: dict[str, Any]) -> str:
|
|
373
|
+
"""Detect project type from repository structure."""
|
|
374
|
+
# Check for CLI indicators
|
|
375
|
+
if (repo_path / "src" / "specfact_cli" / "cli.py").exists() or (repo_path / "cli.py").exists():
|
|
376
|
+
return "cli"
|
|
377
|
+
if (repo_path / "setup.py").exists() and "cli" in analysis.get("description", "").lower():
|
|
378
|
+
return "cli"
|
|
379
|
+
|
|
380
|
+
# Check for library indicators
|
|
381
|
+
if (repo_path / "src").exists() and not (repo_path / "src" / "app").exists():
|
|
382
|
+
return "library"
|
|
383
|
+
|
|
384
|
+
# Check for API indicators
|
|
385
|
+
if (repo_path / "app").exists() or (repo_path / "api").exists():
|
|
386
|
+
return "api"
|
|
387
|
+
if "fastapi" in str(analysis.get("technology_stack", [])).lower():
|
|
388
|
+
return "api"
|
|
389
|
+
|
|
390
|
+
# Check for frontend indicators
|
|
391
|
+
if (repo_path / "package.json").exists() and (
|
|
392
|
+
"react" in str(analysis.get("technology_stack", [])).lower() or (repo_path / "src" / "components").exists()
|
|
393
|
+
):
|
|
394
|
+
return "frontend"
|
|
395
|
+
|
|
396
|
+
return "auto-detect"
|
|
397
|
+
|
|
398
|
+
@beartype
|
|
399
|
+
@require(lambda analysis: isinstance(analysis, dict), "Analysis must be dict")
|
|
400
|
+
@ensure(lambda result: isinstance(result, list), "Must return list of principles")
|
|
401
|
+
def suggest_principles(self, analysis: dict[str, Any]) -> list[dict[str, str]]:
|
|
402
|
+
"""
|
|
403
|
+
Suggest principles based on repository analysis.
|
|
404
|
+
|
|
405
|
+
Args:
|
|
406
|
+
analysis: Repository analysis results
|
|
407
|
+
|
|
408
|
+
Returns:
|
|
409
|
+
List of principle dictionaries with name and description
|
|
410
|
+
"""
|
|
411
|
+
principles: list[dict[str, str]] = []
|
|
412
|
+
|
|
413
|
+
# Use extracted principles from analysis
|
|
414
|
+
extracted_principles = analysis.get("principles", [])
|
|
415
|
+
if extracted_principles:
|
|
416
|
+
# Map to numbered principles
|
|
417
|
+
for i, principle in enumerate(extracted_principles[:5], 1):
|
|
418
|
+
principles.append(
|
|
419
|
+
{
|
|
420
|
+
"name": f"{self._number_to_roman(i)}. {principle.get('name', 'Principle')}",
|
|
421
|
+
"description": principle.get("description", ""),
|
|
422
|
+
}
|
|
423
|
+
)
|
|
424
|
+
|
|
425
|
+
# Add project-type-specific principles if not enough extracted
|
|
426
|
+
project_type = analysis.get("project_type", "auto-detect")
|
|
427
|
+
if len(principles) < 3:
|
|
428
|
+
type_specific = self._get_project_type_principles(project_type, analysis)
|
|
429
|
+
# Add only if not already present
|
|
430
|
+
existing_names = {p["name"].lower() for p in principles}
|
|
431
|
+
for principle in type_specific:
|
|
432
|
+
if principle["name"].lower() not in existing_names:
|
|
433
|
+
principles.append(principle)
|
|
434
|
+
if len(principles) >= 5:
|
|
435
|
+
break
|
|
436
|
+
|
|
437
|
+
# Ensure at least 3 principles
|
|
438
|
+
if len(principles) < 3:
|
|
439
|
+
# Add generic principles
|
|
440
|
+
generic_principles = [
|
|
441
|
+
{
|
|
442
|
+
"name": "I. Code Quality",
|
|
443
|
+
"description": "All code must pass linting, formatting, and type checking before commit",
|
|
444
|
+
},
|
|
445
|
+
{
|
|
446
|
+
"name": "II. Testing",
|
|
447
|
+
"description": "Tests required for all new features; Maintain test coverage standards",
|
|
448
|
+
},
|
|
449
|
+
{
|
|
450
|
+
"name": "III. Documentation",
|
|
451
|
+
"description": "Documentation must be updated for all public API changes",
|
|
452
|
+
},
|
|
453
|
+
]
|
|
454
|
+
for generic in generic_principles[: 3 - len(principles)]:
|
|
455
|
+
principles.append(generic)
|
|
456
|
+
|
|
457
|
+
return principles[:5] # Limit to 5 principles
|
|
458
|
+
|
|
459
|
+
@beartype
|
|
460
|
+
@require(lambda num: isinstance(num, int), "Number must be int")
|
|
461
|
+
@require(lambda num: 1 <= num <= 10, "Number must be 1-10")
|
|
462
|
+
@ensure(lambda result: isinstance(result, str), "Must return string")
|
|
463
|
+
def _number_to_roman(self, num: int) -> str:
|
|
464
|
+
"""Convert number to Roman numeral."""
|
|
465
|
+
roman_map = {1: "I", 2: "II", 3: "III", 4: "IV", 5: "V", 6: "VI", 7: "VII", 8: "VIII", 9: "IX", 10: "X"}
|
|
466
|
+
return roman_map.get(num, str(num))
|
|
467
|
+
|
|
468
|
+
@beartype
|
|
469
|
+
@require(lambda project_type: isinstance(project_type, str), "Project type must be string")
|
|
470
|
+
@require(lambda analysis: isinstance(analysis, dict), "Analysis must be dict")
|
|
471
|
+
@ensure(lambda result: isinstance(result, list), "Must return list")
|
|
472
|
+
def _get_project_type_principles(self, project_type: str, analysis: dict[str, Any]) -> list[dict[str, str]]:
|
|
473
|
+
"""Get project-type-specific principles."""
|
|
474
|
+
principles_map = {
|
|
475
|
+
"cli": [
|
|
476
|
+
{
|
|
477
|
+
"name": "I. CLI-First Architecture",
|
|
478
|
+
"description": "All functionality exposed via CLI; CLI is the primary interface; No direct code manipulation bypassing CLI validation",
|
|
479
|
+
},
|
|
480
|
+
{
|
|
481
|
+
"name": "II. Command Structure",
|
|
482
|
+
"description": "Commands follow consistent structure; Help text is comprehensive; Output formats are standardized",
|
|
483
|
+
},
|
|
484
|
+
],
|
|
485
|
+
"library": [
|
|
486
|
+
{
|
|
487
|
+
"name": "I. API Design",
|
|
488
|
+
"description": "Public APIs must be well-documented; Backward compatibility maintained; Versioning follows semantic versioning",
|
|
489
|
+
},
|
|
490
|
+
{
|
|
491
|
+
"name": "II. Modularity",
|
|
492
|
+
"description": "Modules are self-contained; Dependencies are minimal; Clear separation of concerns",
|
|
493
|
+
},
|
|
494
|
+
],
|
|
495
|
+
"api": [
|
|
496
|
+
{
|
|
497
|
+
"name": "I. REST/GraphQL Conventions",
|
|
498
|
+
"description": "API endpoints follow RESTful or GraphQL conventions; Status codes are used correctly; Error responses are standardized",
|
|
499
|
+
},
|
|
500
|
+
{
|
|
501
|
+
"name": "II. Authentication & Authorization",
|
|
502
|
+
"description": "All endpoints require authentication; Authorization is enforced; Security best practices followed",
|
|
503
|
+
},
|
|
504
|
+
],
|
|
505
|
+
"frontend": [
|
|
506
|
+
{
|
|
507
|
+
"name": "I. Component Architecture",
|
|
508
|
+
"description": "Components are reusable and composable; Props are typed; State management is centralized",
|
|
509
|
+
},
|
|
510
|
+
{
|
|
511
|
+
"name": "II. Accessibility",
|
|
512
|
+
"description": "WCAG 2.1 AA compliance required; Keyboard navigation supported; Screen reader compatible",
|
|
513
|
+
},
|
|
514
|
+
],
|
|
515
|
+
}
|
|
516
|
+
|
|
517
|
+
return principles_map.get(project_type, [])
|
|
518
|
+
|
|
519
|
+
@beartype
|
|
520
|
+
@require(lambda template_path: isinstance(template_path, Path), "Template path must be Path")
|
|
521
|
+
@require(lambda suggestions: isinstance(suggestions, dict), "Suggestions must be dict")
|
|
522
|
+
@ensure(lambda result: isinstance(result, str), "Must return string")
|
|
523
|
+
def enrich_template(self, template_path: Path, suggestions: dict[str, Any]) -> str:
|
|
524
|
+
"""
|
|
525
|
+
Fill constitution template with suggestions.
|
|
526
|
+
|
|
527
|
+
Args:
|
|
528
|
+
template_path: Path to constitution template
|
|
529
|
+
suggestions: Dictionary with placeholder values
|
|
530
|
+
|
|
531
|
+
Returns:
|
|
532
|
+
Enriched constitution markdown
|
|
533
|
+
"""
|
|
534
|
+
if not template_path.exists() or str(template_path) == "/dev/null":
|
|
535
|
+
# Create default template
|
|
536
|
+
template_content = self._get_default_template()
|
|
537
|
+
else:
|
|
538
|
+
template_content = template_path.read_text(encoding="utf-8")
|
|
539
|
+
|
|
540
|
+
# Replace placeholders
|
|
541
|
+
enriched = template_content
|
|
542
|
+
|
|
543
|
+
# Replace [PROJECT_NAME]
|
|
544
|
+
project_name = suggestions.get("project_name", "Project")
|
|
545
|
+
enriched = re.sub(r"\[PROJECT_NAME\]", project_name, enriched)
|
|
546
|
+
|
|
547
|
+
# Replace principles (up to 5)
|
|
548
|
+
principles = suggestions.get("principles", [])
|
|
549
|
+
for i, principle in enumerate(principles, 1):
|
|
550
|
+
enriched = re.sub(
|
|
551
|
+
rf"\[PRINCIPLE_{i}_NAME\]",
|
|
552
|
+
principle.get("name", f"Principle {i}"),
|
|
553
|
+
enriched,
|
|
554
|
+
)
|
|
555
|
+
enriched = re.sub(
|
|
556
|
+
rf"\[PRINCIPLE_{i}_DESCRIPTION\]",
|
|
557
|
+
principle.get("description", ""),
|
|
558
|
+
enriched,
|
|
559
|
+
)
|
|
560
|
+
|
|
561
|
+
# Remove unused principle placeholders
|
|
562
|
+
for i in range(len(principles) + 1, 6):
|
|
563
|
+
# Remove principle section if placeholder remains
|
|
564
|
+
pattern = rf"### \[PRINCIPLE_{i}_NAME\].*?\[PRINCIPLE_{i}_DESCRIPTION\]"
|
|
565
|
+
enriched = re.sub(pattern, "", enriched, flags=re.DOTALL)
|
|
566
|
+
|
|
567
|
+
# Replace [SECTION_2_NAME] and [SECTION_2_CONTENT]
|
|
568
|
+
section2_name = suggestions.get("section2_name", "Development Workflow")
|
|
569
|
+
section2_content = suggestions.get("section2_content", self._generate_workflow_section(suggestions))
|
|
570
|
+
enriched = re.sub(r"\[SECTION_2_NAME\]", section2_name, enriched)
|
|
571
|
+
enriched = re.sub(r"\[SECTION_2_CONTENT\]", section2_content, enriched)
|
|
572
|
+
|
|
573
|
+
# Replace [SECTION_3_NAME] and [SECTION_3_CONTENT] (optional)
|
|
574
|
+
section3_name = suggestions.get("section3_name", "Quality Standards")
|
|
575
|
+
section3_content = suggestions.get("section3_content", self._generate_quality_standards_section(suggestions))
|
|
576
|
+
enriched = re.sub(r"\[SECTION_3_NAME\]", section3_name, enriched)
|
|
577
|
+
enriched = re.sub(r"\[SECTION_3_CONTENT\]", section3_content, enriched)
|
|
578
|
+
|
|
579
|
+
# Replace [GOVERNANCE_RULES]
|
|
580
|
+
governance_rules = suggestions.get(
|
|
581
|
+
"governance_rules",
|
|
582
|
+
"Constitution supersedes all other practices. Amendments require documentation, team approval, and migration plan for breaking changes.",
|
|
583
|
+
)
|
|
584
|
+
enriched = re.sub(r"\[GOVERNANCE_RULES\]", governance_rules, enriched)
|
|
585
|
+
|
|
586
|
+
# Replace version and dates
|
|
587
|
+
today = date.today().isoformat()
|
|
588
|
+
enriched = re.sub(r"\[CONSTITUTION_VERSION\]", "1.0.0", enriched)
|
|
589
|
+
enriched = re.sub(r"\[RATIFICATION_DATE\]", today, enriched)
|
|
590
|
+
enriched = re.sub(r"\[LAST_AMENDED_DATE\]", today, enriched)
|
|
591
|
+
|
|
592
|
+
# Remove HTML comments (examples)
|
|
593
|
+
enriched = re.sub(r"<!--.*?-->", "", enriched, flags=re.DOTALL)
|
|
594
|
+
|
|
595
|
+
# Clean up multiple blank lines
|
|
596
|
+
enriched = re.sub(r"\n{3,}", "\n\n", enriched)
|
|
597
|
+
|
|
598
|
+
return enriched.strip() + "\n"
|
|
599
|
+
|
|
600
|
+
    @beartype
    @ensure(lambda result: isinstance(result, str), "Must return string")
    def _get_default_template(self) -> str:
        """
        Get the default constitution template.

        Returns a markdown skeleton whose bracketed placeholders
        ([PROJECT_NAME], [PRINCIPLE_n_NAME]/[PRINCIPLE_n_DESCRIPTION],
        [SECTION_2/3_NAME] and [SECTION_2/3_CONTENT], [GOVERNANCE_RULES],
        and the version/date stamps) are substituted by enrich_template.
        """
        # Template body is intentionally at column 0 inside the literal so the
        # emitted markdown has no leading indentation.
        return """# [PROJECT_NAME] Constitution

## Core Principles

### [PRINCIPLE_1_NAME]
[PRINCIPLE_1_DESCRIPTION]

### [PRINCIPLE_2_NAME]
[PRINCIPLE_2_DESCRIPTION]

### [PRINCIPLE_3_NAME]
[PRINCIPLE_3_DESCRIPTION]

## [SECTION_2_NAME]

[SECTION_2_CONTENT]

## [SECTION_3_NAME]

[SECTION_3_CONTENT]

## Governance

[GOVERNANCE_RULES]

**Version**: [CONSTITUTION_VERSION] | **Ratified**: [RATIFICATION_DATE] | **Last Amended**: [LAST_AMENDED_DATE]
"""
|
|
631
|
+
|
|
632
|
+
@beartype
@require(lambda suggestions: isinstance(suggestions, dict), "Suggestions must be dict")
@ensure(lambda result: isinstance(result, str), "Must return string")
def _generate_workflow_section(self, suggestions: dict[str, Any]) -> str:
    """Render the 'Development Workflow' section as a markdown bullet list.

    Uses ``suggestions["development_workflow"]`` when present and non-empty;
    otherwise falls back to a generic test/format/lint/type-check checklist.
    """
    fallback = [
        "Testing: Run test suite before committing",
        "Formatting: Apply code formatter before committing",
        "Linting: Fix linting errors before committing",
        "Type Checking: Ensure type checking passes",
    ]
    # Empty/missing analysis results fall back to the generic checklist.
    entries = suggestions.get("development_workflow", []) or fallback
    bullets = [f"- {entry}" for entry in entries]
    return "\n".join(bullets) if bullets else "Standard development workflow applies."
|
|
653
|
+
|
|
654
|
+
@beartype
@require(lambda suggestions: isinstance(suggestions, dict), "Suggestions must be dict")
@ensure(lambda result: isinstance(result, str), "Must return string")
def _generate_quality_standards_section(self, suggestions: dict[str, Any]) -> str:
    """Render the 'Quality Standards' section as a markdown bullet list.

    Uses ``suggestions["quality_standards"]`` when present and non-empty;
    otherwise falls back to generic lint/coverage/documentation standards.
    """
    selected = suggestions.get("quality_standards", [])
    if not selected:
        # No analysis-derived standards — use the generic defaults.
        selected = [
            "Code quality: Linting and formatting required",
            "Testing: Test coverage standards must be met",
            "Documentation: Public APIs must be documented",
        ]
    rendered = [f"- {rule}" for rule in selected]
    if not rendered:
        return "Standard quality gates apply."
    return "\n".join(rendered)
|
|
673
|
+
|
|
674
|
+
@beartype
@require(lambda repo_path: isinstance(repo_path, Path), "Repository path must be Path")
@require(lambda constitution_path: isinstance(constitution_path, Path), "Constitution path must be Path")
@ensure(lambda result: isinstance(result, str), "Must return enriched constitution")
def bootstrap(self, repo_path: Path, constitution_path: Path) -> str:
    """
    Generate bootstrap constitution from repository analysis.

    Analyzes the repository, derives suggested principles and
    workflow/quality sections, then renders them into the default
    constitution template.

    Args:
        repo_path: Path to repository root
        constitution_path: Path where constitution should be written

    Returns:
        Enriched constitution markdown
    """
    # Analyze repository
    analysis = self.analyze_repository(repo_path)

    # Suggest principles
    principles = self.suggest_principles(analysis)

    # Prepare suggestions
    # NOTE: the workflow/quality section content is generated from
    # `analysis` (which carries "development_workflow"/"quality_standards"),
    # not from the suggestions dict being assembled here.
    suggestions: dict[str, Any] = {
        "project_name": analysis.get("project_name", "Project"),
        "principles": principles,
        "section2_name": "Development Workflow",
        "section2_content": self._generate_workflow_section(analysis),
        "section3_name": "Quality Standards",
        "section3_content": self._generate_quality_standards_section(analysis),
        "governance_rules": "Constitution supersedes all other practices. Amendments require documentation, team approval, and migration plan for breaking changes.",
        "development_workflow": analysis.get("development_workflow", []),
        "quality_standards": analysis.get("quality_standards", []),
    }

    # Bootstrap always generates a fresh constitution from the default
    # template, never from an existing file. Use a path that is guaranteed
    # not to exist so enrich_template falls back to the default template on
    # every platform. (The previous sentinel, Path("/dev/null"), *does*
    # exist on POSIX, where an existence check would read the empty device
    # file instead of triggering the default template.)
    template_path = repo_path / "__specfact_bootstrap_nonexistent_template__.md"
    return self.enrich_template(template_path, suggestions)
|
|
712
|
+
|
|
713
|
+
@beartype
@require(lambda constitution_path: isinstance(constitution_path, Path), "Constitution path must be Path")
@ensure(lambda result: isinstance(result, tuple), "Must return (is_valid, issues) tuple")
def validate(self, constitution_path: Path) -> tuple[bool, list[str]]:
    """
    Validate constitution completeness.

    Args:
        constitution_path: Path to constitution file

    Returns:
        Tuple of (is_valid, list_of_issues)
    """
    if not constitution_path.exists():
        return (False, ["Constitution file does not exist"])

    problems: list[str] = []

    try:
        text = constitution_path.read_text(encoding="utf-8").strip()

        # Empty or header-only document.
        if text in ("", "# Constitution"):
            problems.append("Constitution is empty or minimal (only contains header)")

        # Unresolved [ALL_CAPS] template placeholders.
        leftover = re.findall(r"\[[A-Z_0-9]+\]", text)
        if leftover:
            problems.append(
                f"Constitution contains {len(leftover)} unresolved placeholders: {', '.join(leftover[:5])}"
            )

        # Required 'Core Principles' section header.
        if re.search(r"##\s+Core\s+Principles", text, re.IGNORECASE) is None:
            problems.append("Constitution missing 'Core Principles' section")

        # At least one numbered principle (roman or arabic, I–V / 1–5).
        numbered = re.findall(r"###\s+(?:I\.|II\.|III\.|IV\.|V\.|1\.|2\.|3\.|4\.|5\.)", text)
        if not numbered:
            problems.append("Constitution has no numbered principles")

        # Required 'Governance' section header.
        if re.search(r"##\s+Governance", text, re.IGNORECASE) is None:
            problems.append("Constitution missing 'Governance' section")

        # Version/ratification footer line.
        if re.search(r"\*\*Version\*\*.*\*\*Ratified\*\*", text) is None:
            problems.append("Constitution missing version and ratification date")

    except Exception as exc:
        return (False, [f"Error reading constitution: {exc!s}"])

    return (not problems, problems)
|