specfact_cli-0.4.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- specfact_cli/__init__.py +14 -0
- specfact_cli/agents/__init__.py +23 -0
- specfact_cli/agents/analyze_agent.py +392 -0
- specfact_cli/agents/base.py +95 -0
- specfact_cli/agents/plan_agent.py +202 -0
- specfact_cli/agents/registry.py +176 -0
- specfact_cli/agents/sync_agent.py +133 -0
- specfact_cli/analyzers/__init__.py +10 -0
- specfact_cli/analyzers/code_analyzer.py +775 -0
- specfact_cli/cli.py +397 -0
- specfact_cli/commands/__init__.py +7 -0
- specfact_cli/commands/enforce.py +87 -0
- specfact_cli/commands/import_cmd.py +355 -0
- specfact_cli/commands/init.py +119 -0
- specfact_cli/commands/plan.py +1090 -0
- specfact_cli/commands/repro.py +172 -0
- specfact_cli/commands/sync.py +408 -0
- specfact_cli/common/__init__.py +24 -0
- specfact_cli/common/logger_setup.py +673 -0
- specfact_cli/common/logging_utils.py +41 -0
- specfact_cli/common/text_utils.py +52 -0
- specfact_cli/common/utils.py +48 -0
- specfact_cli/comparators/__init__.py +10 -0
- specfact_cli/comparators/plan_comparator.py +391 -0
- specfact_cli/generators/__init__.py +13 -0
- specfact_cli/generators/plan_generator.py +105 -0
- specfact_cli/generators/protocol_generator.py +115 -0
- specfact_cli/generators/report_generator.py +200 -0
- specfact_cli/generators/workflow_generator.py +111 -0
- specfact_cli/importers/__init__.py +6 -0
- specfact_cli/importers/speckit_converter.py +773 -0
- specfact_cli/importers/speckit_scanner.py +704 -0
- specfact_cli/models/__init__.py +32 -0
- specfact_cli/models/deviation.py +105 -0
- specfact_cli/models/enforcement.py +150 -0
- specfact_cli/models/plan.py +97 -0
- specfact_cli/models/protocol.py +28 -0
- specfact_cli/modes/__init__.py +18 -0
- specfact_cli/modes/detector.py +126 -0
- specfact_cli/modes/router.py +153 -0
- specfact_cli/sync/__init__.py +11 -0
- specfact_cli/sync/repository_sync.py +279 -0
- specfact_cli/sync/speckit_sync.py +388 -0
- specfact_cli/utils/__init__.py +57 -0
- specfact_cli/utils/console.py +69 -0
- specfact_cli/utils/feature_keys.py +213 -0
- specfact_cli/utils/git.py +241 -0
- specfact_cli/utils/ide_setup.py +381 -0
- specfact_cli/utils/prompts.py +179 -0
- specfact_cli/utils/structure.py +496 -0
- specfact_cli/utils/yaml_utils.py +200 -0
- specfact_cli/validators/__init__.py +19 -0
- specfact_cli/validators/fsm.py +260 -0
- specfact_cli/validators/repro_checker.py +320 -0
- specfact_cli/validators/schema.py +200 -0
- specfact_cli-0.4.0.dist-info/METADATA +332 -0
- specfact_cli-0.4.0.dist-info/RECORD +60 -0
- specfact_cli-0.4.0.dist-info/WHEEL +4 -0
- specfact_cli-0.4.0.dist-info/entry_points.txt +2 -0
- specfact_cli-0.4.0.dist-info/licenses/LICENSE.md +55 -0

specfact_cli/importers/speckit_scanner.py

@@ -0,0 +1,704 @@
"""
Spec-Kit scanner for importing Spec-Kit projects.

This module provides functionality to scan Spec-Kit repositories,
parse their structure, and extract features, stories, and requirements
from markdown artifacts generated by Spec-Kit commands.

Spec-Kit uses slash commands (/speckit.specify, /speckit.plan, etc.) to
generate markdown artifacts in specs/ and .specify/ directories.
"""

from __future__ import annotations

import re
from pathlib import Path
from typing import Any

from beartype import beartype
from icontract import ensure, require


class SpecKitScanner:
    """
    Scanner for Spec-Kit repositories.

    Scans Spec-Kit directory structure, parses markdown files (spec.md, plan.md, tasks.md),
    and extracts features, user stories, requirements, and tasks.
    """

    # Spec-Kit directory structure
    SPECIFY_DIR = ".specify"
    SPECIFY_MEMORY_DIR = ".specify/memory"
    SPECS_DIR = "specs"
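
    # Directory layout these constants describe, as implied by the globbing in
    # scan_structure() and parse_memory_files() below (illustrative tree, not
    # an excerpt from a real repository):
    #
    #   .specify/
    #       memory/
    #           constitution.md
    #   specs/
    #       001-feature-name/
    #           spec.md
    #           plan.md
    #           tasks.md
    #           contracts/
    #               api.yaml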

    @beartype
    def __init__(self, repo_path: Path) -> None:
        """
        Initialize Spec-Kit scanner.

        Args:
            repo_path: Path to Spec-Kit repository root
        """
        self.repo_path = Path(repo_path)

    @beartype
    @ensure(lambda result: isinstance(result, bool), "Must return boolean")
    def is_speckit_repo(self) -> bool:
        """
        Check if repository is a Spec-Kit project.

        Returns:
            True if Spec-Kit structure detected, False otherwise
        """
        # Check for Spec-Kit format (.specify directory)
        specify_dir = self.repo_path / self.SPECIFY_DIR
        return specify_dir.exists() and specify_dir.is_dir()
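
    # Minimal usage sketch (the path is hypothetical; the methods referenced
    # are defined below):
    #
    #   scanner = SpecKitScanner(Path("/path/to/repo"))
    #   if scanner.is_speckit_repo():
    #       structure = scanner.scan_structure()
    #       features = scanner.discover_features()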

    @beartype
    @ensure(lambda result: isinstance(result, dict), "Must return dictionary")
    @ensure(lambda result: "is_speckit" in result, "Must include is_speckit key")
    def scan_structure(self) -> dict[str, Any]:
        """
        Scan Spec-Kit directory structure.

        Returns:
            Dictionary with detected structure information
        """
        structure = {
            "is_speckit": False,
            "specify_dir": None,
            "specify_memory_dir": None,
            "specs_dir": None,
            "spec_files": [],
            "feature_dirs": [],
            "memory_files": [],
        }

        if not self.is_speckit_repo():
            return structure

        structure["is_speckit"] = True

        # Check for .specify directory
        specify_dir = self.repo_path / self.SPECIFY_DIR
        if specify_dir.exists() and specify_dir.is_dir():
            structure["specify_dir"] = str(specify_dir)

        # Check for .specify/memory directory
        specify_memory_dir = self.repo_path / self.SPECIFY_MEMORY_DIR
        if specify_memory_dir.exists():
            structure["specify_memory_dir"] = str(specify_memory_dir)
            structure["memory_files"] = [str(f) for f in specify_memory_dir.glob("*.md")]

        # Check for specs directory
        specs_dir = self.repo_path / self.SPECS_DIR
        if specs_dir.exists():
            structure["specs_dir"] = str(specs_dir)
            # Find all feature directories (specs/*/)
            for spec_dir in specs_dir.iterdir():
                if spec_dir.is_dir():
                    structure["feature_dirs"].append(str(spec_dir))
                    # Find all markdown files in each feature directory
                    for md_file in spec_dir.glob("*.md"):
                        structure["spec_files"].append(str(md_file))
                    # Also check for contracts/*.yaml
                    contracts_dir = spec_dir / "contracts"
                    if contracts_dir.exists():
                        for yaml_file in contracts_dir.glob("*.yaml"):
                            structure["spec_files"].append(str(yaml_file))

        return structure
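
    # Shape of the mapping returned above for a repository with one feature
    # directory (values are illustrative, not taken from a real scan):
    #
    #   {
    #       "is_speckit": True,
    #       "specify_dir": "/repo/.specify",
    #       "specify_memory_dir": "/repo/.specify/memory",
    #       "specs_dir": "/repo/specs",
    #       "spec_files": ["/repo/specs/001-login/spec.md"],
    #       "feature_dirs": ["/repo/specs/001-login"],
    #       "memory_files": ["/repo/.specify/memory/constitution.md"],
    #   }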

    @beartype
    @ensure(lambda result: isinstance(result, list), "Must return list")
    @ensure(lambda result: all(isinstance(f, dict) for f in result), "All items must be dictionaries")
    def discover_features(self) -> list[dict[str, Any]]:
        """
        Discover all features from the specs directory.

        Returns:
            List of feature dictionaries with parsed data from spec.md, plan.md, tasks.md
        """
        features: list[dict[str, Any]] = []
        structure = self.scan_structure()

        if not structure["is_speckit"] or not structure["feature_dirs"]:
            return features

        for feature_dir_path in structure["feature_dirs"]:
            feature_dir = Path(feature_dir_path)
            spec_file = feature_dir / "spec.md"

            if spec_file.exists():
                spec_data = self.parse_spec_markdown(spec_file)
                if spec_data:
                    # Parse plan.md if it exists
                    plan_file = feature_dir / "plan.md"
                    if plan_file.exists():
                        plan_data = self.parse_plan_markdown(plan_file)
                        spec_data["plan"] = plan_data

                    # Parse tasks.md if it exists
                    tasks_file = feature_dir / "tasks.md"
                    if tasks_file.exists():
                        tasks_data = self.parse_tasks_markdown(tasks_file)
                        spec_data["tasks"] = tasks_data

                    features.append(spec_data)

        return features
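
    # Sketch of consuming the result (key names follow parse_spec_markdown
    # below; the loop itself is illustrative):
    #
    #   for feature in scanner.discover_features():
    #       print(feature["feature_key"], len(feature["stories"]))
    #       plan = feature.get("plan")    # present only when plan.md exists
    #       tasks = feature.get("tasks")  # present only when tasks.md exists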

    @beartype
    @require(lambda spec_file: spec_file is not None, "Spec file path must not be None")
    @require(lambda spec_file: spec_file.suffix == ".md", "Spec file must be markdown")
    @ensure(
        lambda result, spec_file: result is None or (isinstance(result, dict) and "feature_key" in result),
        "Must return None or dict with feature_key",
    )
    def parse_spec_markdown(self, spec_file: Path) -> dict[str, Any] | None:
        """
        Parse a Spec-Kit spec.md file to extract features, stories, requirements, and success criteria.

        Args:
            spec_file: Path to spec.md file

        Returns:
            Dictionary with extracted feature and story information, or None if file doesn't exist
        """
        if not spec_file.exists():
            return None

        try:
            content = spec_file.read_text(encoding="utf-8")
            spec_data: dict[str, Any] = {
                "feature_key": None,
                "feature_title": None,
                "feature_branch": None,
                "created_date": None,
                "status": None,
                "stories": [],
                "requirements": [],
                "success_criteria": [],
                "edge_cases": [],
            }

            # Extract frontmatter (if present)
            frontmatter_match = re.search(r"^---\n(.*?)\n---", content, re.MULTILINE | re.DOTALL)
            if frontmatter_match:
                frontmatter = frontmatter_match.group(1)
                # Extract Feature Branch
                branch_match = re.search(r"\*\*Feature Branch\*\*:\s*`(.+?)`", frontmatter)
                if branch_match:
                    spec_data["feature_branch"] = branch_match.group(1).strip()
                # Extract Created date
                created_match = re.search(r"\*\*Created\*\*:\s*(\d{4}-\d{2}-\d{2})", frontmatter)
                if created_match:
                    spec_data["created_date"] = created_match.group(1).strip()
                # Extract Status
                status_match = re.search(r"\*\*Status\*\*:\s*(.+?)(?:\n|$)", frontmatter)
                if status_match:
                    spec_data["status"] = status_match.group(1).strip()

            # Extract feature key from directory name (specs/001-feature-name/spec.md)
            spec_dir = spec_file.parent
            if spec_dir.name:
                spec_data["feature_key"] = spec_dir.name.upper().replace("-", "_")
                # If feature_branch not found in frontmatter, use directory name
                if not spec_data["feature_branch"]:
                    spec_data["feature_branch"] = spec_dir.name

            # Extract feature title from spec.md header
            title_match = re.search(r"^#\s+Feature Specification:\s*(.+)$", content, re.MULTILINE)
            if title_match:
                spec_data["feature_title"] = title_match.group(1).strip()

            # Extract user stories with full context
            story_pattern = r"###\s+User Story\s+(\d+)\s*-\s*(.+?)\s*\(Priority:\s*(P\d+)\)"
            stories = re.finditer(story_pattern, content, re.MULTILINE | re.DOTALL)

            story_counter = 1
            for story_match in stories:
                story_number = story_match.group(1)
                story_title = story_match.group(2).strip()
                priority = story_match.group(3)

                # Find story content (between this story and next story or end of section)
                story_start = story_match.end()
                next_story_match = re.search(r"###\s+User Story\s+\d+", content[story_start:], re.MULTILINE)
                story_end = story_start + next_story_match.start() if next_story_match else len(content)
                story_content = content[story_start:story_end]

                # Extract "As a..." description
                as_a_match = re.search(
                    r"As a (.+?), I want (.+?) so that (.+?)(?=\n\n|\*\*Why|\*\*Independent|\*\*Acceptance)",
                    story_content,
                    re.DOTALL,
                )
                as_a_text = ""
                if as_a_match:
                    as_a_text = f"As a {as_a_match.group(1)}, I want {as_a_match.group(2)}, so that {as_a_match.group(3)}".strip()

                # Extract "Why this priority" text
                why_priority_match = re.search(
                    r"\*\*Why this priority\*\*:\s*(.+?)(?=\n\n|\*\*Independent|$)", story_content, re.DOTALL
                )
                why_priority = why_priority_match.group(1).strip() if why_priority_match else ""

                # Extract INVEST criteria
                invsest_criteria: dict[str, str | None] = {
                    "independent": None,
                    "negotiable": None,
                    "valuable": None,
                    "estimable": None,
                    "small": None,
                    "testable": None,
                }
                for criterion in ["Independent", "Negotiable", "Valuable", "Estimable", "Small", "Testable"]:
                    criterion_match = re.search(rf"\*\*{criterion}\*\*:\s*(YES|NO)", story_content, re.IGNORECASE)
                    if criterion_match:
                        invsest_criteria[criterion.lower()] = criterion_match.group(1).upper()

                # Extract acceptance scenarios
                acceptance_pattern = r"(\d+)\.\s+\*\*Given\*\*\s+(.+?),\s+\*\*When\*\*\s+(.+?),\s+\*\*Then\*\*\s+(.+?)(?=\n\n|\n\d+\.|\n###|$)"
                acceptances = re.finditer(acceptance_pattern, story_content, re.DOTALL)

                acceptance_criteria = []
                for acc_match in acceptances:
                    given = acc_match.group(2).strip()
                    when = acc_match.group(3).strip()
                    then = acc_match.group(4).strip()
                    acceptance_criteria.append(f"Given {given}, When {when}, Then {then}")

                # Extract scenarios (Primary, Alternate, Exception, Recovery)
                scenarios = {
                    "primary": [],
                    "alternate": [],
                    "exception": [],
                    "recovery": [],
                }
                scenarios_section = re.search(r"\*\*Scenarios:\*\*\s*\n(.*?)(?=\n\n|\*\*|$)", story_content, re.DOTALL)
                if scenarios_section:
                    scenarios_text = scenarios_section.group(1)
                    # Extract Primary scenarios
                    primary_matches = re.finditer(
                        r"- \*\*Primary Scenario\*\*:\s*(.+?)(?=\n-|\n|$)", scenarios_text, re.DOTALL
                    )
                    for match in primary_matches:
                        scenarios["primary"].append(match.group(1).strip())
                    # Extract Alternate scenarios
                    alternate_matches = re.finditer(
                        r"- \*\*Alternate Scenario\*\*:\s*(.+?)(?=\n-|\n|$)", scenarios_text, re.DOTALL
                    )
                    for match in alternate_matches:
                        scenarios["alternate"].append(match.group(1).strip())
                    # Extract Exception scenarios
                    exception_matches = re.finditer(
                        r"- \*\*Exception Scenario\*\*:\s*(.+?)(?=\n-|\n|$)", scenarios_text, re.DOTALL
                    )
                    for match in exception_matches:
                        scenarios["exception"].append(match.group(1).strip())
                    # Extract Recovery scenarios
                    recovery_matches = re.finditer(
                        r"- \*\*Recovery Scenario\*\*:\s*(.+?)(?=\n-|\n|$)", scenarios_text, re.DOTALL
                    )
                    for match in recovery_matches:
                        scenarios["recovery"].append(match.group(1).strip())

                story_key = f"STORY-{story_counter:03d}"
                spec_data["stories"].append(
                    {
                        "key": story_key,
                        "number": story_number,
                        "title": story_title,
                        "priority": priority,
                        "as_a": as_a_text,
                        "why_priority": why_priority,
                        "invsest": invsest_criteria,
                        "acceptance": acceptance_criteria,
                        "scenarios": scenarios,
                    }
                )
                story_counter += 1

            # Extract functional requirements (FR-XXX)
            req_pattern = r"-?\s*\*\*FR-(\d+)\*\*:\s*System MUST\s+(.+?)(?=\n-|\n\*|\n\n|\*\*FR-|$)"
            requirements = re.finditer(req_pattern, content, re.MULTILINE | re.DOTALL)

            for req_match in requirements:
                req_id = req_match.group(1)
                req_text = req_match.group(2).strip()
                spec_data["requirements"].append(
                    {
                        "id": f"FR-{req_id}",
                        "text": req_text,
                    }
                )

            # Extract success criteria (SC-XXX)
            sc_pattern = r"-?\s*\*\*SC-(\d+)\*\*:\s*(.+?)(?=\n-|\n\*|\n\n|\*\*SC-|$)"
            success_criteria = re.finditer(sc_pattern, content, re.MULTILINE | re.DOTALL)

            for sc_match in success_criteria:
                sc_id = sc_match.group(1)
                sc_text = sc_match.group(2).strip()
                spec_data["success_criteria"].append(
                    {
                        "id": f"SC-{sc_id}",
                        "text": sc_text,
                    }
                )

            # Extract edge cases section
            edge_case_section = re.search(r"### Edge Cases\n(.*?)(?=\n##|$)", content, re.MULTILINE | re.DOTALL)
            if edge_case_section:
                edge_case_text = edge_case_section.group(1)
                # Extract individual edge cases (lines starting with -)
                edge_case_pattern = r"- (.+?)(?=\n-|\n|$)"
                edge_cases = re.finditer(edge_case_pattern, edge_case_text, re.MULTILINE)
                for ec_match in edge_cases:
                    ec_text = ec_match.group(1).strip()
                    if ec_text:
                        spec_data["edge_cases"].append(ec_text)

            return spec_data

        except Exception as e:
            raise ValueError(f"Failed to parse spec.md: {e}") from e
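
    # Fragment of a spec.md that the patterns above are written against
    # (hand-written to mirror the regexes, not copied from a real file):
    #
    #   # Feature Specification: User Login
    #   ---
    #   **Feature Branch**: `001-user-login`
    #   **Created**: 2025-01-03
    #   **Status**: Draft
    #   ---
    #
    #   ### User Story 1 - Basic login (Priority: P1)
    #   As a visitor, I want to log in so that I can reach my dashboard
    #   **Why this priority**: Core entry point.
    #   **Independent**: YES
    #   1. **Given** a registered user, **When** valid credentials are submitted, **Then** a session is created
    #
    #   - **FR-001**: System MUST validate credentials
    #   - **SC-001**: Login completes in under 2 seconds
    #
    #   ### Edge Cases
    #   - Expired password forces a reset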

    @beartype
    @require(lambda plan_file: plan_file is not None, "Plan file path must not be None")
    @require(lambda plan_file: plan_file.suffix == ".md", "Plan file must be markdown")
    @ensure(
        lambda result: result is None or (isinstance(result, dict) and "dependencies" in result),
        "Must return None or dict with dependencies",
    )
    def parse_plan_markdown(self, plan_file: Path) -> dict[str, Any] | None:
        """
        Parse a Spec-Kit plan.md file to extract technical context and architecture.

        Args:
            plan_file: Path to plan.md file

        Returns:
            Dictionary with extracted plan information, or None if file doesn't exist
        """
        if not plan_file.exists():
            return None

        try:
            content = plan_file.read_text(encoding="utf-8")
            plan_data: dict[str, Any] = {
                "summary": None,
                "language_version": None,
                "dependencies": [],
                "technology_stack": [],
                "constraints": [],
                "unknowns": [],
                "constitution_check": {},
                "phases": [],
                "architecture": {},
            }

            # Extract summary
            summary_match = re.search(r"^## Summary\n(.*?)(?=\n##|$)", content, re.MULTILINE | re.DOTALL)
            if summary_match:
                plan_data["summary"] = summary_match.group(1).strip()

            # Extract technical context
            tech_context_match = re.search(r"^## Technical Context\n(.*?)(?=\n##|$)", content, re.MULTILINE | re.DOTALL)
            if tech_context_match:
                tech_context = tech_context_match.group(1)
                # Extract language/version
                lang_match = re.search(r"\*\*Language/Version\*\*:\s*(.+?)(?=\n|$)", tech_context, re.MULTILINE)
                if lang_match:
                    plan_data["language_version"] = lang_match.group(1).strip()

                # Extract dependencies
                deps_match = re.search(
                    r"\*\*Primary Dependencies\*\*:\s*\n(.*?)(?=\n\*\*|$)", tech_context, re.MULTILINE | re.DOTALL
                )
                if deps_match:
                    deps_text = deps_match.group(1)
                    # Extract list items
                    dep_items = re.finditer(r"- `(.+?)`\s*-?\s*(.+?)(?=\n-|\n|$)", deps_text, re.MULTILINE)
                    for dep_match in dep_items:
                        dep_name = dep_match.group(1).strip()
                        dep_desc = dep_match.group(2).strip() if dep_match.group(2) else ""
                        plan_data["dependencies"].append({"name": dep_name, "description": dep_desc})

                # Extract Technology Stack
                stack_match = re.search(
                    r"\*\*Technology Stack\*\*:\s*\n(.*?)(?=\n\*\*|$)", tech_context, re.MULTILINE | re.DOTALL
                )
                if stack_match:
                    stack_text = stack_match.group(1)
                    stack_items = re.finditer(r"- (.+?)(?=\n-|\n|$)", stack_text, re.MULTILINE)
                    for item_match in stack_items:
                        plan_data["technology_stack"].append(item_match.group(1).strip())

                # Extract Constraints
                constraints_match = re.search(
                    r"\*\*Constraints\*\*:\s*\n(.*?)(?=\n\*\*|$)", tech_context, re.MULTILINE | re.DOTALL
                )
                if constraints_match:
                    constraints_text = constraints_match.group(1)
                    constraint_items = re.finditer(r"- (.+?)(?=\n-|\n|$)", constraints_text, re.MULTILINE)
                    for item_match in constraint_items:
                        plan_data["constraints"].append(item_match.group(1).strip())

                # Extract Unknowns
                unknowns_match = re.search(
                    r"\*\*Unknowns\*\*:\s*\n(.*?)(?=\n\*\*|$)", tech_context, re.MULTILINE | re.DOTALL
                )
                if unknowns_match:
                    unknowns_text = unknowns_match.group(1)
                    unknown_items = re.finditer(r"- (.+?)(?=\n-|\n|$)", unknowns_text, re.MULTILINE)
                    for item_match in unknown_items:
                        plan_data["unknowns"].append(item_match.group(1).strip())

            # Extract Constitution Check section (CRITICAL for /speckit.analyze)
            constitution_match = re.search(
                r"^## Constitution Check\n(.*?)(?=\n##|$)", content, re.MULTILINE | re.DOTALL
            )
            if constitution_match:
                constitution_text = constitution_match.group(1)
                plan_data["constitution_check"] = {
                    "article_vii": {},
                    "article_viii": {},
                    "article_ix": {},
                    "status": None,
                }
                # Extract Article VII (Simplicity)
                article_vii_match = re.search(
                    r"\*\*Article VII \(Simplicity\)\*\*:\s*\n(.*?)(?=\n\*\*|$)",
                    constitution_text,
                    re.MULTILINE | re.DOTALL,
                )
                if article_vii_match:
                    article_vii_text = article_vii_match.group(1)
                    # NOTE: these flags only record that a checkbox list is present
                    # under the article; they do not distinguish checked from unchecked.
                    plan_data["constitution_check"]["article_vii"] = {
                        "using_3_projects": re.search(r"- \[([ x])\]", article_vii_text) is not None,
                        "no_future_proofing": re.search(r"- \[([ x])\]", article_vii_text) is not None,
                    }
                # Extract Article VIII (Anti-Abstraction)
                article_viii_match = re.search(
                    r"\*\*Article VIII \(Anti-Abstraction\)\*\*:\s*\n(.*?)(?=\n\*\*|$)",
                    constitution_text,
                    re.MULTILINE | re.DOTALL,
                )
                if article_viii_match:
                    article_viii_text = article_viii_match.group(1)
                    plan_data["constitution_check"]["article_viii"] = {
                        "using_framework_directly": re.search(r"- \[([ x])\]", article_viii_text) is not None,
                        "single_model_representation": re.search(r"- \[([ x])\]", article_viii_text) is not None,
                    }
                # Extract Article IX (Integration-First)
                article_ix_match = re.search(
                    r"\*\*Article IX \(Integration-First\)\*\*:\s*\n(.*?)(?=\n\*\*|$)",
                    constitution_text,
                    re.MULTILINE | re.DOTALL,
                )
                if article_ix_match:
                    article_ix_text = article_ix_match.group(1)
                    plan_data["constitution_check"]["article_ix"] = {
                        "contracts_defined": re.search(r"- \[([ x])\]", article_ix_text) is not None,
                        "contract_tests_written": re.search(r"- \[([ x])\]", article_ix_text) is not None,
                    }
                # Extract Status
                status_match = re.search(r"\*\*Status\*\*:\s*(PASS|FAIL)", constitution_text, re.IGNORECASE)
                if status_match:
                    plan_data["constitution_check"]["status"] = status_match.group(1).upper()

            # Extract Phases
            phase_pattern = r"^## Phase (-?\d+):\s*(.+?)\n(.*?)(?=\n## Phase|$)"
            phases = re.finditer(phase_pattern, content, re.MULTILINE | re.DOTALL)
            for phase_match in phases:
                phase_num = phase_match.group(1)
                phase_name = phase_match.group(2).strip()
                phase_content = phase_match.group(3).strip()
                plan_data["phases"].append(
                    {
                        "number": phase_num,
                        "name": phase_name,
                        "content": phase_content,
                    }
                )

            return plan_data

        except Exception as e:
            raise ValueError(f"Failed to parse plan.md: {e}") from e
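
    # Fragment of a plan.md that the patterns above are written against
    # (hand-written to mirror the regexes, not copied from a real file):
    #
    #   ## Summary
    #   Implement login using the standard session middleware.
    #
    #   ## Technical Context
    #   **Language/Version**: Python 3.11
    #   **Primary Dependencies**:
    #   - `typer` - CLI framework
    #   **Constraints**:
    #   - Must run offline
    #
    #   ## Constitution Check
    #   **Article VII (Simplicity)**:
    #   - [x] Using <= 3 projects
    #   **Status**: PASS
    #
    #   ## Phase 0: Research
    #   Resolve unknowns before design.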

    @beartype
    @require(lambda tasks_file: tasks_file is not None, "Tasks file path must not be None")
    @require(lambda tasks_file: tasks_file.suffix == ".md", "Tasks file must be markdown")
    @ensure(
        lambda result: result is None or (isinstance(result, dict) and "tasks" in result),
        "Must return None or dict with tasks",
    )
    def parse_tasks_markdown(self, tasks_file: Path) -> dict[str, Any] | None:
        """
        Parse a Spec-Kit tasks.md file to extract tasks with IDs, story mappings, and dependencies.

        Args:
            tasks_file: Path to tasks.md file

        Returns:
            Dictionary with extracted task information, or None if file doesn't exist
        """
        if not tasks_file.exists():
            return None

        try:
            content = tasks_file.read_text(encoding="utf-8")
            tasks_data: dict[str, Any] = {
                "tasks": [],
                "phases": [],
            }

            # Extract tasks (format: - [ ] [TaskID] [P?] [Story?] Description)
            task_pattern = r"- \[([ x])\] \[?([T\d]+)\]?\s*\[?([P])?\]?\s*\[?([US\d]+)?\]?\s*(.+?)(?=\n-|\n##|$)"
            tasks = re.finditer(task_pattern, content, re.MULTILINE | re.DOTALL)

            for task_match in tasks:
                checked = task_match.group(1) == "x"
                task_id = task_match.group(2)
                is_parallel = task_match.group(3) == "P"
                story_ref = task_match.group(4)
                description = task_match.group(5).strip()

                tasks_data["tasks"].append(
                    {
                        "id": task_id,
                        "description": description,
                        "checked": checked,
                        "parallel": is_parallel,
                        "story_ref": story_ref,
                    }
                )

            # Extract phase sections and map tasks to phases
            phase_pattern = r"^## Phase (\d+): (.+?)\n(.*?)(?=\n## Phase|$)"
            phases = re.finditer(phase_pattern, content, re.MULTILINE | re.DOTALL)

            for phase_match in phases:
                phase_num = phase_match.group(1)
                phase_name = phase_match.group(2).strip()
                phase_content = phase_match.group(3)

                # Find tasks in this phase (same pattern as above, scoped to the phase body)
                phase_tasks = []
                phase_task_pattern = (
                    r"- \[([ x])\] \[?([T\d]+)\]?\s*\[?([P])?\]?\s*\[?([US\d]+)?\]?\s*(.+?)(?=\n-|\n##|$)"
                )
                phase_task_matches = re.finditer(phase_task_pattern, phase_content, re.MULTILINE | re.DOTALL)

                for task_match in phase_task_matches:
                    checked = task_match.group(1) == "x"
                    task_id = task_match.group(2)
                    is_parallel = task_match.group(3) == "P"
                    story_ref = task_match.group(4)
                    description = task_match.group(5).strip()

                    phase_tasks.append(
                        {
                            "id": task_id,
                            "description": description,
                            "checked": checked,
                            "parallel": is_parallel,
                            "story_ref": story_ref,
                            "phase": phase_num,
                            "phase_name": phase_name,
                        }
                    )

                tasks_data["phases"].append(
                    {
                        "number": phase_num,
                        "name": phase_name,
                        "content": phase_content,
                        "tasks": phase_tasks,
                    }
                )

            return tasks_data

        except Exception as e:
            raise ValueError(f"Failed to parse tasks.md: {e}") from e
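
    # Task lines the pattern above is written against (illustrative):
    #
    #   ## Phase 1: Setup
    #   - [x] T001 [P] [US1] Create project skeleton
    #   - [ ] T002 Configure linting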

    def parse_memory_files(self, memory_dir: Path) -> dict[str, Any]:
        """
        Parse Spec-Kit memory files (constitution.md, etc.).

        Args:
            memory_dir: Path to memory directory

        Returns:
            Dictionary with extracted memory information
        """
        memory_data: dict[str, Any] = {
            "constitution": None,
            "principles": [],
            "constraints": [],
            "version": None,
        }

        if not memory_dir.exists():
            return memory_data

        # Parse constitution.md
        constitution_file = memory_dir / "constitution.md"
        if constitution_file.exists():
            try:
                content = constitution_file.read_text(encoding="utf-8")
                memory_data["constitution"] = content

                # Extract version
                version_match = re.search(r"\*\*Version\*\*:\s*(\d+\.\d+\.\d+)", content, re.MULTILINE)
                if version_match:
                    memory_data["version"] = version_match.group(1)

                # Extract principles (from "### I. Principle Name" or "### Principle Name" sections)
                principle_pattern = r"###\s+(?:[IVX]+\.\s*)?(.+?)(?:\s*\(NON-NEGOTIABLE\))?\n\n(.*?)(?=\n###|\n##|$)"
                principles = re.finditer(principle_pattern, content, re.MULTILINE | re.DOTALL)

                for prin_match in principles:
                    principle_name = prin_match.group(1).strip()
                    principle_content = prin_match.group(2).strip() if prin_match.group(2) else ""
                    # Skip placeholder principles
                    if not principle_name.startswith("["):
                        # Extract rationale if present
                        rationale_match = re.search(
                            r"\*\*Rationale\*\*:\s*(.+?)(?=\n\n|\n###|\n##|$)", principle_content, re.DOTALL
                        )
                        rationale = rationale_match.group(1).strip() if rationale_match else ""

                        memory_data["principles"].append(
                            {
                                "name": principle_name,
                                "description": (
                                    principle_content.split("**Rationale**")[0].strip()
                                    if "**Rationale**" in principle_content
                                    else principle_content
                                ),
                                "rationale": rationale,
                            }
                        )

                # Extract constraints from Governance section
                governance_section = re.search(r"## Governance\n(.*?)(?=\n##|$)", content, re.MULTILINE | re.DOTALL)
                if governance_section:
                    # Look for constraint patterns
                    constraint_pattern = r"- (.+?)(?=\n-|\n|$)"
                    constraints = re.finditer(constraint_pattern, governance_section.group(1), re.MULTILINE)
                    for const_match in constraints:
                        const_text = const_match.group(1).strip()
                        if const_text and not const_text.startswith("["):
                            memory_data["constraints"].append(const_text)

            except Exception:
                # Non-fatal: a malformed constitution.md is skipped and the
                # defaults above are returned.
                pass

        return memory_data
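
Taken together, the new module exposes a small scanning API. A minimal end-to-end sketch follows; the repository path is hypothetical, and the import path follows the package layout in the file list above.

```python
from pathlib import Path

from specfact_cli.importers.speckit_scanner import SpecKitScanner

# Hypothetical checkout of a Spec-Kit project (illustration only).
repo = Path("./my-speckit-repo")
scanner = SpecKitScanner(repo)

if scanner.is_speckit_repo():
    # High-level structure: directories plus every spec/contract file found.
    structure = scanner.scan_structure()
    print(f"feature dirs: {len(structure['feature_dirs'])}")

    # Fully parsed features, with nested "plan" and "tasks" when present.
    for feature in scanner.discover_features():
        print(feature["feature_key"], f"{len(feature['stories'])} stories")

    # Constitution principles and version from .specify/memory/.
    memory = scanner.parse_memory_files(repo / SpecKitScanner.SPECIFY_MEMORY_DIR)
    print("constitution version:", memory["version"])
```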