specfact-cli 0.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of specfact-cli might be problematic. Click here for more details.
- specfact_cli/__init__.py +14 -0
- specfact_cli/agents/__init__.py +23 -0
- specfact_cli/agents/analyze_agent.py +392 -0
- specfact_cli/agents/base.py +95 -0
- specfact_cli/agents/plan_agent.py +202 -0
- specfact_cli/agents/registry.py +176 -0
- specfact_cli/agents/sync_agent.py +133 -0
- specfact_cli/analyzers/__init__.py +10 -0
- specfact_cli/analyzers/code_analyzer.py +775 -0
- specfact_cli/cli.py +397 -0
- specfact_cli/commands/__init__.py +7 -0
- specfact_cli/commands/enforce.py +87 -0
- specfact_cli/commands/import_cmd.py +355 -0
- specfact_cli/commands/init.py +119 -0
- specfact_cli/commands/plan.py +1090 -0
- specfact_cli/commands/repro.py +172 -0
- specfact_cli/commands/sync.py +408 -0
- specfact_cli/common/__init__.py +24 -0
- specfact_cli/common/logger_setup.py +673 -0
- specfact_cli/common/logging_utils.py +41 -0
- specfact_cli/common/text_utils.py +52 -0
- specfact_cli/common/utils.py +48 -0
- specfact_cli/comparators/__init__.py +10 -0
- specfact_cli/comparators/plan_comparator.py +391 -0
- specfact_cli/generators/__init__.py +13 -0
- specfact_cli/generators/plan_generator.py +105 -0
- specfact_cli/generators/protocol_generator.py +115 -0
- specfact_cli/generators/report_generator.py +200 -0
- specfact_cli/generators/workflow_generator.py +111 -0
- specfact_cli/importers/__init__.py +6 -0
- specfact_cli/importers/speckit_converter.py +773 -0
- specfact_cli/importers/speckit_scanner.py +704 -0
- specfact_cli/models/__init__.py +32 -0
- specfact_cli/models/deviation.py +105 -0
- specfact_cli/models/enforcement.py +150 -0
- specfact_cli/models/plan.py +97 -0
- specfact_cli/models/protocol.py +28 -0
- specfact_cli/modes/__init__.py +18 -0
- specfact_cli/modes/detector.py +126 -0
- specfact_cli/modes/router.py +153 -0
- specfact_cli/sync/__init__.py +11 -0
- specfact_cli/sync/repository_sync.py +279 -0
- specfact_cli/sync/speckit_sync.py +388 -0
- specfact_cli/utils/__init__.py +57 -0
- specfact_cli/utils/console.py +69 -0
- specfact_cli/utils/feature_keys.py +213 -0
- specfact_cli/utils/git.py +241 -0
- specfact_cli/utils/ide_setup.py +381 -0
- specfact_cli/utils/prompts.py +179 -0
- specfact_cli/utils/structure.py +496 -0
- specfact_cli/utils/yaml_utils.py +200 -0
- specfact_cli/validators/__init__.py +19 -0
- specfact_cli/validators/fsm.py +260 -0
- specfact_cli/validators/repro_checker.py +320 -0
- specfact_cli/validators/schema.py +200 -0
- specfact_cli-0.4.0.dist-info/METADATA +332 -0
- specfact_cli-0.4.0.dist-info/RECORD +60 -0
- specfact_cli-0.4.0.dist-info/WHEEL +4 -0
- specfact_cli-0.4.0.dist-info/entry_points.txt +2 -0
- specfact_cli-0.4.0.dist-info/licenses/LICENSE.md +55 -0
|
@@ -0,0 +1,773 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Spec-Kit to SpecFact converter.
|
|
3
|
+
|
|
4
|
+
This module converts Spec-Kit markdown artifacts (spec.md, plan.md, tasks.md, constitution.md)
|
|
5
|
+
to SpecFact format (plans, protocols).
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
from typing import Any
|
|
12
|
+
|
|
13
|
+
from beartype import beartype
|
|
14
|
+
from icontract import ensure, require
|
|
15
|
+
|
|
16
|
+
from specfact_cli.generators.plan_generator import PlanGenerator
|
|
17
|
+
from specfact_cli.generators.protocol_generator import ProtocolGenerator
|
|
18
|
+
from specfact_cli.generators.workflow_generator import WorkflowGenerator
|
|
19
|
+
from specfact_cli.importers.speckit_scanner import SpecKitScanner
|
|
20
|
+
from specfact_cli.models.plan import Feature, Idea, PlanBundle, Product, Release, Story
|
|
21
|
+
from specfact_cli.models.protocol import Protocol
|
|
22
|
+
from specfact_cli.utils.structure import SpecFactStructure
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
class SpecKitConverter:
|
|
26
|
+
"""
|
|
27
|
+
Converter from Spec-Kit format to SpecFact format.
|
|
28
|
+
|
|
29
|
+
Converts markdown artifacts (spec.md, plan.md, tasks.md, constitution.md) → plan bundles.
|
|
30
|
+
"""
|
|
31
|
+
|
|
32
|
+
@beartype
|
|
33
|
+
def __init__(self, repo_path: Path, mapping_file: Path | None = None) -> None:
|
|
34
|
+
"""
|
|
35
|
+
Initialize Spec-Kit converter.
|
|
36
|
+
|
|
37
|
+
Args:
|
|
38
|
+
repo_path: Path to Spec-Kit repository
|
|
39
|
+
mapping_file: Optional custom mapping file (default: built-in)
|
|
40
|
+
"""
|
|
41
|
+
self.repo_path = Path(repo_path)
|
|
42
|
+
self.scanner = SpecKitScanner(repo_path)
|
|
43
|
+
self.protocol_generator = ProtocolGenerator()
|
|
44
|
+
self.plan_generator = PlanGenerator()
|
|
45
|
+
self.workflow_generator = WorkflowGenerator()
|
|
46
|
+
self.mapping_file = mapping_file
|
|
47
|
+
|
|
48
|
+
@beartype
|
|
49
|
+
@ensure(lambda result: isinstance(result, Protocol), "Must return Protocol")
|
|
50
|
+
@ensure(lambda result: len(result.states) >= 2, "Must have at least INIT and COMPLETE states")
|
|
51
|
+
def convert_protocol(self, output_path: Path | None = None) -> Protocol:
|
|
52
|
+
"""
|
|
53
|
+
Convert Spec-Kit features to SpecFact protocol.
|
|
54
|
+
|
|
55
|
+
Creates a minimal protocol from feature states.
|
|
56
|
+
Since Spec-Kit markdown artifacts don't explicitly define FSM protocols,
|
|
57
|
+
this generates a simple protocol based on feature workflow.
|
|
58
|
+
|
|
59
|
+
Args:
|
|
60
|
+
output_path: Optional path to write protocol.yaml (default: .specfact/protocols/workflow.protocol.yaml)
|
|
61
|
+
|
|
62
|
+
Returns:
|
|
63
|
+
Generated Protocol model
|
|
64
|
+
"""
|
|
65
|
+
# For markdown-based Spec-Kit, create a minimal protocol
|
|
66
|
+
# States based on feature workflow: INIT -> FEATURE_1 -> ... -> COMPLETE
|
|
67
|
+
features = self.scanner.discover_features()
|
|
68
|
+
|
|
69
|
+
if not features:
|
|
70
|
+
# Default minimal protocol if no features found
|
|
71
|
+
states = ["INIT", "COMPLETE"]
|
|
72
|
+
else:
|
|
73
|
+
states = ["INIT"]
|
|
74
|
+
for feature in features:
|
|
75
|
+
feature_key = feature.get("feature_key", "UNKNOWN")
|
|
76
|
+
states.append(feature_key)
|
|
77
|
+
states.append("COMPLETE")
|
|
78
|
+
|
|
79
|
+
protocol = Protocol(
|
|
80
|
+
states=states,
|
|
81
|
+
start="INIT",
|
|
82
|
+
transitions=[],
|
|
83
|
+
guards={},
|
|
84
|
+
)
|
|
85
|
+
|
|
86
|
+
# Write to file if output path provided
|
|
87
|
+
if output_path:
|
|
88
|
+
SpecFactStructure.ensure_structure(output_path.parent)
|
|
89
|
+
self.protocol_generator.generate(protocol, output_path)
|
|
90
|
+
else:
|
|
91
|
+
# Use default path - construct .specfact/protocols/workflow.protocol.yaml
|
|
92
|
+
output_path = self.repo_path / ".specfact" / "protocols" / "workflow.protocol.yaml"
|
|
93
|
+
SpecFactStructure.ensure_structure(self.repo_path)
|
|
94
|
+
self.protocol_generator.generate(protocol, output_path)
|
|
95
|
+
|
|
96
|
+
return protocol
|
|
97
|
+
|
|
98
|
+
@beartype
|
|
99
|
+
@ensure(lambda result: isinstance(result, PlanBundle), "Must return PlanBundle")
|
|
100
|
+
@ensure(lambda result: result.version == "1.0", "Must have version 1.0")
|
|
101
|
+
def convert_plan(self, output_path: Path | None = None) -> PlanBundle:
|
|
102
|
+
"""
|
|
103
|
+
Convert Spec-Kit markdown artifacts to SpecFact plan bundle.
|
|
104
|
+
|
|
105
|
+
Args:
|
|
106
|
+
output_path: Optional path to write plan bundle (default: .specfact/plans/main.bundle.yaml)
|
|
107
|
+
|
|
108
|
+
Returns:
|
|
109
|
+
Generated PlanBundle model
|
|
110
|
+
"""
|
|
111
|
+
# Discover features from markdown artifacts
|
|
112
|
+
discovered_features = self.scanner.discover_features()
|
|
113
|
+
|
|
114
|
+
# Extract features from markdown data
|
|
115
|
+
features = self._extract_features_from_markdown(discovered_features)
|
|
116
|
+
|
|
117
|
+
# Parse constitution for constraints
|
|
118
|
+
structure = self.scanner.scan_structure()
|
|
119
|
+
memory_dir = Path(structure.get("specify_memory_dir", "")) if structure.get("specify_memory_dir") else None
|
|
120
|
+
constraints: list[str] = []
|
|
121
|
+
if memory_dir and Path(memory_dir).exists():
|
|
122
|
+
memory_data = self.scanner.parse_memory_files(Path(memory_dir))
|
|
123
|
+
constraints = memory_data.get("constraints", [])
|
|
124
|
+
|
|
125
|
+
# Create idea from repository
|
|
126
|
+
repo_name = self.repo_path.name or "Imported Project"
|
|
127
|
+
idea = Idea(
|
|
128
|
+
title=self._humanize_name(repo_name),
|
|
129
|
+
narrative=f"Imported from Spec-Kit project: {repo_name}",
|
|
130
|
+
target_users=[],
|
|
131
|
+
value_hypothesis="",
|
|
132
|
+
constraints=constraints,
|
|
133
|
+
metrics=None,
|
|
134
|
+
)
|
|
135
|
+
|
|
136
|
+
# Create product with themes (extract from feature titles)
|
|
137
|
+
themes = self._extract_themes_from_features(features)
|
|
138
|
+
product = Product(
|
|
139
|
+
themes=themes,
|
|
140
|
+
releases=[
|
|
141
|
+
Release(
|
|
142
|
+
name="v0.1",
|
|
143
|
+
objectives=["Migrate from Spec-Kit"],
|
|
144
|
+
scope=[f.key for f in features],
|
|
145
|
+
risks=[],
|
|
146
|
+
)
|
|
147
|
+
],
|
|
148
|
+
)
|
|
149
|
+
|
|
150
|
+
# Create plan bundle
|
|
151
|
+
plan_bundle = PlanBundle(
|
|
152
|
+
version="1.0",
|
|
153
|
+
idea=idea,
|
|
154
|
+
business=None,
|
|
155
|
+
product=product,
|
|
156
|
+
features=features,
|
|
157
|
+
metadata=None,
|
|
158
|
+
)
|
|
159
|
+
|
|
160
|
+
# Write to file if output path provided
|
|
161
|
+
if output_path:
|
|
162
|
+
SpecFactStructure.ensure_structure(output_path.parent)
|
|
163
|
+
self.plan_generator.generate(plan_bundle, output_path)
|
|
164
|
+
else:
|
|
165
|
+
# Use default path - construct .specfact/plans/main.bundle.yaml
|
|
166
|
+
output_path = self.repo_path / ".specfact" / "plans" / "main.bundle.yaml"
|
|
167
|
+
SpecFactStructure.ensure_structure(self.repo_path)
|
|
168
|
+
self.plan_generator.generate(plan_bundle, output_path)
|
|
169
|
+
|
|
170
|
+
return plan_bundle
|
|
171
|
+
|
|
172
|
+
@beartype
|
|
173
|
+
@require(lambda discovered_features: isinstance(discovered_features, list), "Must be list")
|
|
174
|
+
@ensure(lambda result: isinstance(result, list), "Must return list")
|
|
175
|
+
@ensure(lambda result: all(isinstance(f, Feature) for f in result), "All items must be Features")
|
|
176
|
+
def _extract_features_from_markdown(self, discovered_features: list[dict[str, Any]]) -> list[Feature]:
|
|
177
|
+
"""Extract features from Spec-Kit markdown artifacts."""
|
|
178
|
+
features: list[Feature] = []
|
|
179
|
+
|
|
180
|
+
for feature_data in discovered_features:
|
|
181
|
+
feature_key = feature_data.get("feature_key", "UNKNOWN")
|
|
182
|
+
feature_title = feature_data.get("feature_title", "Unknown Feature")
|
|
183
|
+
|
|
184
|
+
# Extract stories from spec.md
|
|
185
|
+
stories = self._extract_stories_from_spec(feature_data)
|
|
186
|
+
|
|
187
|
+
# Extract outcomes from requirements
|
|
188
|
+
requirements = feature_data.get("requirements", [])
|
|
189
|
+
outcomes: list[str] = []
|
|
190
|
+
for req in requirements:
|
|
191
|
+
if isinstance(req, dict):
|
|
192
|
+
outcomes.append(req.get("text", ""))
|
|
193
|
+
elif isinstance(req, str):
|
|
194
|
+
outcomes.append(req)
|
|
195
|
+
|
|
196
|
+
# Extract acceptance criteria from success criteria
|
|
197
|
+
success_criteria = feature_data.get("success_criteria", [])
|
|
198
|
+
acceptance: list[str] = []
|
|
199
|
+
for sc in success_criteria:
|
|
200
|
+
if isinstance(sc, dict):
|
|
201
|
+
acceptance.append(sc.get("text", ""))
|
|
202
|
+
elif isinstance(sc, str):
|
|
203
|
+
acceptance.append(sc)
|
|
204
|
+
|
|
205
|
+
# Calculate confidence based on completeness
|
|
206
|
+
confidence = 0.5
|
|
207
|
+
if feature_title and feature_title != "Unknown Feature":
|
|
208
|
+
confidence += 0.2
|
|
209
|
+
if stories:
|
|
210
|
+
confidence += 0.2
|
|
211
|
+
if outcomes:
|
|
212
|
+
confidence += 0.1
|
|
213
|
+
|
|
214
|
+
feature = Feature(
|
|
215
|
+
key=feature_key,
|
|
216
|
+
title=feature_title,
|
|
217
|
+
outcomes=outcomes if outcomes else [f"Provides {feature_title} functionality"],
|
|
218
|
+
acceptance=acceptance if acceptance else [f"{feature_title} is functional"],
|
|
219
|
+
constraints=feature_data.get("edge_cases", []),
|
|
220
|
+
stories=stories,
|
|
221
|
+
confidence=min(confidence, 1.0),
|
|
222
|
+
draft=False,
|
|
223
|
+
)
|
|
224
|
+
|
|
225
|
+
features.append(feature)
|
|
226
|
+
|
|
227
|
+
return features
|
|
228
|
+
|
|
229
|
+
@beartype
|
|
230
|
+
@require(lambda feature_data: isinstance(feature_data, dict), "Must be dict")
|
|
231
|
+
@ensure(lambda result: isinstance(result, list), "Must return list")
|
|
232
|
+
@ensure(lambda result: all(isinstance(s, Story) for s in result), "All items must be Stories")
|
|
233
|
+
def _extract_stories_from_spec(self, feature_data: dict[str, Any]) -> list[Story]:
|
|
234
|
+
"""Extract user stories from Spec-Kit spec.md data."""
|
|
235
|
+
stories: list[Story] = []
|
|
236
|
+
spec_stories = feature_data.get("stories", [])
|
|
237
|
+
|
|
238
|
+
for story_data in spec_stories:
|
|
239
|
+
story_key = story_data.get("key", "UNKNOWN")
|
|
240
|
+
story_title = story_data.get("title", "Unknown Story")
|
|
241
|
+
priority = story_data.get("priority", "P3")
|
|
242
|
+
|
|
243
|
+
# Calculate story points from priority
|
|
244
|
+
priority_map = {"P1": 8, "P2": 5, "P3": 3, "P4": 1}
|
|
245
|
+
story_points = priority_map.get(priority, 3)
|
|
246
|
+
value_points = story_points # Use same value for simplicity
|
|
247
|
+
|
|
248
|
+
# Extract acceptance criteria
|
|
249
|
+
acceptance = story_data.get("acceptance", [])
|
|
250
|
+
|
|
251
|
+
# Extract tasks from tasks.md if available
|
|
252
|
+
tasks_data = feature_data.get("tasks", {})
|
|
253
|
+
tasks: list[str] = []
|
|
254
|
+
if tasks_data and "tasks" in tasks_data:
|
|
255
|
+
for task in tasks_data["tasks"]:
|
|
256
|
+
if isinstance(task, dict):
|
|
257
|
+
story_ref = task.get("story_ref", "")
|
|
258
|
+
# Match story reference to this story
|
|
259
|
+
if (story_ref and story_ref in story_key) or not story_ref:
|
|
260
|
+
tasks.append(task.get("description", ""))
|
|
261
|
+
|
|
262
|
+
story = Story(
|
|
263
|
+
key=story_key,
|
|
264
|
+
title=story_title,
|
|
265
|
+
acceptance=acceptance if acceptance else [f"{story_title} is implemented"],
|
|
266
|
+
tags=[priority],
|
|
267
|
+
story_points=story_points,
|
|
268
|
+
value_points=value_points,
|
|
269
|
+
tasks=tasks,
|
|
270
|
+
confidence=0.8, # High confidence from spec
|
|
271
|
+
draft=False,
|
|
272
|
+
)
|
|
273
|
+
stories.append(story)
|
|
274
|
+
|
|
275
|
+
return stories
|
|
276
|
+
|
|
277
|
+
@beartype
|
|
278
|
+
@require(lambda features: isinstance(features, list), "Must be list")
|
|
279
|
+
@require(lambda features: all(isinstance(f, Feature) for f in features), "All items must be Features")
|
|
280
|
+
@ensure(lambda result: isinstance(result, list), "Must return list")
|
|
281
|
+
@ensure(lambda result: all(isinstance(t, str) for t in result), "All items must be strings")
|
|
282
|
+
@ensure(lambda result: len(result) > 0, "Must have at least one theme")
|
|
283
|
+
def _extract_themes_from_features(self, features: list[Feature]) -> list[str]:
|
|
284
|
+
"""Extract themes from feature titles."""
|
|
285
|
+
themes: set[str] = set()
|
|
286
|
+
themes.add("Core")
|
|
287
|
+
|
|
288
|
+
for feature in features:
|
|
289
|
+
# Extract theme from feature title (first word or key pattern)
|
|
290
|
+
title = feature.title
|
|
291
|
+
if title:
|
|
292
|
+
# Try to extract meaningful theme from title
|
|
293
|
+
words = title.split()
|
|
294
|
+
if words:
|
|
295
|
+
# Use first significant word as theme
|
|
296
|
+
theme = words[0]
|
|
297
|
+
if len(theme) > 2:
|
|
298
|
+
themes.add(theme)
|
|
299
|
+
|
|
300
|
+
return sorted(list(themes))
|
|
301
|
+
|
|
302
|
+
@beartype
|
|
303
|
+
@ensure(lambda result: result.exists(), "Output path must exist")
|
|
304
|
+
@ensure(lambda result: result.suffix == ".yml", "Must be YAML file")
|
|
305
|
+
def generate_semgrep_rules(self, output_path: Path | None = None) -> Path:
|
|
306
|
+
"""
|
|
307
|
+
Generate Semgrep async rules for the repository.
|
|
308
|
+
|
|
309
|
+
Args:
|
|
310
|
+
output_path: Optional path to write Semgrep rules (default: .semgrep/async-anti-patterns.yml)
|
|
311
|
+
|
|
312
|
+
Returns:
|
|
313
|
+
Path to generated Semgrep rules file
|
|
314
|
+
"""
|
|
315
|
+
if output_path is None:
|
|
316
|
+
# Use default path
|
|
317
|
+
output_path = self.repo_path / ".semgrep" / "async-anti-patterns.yml"
|
|
318
|
+
|
|
319
|
+
self.workflow_generator.generate_semgrep_rules(output_path)
|
|
320
|
+
return output_path
|
|
321
|
+
|
|
322
|
+
@beartype
|
|
323
|
+
@require(lambda budget: budget > 0, "Budget must be positive")
|
|
324
|
+
@require(lambda python_version: python_version.startswith("3."), "Python version must be 3.x")
|
|
325
|
+
@ensure(lambda result: result.exists(), "Output path must exist")
|
|
326
|
+
@ensure(lambda result: result.suffix == ".yml", "Must be YAML file")
|
|
327
|
+
def generate_github_action(
|
|
328
|
+
self,
|
|
329
|
+
output_path: Path | None = None,
|
|
330
|
+
repo_name: str | None = None,
|
|
331
|
+
budget: int = 90,
|
|
332
|
+
python_version: str = "3.12",
|
|
333
|
+
) -> Path:
|
|
334
|
+
"""
|
|
335
|
+
Generate GitHub Action workflow for SpecFact validation.
|
|
336
|
+
|
|
337
|
+
Args:
|
|
338
|
+
output_path: Optional path to write workflow (default: .github/workflows/specfact-gate.yml)
|
|
339
|
+
repo_name: Repository name for context
|
|
340
|
+
budget: Time budget in seconds for validation (must be > 0)
|
|
341
|
+
python_version: Python version for workflow (must be 3.x)
|
|
342
|
+
|
|
343
|
+
Returns:
|
|
344
|
+
Path to generated GitHub Action workflow file
|
|
345
|
+
"""
|
|
346
|
+
if output_path is None:
|
|
347
|
+
# Use default path
|
|
348
|
+
output_path = self.repo_path / ".github" / "workflows" / "specfact-gate.yml"
|
|
349
|
+
|
|
350
|
+
if repo_name is None:
|
|
351
|
+
repo_name = self.repo_path.name or "specfact-project"
|
|
352
|
+
|
|
353
|
+
self.workflow_generator.generate_github_action(output_path, repo_name, budget, python_version)
|
|
354
|
+
return output_path
|
|
355
|
+
|
|
356
|
+
@beartype
|
|
357
|
+
@require(lambda plan_bundle: isinstance(plan_bundle, PlanBundle), "Must be PlanBundle instance")
|
|
358
|
+
@ensure(lambda result: isinstance(result, int), "Must return int (number of features converted)")
|
|
359
|
+
@ensure(lambda result: result >= 0, "Result must be non-negative")
|
|
360
|
+
def convert_to_speckit(self, plan_bundle: PlanBundle) -> int:
|
|
361
|
+
"""
|
|
362
|
+
Convert SpecFact plan bundle to Spec-Kit markdown artifacts.
|
|
363
|
+
|
|
364
|
+
Generates spec.md, plan.md, and tasks.md files for each feature in the plan bundle.
|
|
365
|
+
|
|
366
|
+
Args:
|
|
367
|
+
plan_bundle: SpecFact plan bundle to convert
|
|
368
|
+
|
|
369
|
+
Returns:
|
|
370
|
+
Number of features converted
|
|
371
|
+
"""
|
|
372
|
+
features_converted = 0
|
|
373
|
+
|
|
374
|
+
for feature in plan_bundle.features:
|
|
375
|
+
# Generate feature directory name from key (FEATURE-001 -> 001-feature-name)
|
|
376
|
+
feature_num = self._extract_feature_number(feature.key)
|
|
377
|
+
feature_name = self._to_feature_dir_name(feature.title)
|
|
378
|
+
|
|
379
|
+
# Create feature directory
|
|
380
|
+
feature_dir = self.repo_path / "specs" / f"{feature_num:03d}-{feature_name}"
|
|
381
|
+
feature_dir.mkdir(parents=True, exist_ok=True)
|
|
382
|
+
|
|
383
|
+
# Generate spec.md
|
|
384
|
+
spec_content = self._generate_spec_markdown(feature)
|
|
385
|
+
(feature_dir / "spec.md").write_text(spec_content, encoding="utf-8")
|
|
386
|
+
|
|
387
|
+
# Generate plan.md
|
|
388
|
+
plan_content = self._generate_plan_markdown(feature)
|
|
389
|
+
(feature_dir / "plan.md").write_text(plan_content, encoding="utf-8")
|
|
390
|
+
|
|
391
|
+
# Generate tasks.md
|
|
392
|
+
tasks_content = self._generate_tasks_markdown(feature)
|
|
393
|
+
(feature_dir / "tasks.md").write_text(tasks_content, encoding="utf-8")
|
|
394
|
+
|
|
395
|
+
features_converted += 1
|
|
396
|
+
|
|
397
|
+
return features_converted
|
|
398
|
+
|
|
399
|
+
@beartype
|
|
400
|
+
@require(lambda feature: isinstance(feature, Feature), "Must be Feature instance")
|
|
401
|
+
@ensure(lambda result: isinstance(result, str), "Must return string")
|
|
402
|
+
@ensure(lambda result: len(result) > 0, "Result must be non-empty")
|
|
403
|
+
def _generate_spec_markdown(self, feature: Feature) -> str:
|
|
404
|
+
"""Generate Spec-Kit spec.md content from SpecFact feature."""
|
|
405
|
+
from datetime import datetime
|
|
406
|
+
|
|
407
|
+
# Extract feature branch from feature key (FEATURE-001 -> 001-feature-name)
|
|
408
|
+
feature_num = self._extract_feature_number(feature.key)
|
|
409
|
+
feature_name = self._to_feature_dir_name(feature.title)
|
|
410
|
+
feature_branch = f"{feature_num:03d}-{feature_name}"
|
|
411
|
+
|
|
412
|
+
# Generate frontmatter (CRITICAL for Spec-Kit compatibility)
|
|
413
|
+
lines = [
|
|
414
|
+
"---",
|
|
415
|
+
f"**Feature Branch**: `{feature_branch}`",
|
|
416
|
+
f"**Created**: {datetime.now().strftime('%Y-%m-%d')}",
|
|
417
|
+
"**Status**: Draft",
|
|
418
|
+
"---",
|
|
419
|
+
"",
|
|
420
|
+
f"# Feature Specification: {feature.title}",
|
|
421
|
+
"",
|
|
422
|
+
]
|
|
423
|
+
|
|
424
|
+
# Add stories
|
|
425
|
+
if feature.stories:
|
|
426
|
+
lines.append("## User Scenarios & Testing")
|
|
427
|
+
lines.append("")
|
|
428
|
+
|
|
429
|
+
for idx, story in enumerate(feature.stories, start=1):
|
|
430
|
+
# Extract priority from tags or default to P3
|
|
431
|
+
priority = "P3"
|
|
432
|
+
if story.tags:
|
|
433
|
+
for tag in story.tags:
|
|
434
|
+
if tag.startswith("P") and tag[1:].isdigit():
|
|
435
|
+
priority = tag
|
|
436
|
+
break
|
|
437
|
+
|
|
438
|
+
lines.append(f"### User Story {idx} - {story.title} (Priority: {priority})")
|
|
439
|
+
lines.append(f"Users can {story.title}")
|
|
440
|
+
lines.append("")
|
|
441
|
+
lines.append("**Why this priority**: Core functionality")
|
|
442
|
+
lines.append("")
|
|
443
|
+
|
|
444
|
+
# INVSEST criteria (CRITICAL for /speckit.analyze and /speckit.checklist)
|
|
445
|
+
lines.append("**Independent**: YES")
|
|
446
|
+
lines.append("**Negotiable**: YES")
|
|
447
|
+
lines.append("**Valuable**: YES")
|
|
448
|
+
lines.append("**Estimable**: YES")
|
|
449
|
+
lines.append("**Small**: YES")
|
|
450
|
+
lines.append("**Testable**: YES")
|
|
451
|
+
lines.append("")
|
|
452
|
+
|
|
453
|
+
lines.append("**Acceptance Criteria:**")
|
|
454
|
+
lines.append("")
|
|
455
|
+
|
|
456
|
+
scenarios_primary: list[str] = []
|
|
457
|
+
scenarios_alternate: list[str] = []
|
|
458
|
+
scenarios_exception: list[str] = []
|
|
459
|
+
scenarios_recovery: list[str] = []
|
|
460
|
+
|
|
461
|
+
for acc_idx, acc in enumerate(story.acceptance, start=1):
|
|
462
|
+
# Parse Given/When/Then if available
|
|
463
|
+
if "Given" in acc and "When" in acc and "Then" in acc:
|
|
464
|
+
parts = acc.split(", ")
|
|
465
|
+
given = parts[0].replace("Given ", "").strip()
|
|
466
|
+
when = parts[1].replace("When ", "").strip()
|
|
467
|
+
then = parts[2].replace("Then ", "").strip()
|
|
468
|
+
lines.append(f"{acc_idx}. **Given** {given}, **When** {when}, **Then** {then}")
|
|
469
|
+
|
|
470
|
+
# Categorize scenarios based on keywords
|
|
471
|
+
scenario_text = f"{given}, {when}, {then}"
|
|
472
|
+
acc_lower = acc.lower()
|
|
473
|
+
if any(keyword in acc_lower for keyword in ["error", "exception", "fail", "invalid"]):
|
|
474
|
+
scenarios_exception.append(scenario_text)
|
|
475
|
+
elif any(keyword in acc_lower for keyword in ["recover", "retry", "fallback"]):
|
|
476
|
+
scenarios_recovery.append(scenario_text)
|
|
477
|
+
elif any(keyword in acc_lower for keyword in ["alternate", "alternative", "different"]):
|
|
478
|
+
scenarios_alternate.append(scenario_text)
|
|
479
|
+
else:
|
|
480
|
+
scenarios_primary.append(scenario_text)
|
|
481
|
+
else:
|
|
482
|
+
lines.append(f"{acc_idx}. {acc}")
|
|
483
|
+
scenarios_primary.append(acc)
|
|
484
|
+
|
|
485
|
+
lines.append("")
|
|
486
|
+
|
|
487
|
+
# Scenarios section (CRITICAL for /speckit.analyze and /speckit.checklist)
|
|
488
|
+
if scenarios_primary or scenarios_alternate or scenarios_exception or scenarios_recovery:
|
|
489
|
+
lines.append("**Scenarios:**")
|
|
490
|
+
lines.append("")
|
|
491
|
+
|
|
492
|
+
if scenarios_primary:
|
|
493
|
+
for scenario in scenarios_primary:
|
|
494
|
+
lines.append(f"- **Primary Scenario**: {scenario}")
|
|
495
|
+
else:
|
|
496
|
+
lines.append("- **Primary Scenario**: Standard user flow")
|
|
497
|
+
|
|
498
|
+
if scenarios_alternate:
|
|
499
|
+
for scenario in scenarios_alternate:
|
|
500
|
+
lines.append(f"- **Alternate Scenario**: {scenario}")
|
|
501
|
+
else:
|
|
502
|
+
lines.append("- **Alternate Scenario**: Alternative user flow")
|
|
503
|
+
|
|
504
|
+
if scenarios_exception:
|
|
505
|
+
for scenario in scenarios_exception:
|
|
506
|
+
lines.append(f"- **Exception Scenario**: {scenario}")
|
|
507
|
+
else:
|
|
508
|
+
lines.append("- **Exception Scenario**: Error handling")
|
|
509
|
+
|
|
510
|
+
if scenarios_recovery:
|
|
511
|
+
for scenario in scenarios_recovery:
|
|
512
|
+
lines.append(f"- **Recovery Scenario**: {scenario}")
|
|
513
|
+
else:
|
|
514
|
+
lines.append("- **Recovery Scenario**: Recovery from errors")
|
|
515
|
+
|
|
516
|
+
lines.append("")
|
|
517
|
+
lines.append("")
|
|
518
|
+
|
|
519
|
+
# Add functional requirements from outcomes
|
|
520
|
+
if feature.outcomes:
|
|
521
|
+
lines.append("## Functional Requirements")
|
|
522
|
+
lines.append("")
|
|
523
|
+
|
|
524
|
+
for idx, outcome in enumerate(feature.outcomes, start=1):
|
|
525
|
+
lines.append(f"**FR-{idx:03d}**: System MUST {outcome}")
|
|
526
|
+
lines.append("")
|
|
527
|
+
|
|
528
|
+
# Add success criteria from acceptance
|
|
529
|
+
if feature.acceptance:
|
|
530
|
+
lines.append("## Success Criteria")
|
|
531
|
+
lines.append("")
|
|
532
|
+
|
|
533
|
+
for idx, acc in enumerate(feature.acceptance, start=1):
|
|
534
|
+
lines.append(f"**SC-{idx:03d}**: {acc}")
|
|
535
|
+
lines.append("")
|
|
536
|
+
|
|
537
|
+
# Add edge cases from constraints
|
|
538
|
+
if feature.constraints:
|
|
539
|
+
lines.append("### Edge Cases")
|
|
540
|
+
lines.append("")
|
|
541
|
+
|
|
542
|
+
for constraint in feature.constraints:
|
|
543
|
+
lines.append(f"- {constraint}")
|
|
544
|
+
lines.append("")
|
|
545
|
+
|
|
546
|
+
return "\n".join(lines)
|
|
547
|
+
|
|
548
|
+
@beartype
|
|
549
|
+
@require(lambda feature: isinstance(feature, Feature), "Must be Feature instance")
|
|
550
|
+
@ensure(lambda result: isinstance(result, str), "Must return string")
|
|
551
|
+
def _generate_plan_markdown(self, feature: Feature) -> str:
|
|
552
|
+
"""Generate Spec-Kit plan.md content from SpecFact feature."""
|
|
553
|
+
lines = [f"# Implementation Plan: {feature.title}", ""]
|
|
554
|
+
lines.append("## Summary")
|
|
555
|
+
lines.append(f"Implementation plan for {feature.title}.")
|
|
556
|
+
lines.append("")
|
|
557
|
+
|
|
558
|
+
lines.append("## Technical Context")
|
|
559
|
+
lines.append("")
|
|
560
|
+
lines.append("**Language/Version**: Python 3.11+")
|
|
561
|
+
lines.append("")
|
|
562
|
+
|
|
563
|
+
lines.append("**Primary Dependencies:**")
|
|
564
|
+
lines.append("")
|
|
565
|
+
# Could extract from feature context if available
|
|
566
|
+
lines.append("- `typer` - CLI framework")
|
|
567
|
+
lines.append("- `pydantic` - Data validation")
|
|
568
|
+
lines.append("")
|
|
569
|
+
|
|
570
|
+
lines.append("**Technology Stack:**")
|
|
571
|
+
lines.append("")
|
|
572
|
+
lines.append("- Python 3.11+")
|
|
573
|
+
lines.append("- Typer for CLI")
|
|
574
|
+
lines.append("- Pydantic for data validation")
|
|
575
|
+
lines.append("")
|
|
576
|
+
|
|
577
|
+
lines.append("**Constraints:**")
|
|
578
|
+
lines.append("")
|
|
579
|
+
if feature.constraints:
|
|
580
|
+
for constraint in feature.constraints:
|
|
581
|
+
lines.append(f"- {constraint}")
|
|
582
|
+
else:
|
|
583
|
+
lines.append("- None specified")
|
|
584
|
+
lines.append("")
|
|
585
|
+
|
|
586
|
+
lines.append("**Unknowns:**")
|
|
587
|
+
lines.append("")
|
|
588
|
+
lines.append("- None at this time")
|
|
589
|
+
lines.append("")
|
|
590
|
+
|
|
591
|
+
# Constitution Check section (CRITICAL for /speckit.analyze)
|
|
592
|
+
lines.append("## Constitution Check")
|
|
593
|
+
lines.append("")
|
|
594
|
+
lines.append("**Article VII (Simplicity)**:")
|
|
595
|
+
lines.append("- [ ] Using ≤3 projects?")
|
|
596
|
+
lines.append("- [ ] No future-proofing?")
|
|
597
|
+
lines.append("")
|
|
598
|
+
lines.append("**Article VIII (Anti-Abstraction)**:")
|
|
599
|
+
lines.append("- [ ] Using framework directly?")
|
|
600
|
+
lines.append("- [ ] Single model representation?")
|
|
601
|
+
lines.append("")
|
|
602
|
+
lines.append("**Article IX (Integration-First)**:")
|
|
603
|
+
lines.append("- [ ] Contracts defined?")
|
|
604
|
+
lines.append("- [ ] Contract tests written?")
|
|
605
|
+
lines.append("")
|
|
606
|
+
lines.append("**Status**: PASS")
|
|
607
|
+
lines.append("")
|
|
608
|
+
|
|
609
|
+
# Phases section
|
|
610
|
+
lines.append("## Phase 0: Research")
|
|
611
|
+
lines.append("")
|
|
612
|
+
lines.append(f"Research and technical decisions for {feature.title}.")
|
|
613
|
+
lines.append("")
|
|
614
|
+
|
|
615
|
+
lines.append("## Phase 1: Design")
|
|
616
|
+
lines.append("")
|
|
617
|
+
lines.append(f"Design phase for {feature.title}.")
|
|
618
|
+
lines.append("")
|
|
619
|
+
|
|
620
|
+
lines.append("## Phase 2: Implementation")
|
|
621
|
+
lines.append("")
|
|
622
|
+
lines.append(f"Implementation phase for {feature.title}.")
|
|
623
|
+
lines.append("")
|
|
624
|
+
|
|
625
|
+
lines.append("## Phase -1: Pre-Implementation Gates")
|
|
626
|
+
lines.append("")
|
|
627
|
+
lines.append("Pre-implementation gate checks:")
|
|
628
|
+
lines.append("- [ ] Constitution check passed")
|
|
629
|
+
lines.append("- [ ] Contracts defined")
|
|
630
|
+
lines.append("- [ ] Technical context validated")
|
|
631
|
+
lines.append("")
|
|
632
|
+
|
|
633
|
+
return "\n".join(lines)
|
|
634
|
+
|
|
635
|
+
@beartype
@require(lambda feature: isinstance(feature, Feature), "Must be Feature instance")
@ensure(lambda result: isinstance(result, str), "Must return string")
def _generate_tasks_markdown(self, feature: Feature) -> str:
    """Generate Spec-Kit tasks.md content from SpecFact feature.

    Tasks are bucketed into phases by keyword heuristics on the task text:
    - Phase 1 (Setup): setup / install / configure / ... tasks
    - Phase 2 (Foundational): implement / create model / ... tasks, plus a
      generated default task for any story that has no explicit tasks
    - Phase 3+: one phase per user story for all remaining tasks

    Returns:
        The complete tasks.md document as a single newline-joined string.
    """
    lines = ["# Tasks", ""]

    # Keyword heuristics for routing a task into the first two phases.
    setup_keywords = ("setup", "install", "configure", "create project", "initialize")
    foundational_keywords = ("implement", "create model", "set up database", "middleware")

    # Global task number shared across all phases (rendered as T001, T002, ...).
    task_counter = 1

    # (task_num, description, story_num) triples for the first two phases.
    setup_tasks: list[tuple[int, str, int]] = []
    foundational_tasks: list[tuple[int, str, int]] = []
    # story_num -> [(task_num, description)] for the per-story phases.
    story_tasks: dict[int, list[tuple[int, str]]] = {}

    # Classify every task (or a generated default) into exactly one bucket.
    for story in feature.stories:
        story_num = self._extract_story_number(story.key)

        if story.tasks:
            for task_desc in story.tasks:
                task_lower = task_desc.lower()
                if any(keyword in task_lower for keyword in setup_keywords):
                    setup_tasks.append((task_counter, task_desc, story_num))
                elif any(keyword in task_lower for keyword in foundational_keywords):
                    foundational_tasks.append((task_counter, task_desc, story_num))
                else:
                    story_tasks.setdefault(story_num, []).append((task_counter, task_desc))
                task_counter += 1
        else:
            # Story has no explicit tasks - generate a default foundational one.
            foundational_tasks.append((task_counter, f"Implement {story.title}", story_num))
            task_counter += 1

    # Phase 1: Setup
    if setup_tasks:
        lines.append("## Phase 1: Setup")
        lines.append("")
        for task_num, task_desc, story_num in setup_tasks:
            lines.append(f"- [ ] [T{task_num:03d}] [P] [US{story_num}] {task_desc}")
        lines.append("")

    # Phase 2: Foundational
    if foundational_tasks:
        lines.append("## Phase 2: Foundational")
        lines.append("")
        for task_num, task_desc, story_num in foundational_tasks:
            lines.append(f"- [ ] [T{task_num:03d}] [P] [US{story_num}] {task_desc}")
        lines.append("")

    # Phase 3+: one phase per user story (Phase 3, 4, 5, ...).
    for story_idx, story in enumerate(feature.stories, start=1):
        story_num = self._extract_story_number(story.key)
        phase_num = story_idx + 2

        story_task_list = story_tasks.get(story_num, [])
        if not story_task_list:
            continue

        # Priority comes from the first "P<digits>" tag, defaulting to P3.
        priority = "P3"
        if story.tags:
            for tag in story.tags:
                if tag.startswith("P") and tag[1:].isdigit():
                    priority = tag
                    break

        lines.append(f"## Phase {phase_num}: User Story {story_idx} (Priority: {priority})")
        lines.append("")
        for task_num, task_desc in story_task_list:
            # NOTE(review): this tag uses the enumeration index (story_idx),
            # while Phases 1-2 tag with the number parsed from the story key.
            # These only agree when keys are sequential - confirm intended.
            lines.append(f"- [ ] [T{task_num:03d}] [US{story_idx}] {task_desc}")
        lines.append("")

    # No stories at all: emit a single default task.
    if not feature.stories:
        lines.append("## Phase 1: Setup")
        lines.append("")
        lines.append(f"- [ ] [T001] Implement {feature.title}")
        lines.append("")

    return "\n".join(lines)
|
|
726
|
+
|
|
727
|
+
@beartype
@require(lambda feature_key: isinstance(feature_key, str), "Must be string")
@ensure(lambda result: isinstance(result, int), "Must return int")
def _extract_feature_number(self, feature_key: str) -> int:
    """Extract feature number from key (FEATURE-001 -> 1)."""
    import re

    # First run of digits anywhere in the key; 0 when the key has none.
    digits = re.search(r"(\d+)", feature_key)
    if digits is None:
        return 0
    return int(digits.group(1))
|
|
736
|
+
|
|
737
|
+
@beartype
@require(lambda story_key: isinstance(story_key, str), "Must be string")
@ensure(lambda result: isinstance(result, int), "Must return int")
def _extract_story_number(self, story_key: str) -> int:
    """Extract story number from key (STORY-001 -> 1)."""
    import re

    # First run of digits anywhere in the key; 0 when the key has none.
    digits = re.search(r"(\d+)", story_key)
    if digits is None:
        return 0
    return int(digits.group(1))
|
|
746
|
+
|
|
747
|
+
@beartype
@require(lambda title: isinstance(title, str), "Must be string")
@ensure(lambda result: isinstance(result, str), "Must return string")
@ensure(lambda result: len(result) > 0, "Result must be non-empty")
def _to_feature_dir_name(self, title: str) -> str:
    """Convert feature title to directory name (User Authentication -> user-authentication).

    Lowercases the title, replaces every run of non-alphanumeric characters
    with a single hyphen, and trims hyphens from both ends. Falls back to
    "feature" when nothing alphanumeric remains (e.g. title "!!!"), so the
    non-empty-result contract above always holds.
    """
    import re

    # [^a-z0-9]+ matches runs (hyphens included), so consecutive separators
    # already collapse to one hyphen - no second normalization pass needed.
    name = re.sub(r"[^a-z0-9]+", "-", title.lower())
    # A title with no alphanumerics strips to ""; substitute a generic name
    # instead of violating the @ensure non-empty contract.
    return name.strip("-") or "feature"
|
|
760
|
+
|
|
761
|
+
@beartype
@require(lambda name: isinstance(name, str) and len(name) > 0, "Name must be non-empty string")
@ensure(lambda result: isinstance(result, str), "Must return string")
@ensure(lambda result: len(result) > 0, "Result must be non-empty")
def _humanize_name(self, name: str) -> str:
    """Convert component name to human-readable title."""
    import re

    # Split PascalCase apart by prefixing a space to every capital letter.
    # NOTE(review): all-caps acronyms get split per letter ("HTTPServer" ->
    # "H T T P Server") - presumably acceptable for display; confirm.
    spaced = re.sub(r"([A-Z])", r" \1", name).strip()
    # Normalize snake_case and kebab-case separators to spaces.
    words = spaced.replace("_", " ")
    words = words.replace("-", " ")
    # Title-case every word for the final human-readable form.
    return words.title()
|