specfact-cli 0.4.2__py3-none-any.whl → 0.6.8__py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as published.
Files changed (66)
  1. specfact_cli/__init__.py +1 -1
  2. specfact_cli/agents/analyze_agent.py +2 -3
  3. specfact_cli/analyzers/__init__.py +2 -1
  4. specfact_cli/analyzers/ambiguity_scanner.py +601 -0
  5. specfact_cli/analyzers/code_analyzer.py +462 -30
  6. specfact_cli/analyzers/constitution_evidence_extractor.py +491 -0
  7. specfact_cli/analyzers/contract_extractor.py +419 -0
  8. specfact_cli/analyzers/control_flow_analyzer.py +281 -0
  9. specfact_cli/analyzers/requirement_extractor.py +337 -0
  10. specfact_cli/analyzers/test_pattern_extractor.py +330 -0
  11. specfact_cli/cli.py +151 -206
  12. specfact_cli/commands/constitution.py +281 -0
  13. specfact_cli/commands/enforce.py +42 -34
  14. specfact_cli/commands/import_cmd.py +481 -152
  15. specfact_cli/commands/init.py +224 -55
  16. specfact_cli/commands/plan.py +2133 -547
  17. specfact_cli/commands/repro.py +100 -78
  18. specfact_cli/commands/sync.py +701 -186
  19. specfact_cli/enrichers/constitution_enricher.py +765 -0
  20. specfact_cli/enrichers/plan_enricher.py +294 -0
  21. specfact_cli/importers/speckit_converter.py +364 -48
  22. specfact_cli/importers/speckit_scanner.py +65 -0
  23. specfact_cli/models/plan.py +42 -0
  24. specfact_cli/resources/mappings/node-async.yaml +49 -0
  25. specfact_cli/resources/mappings/python-async.yaml +47 -0
  26. specfact_cli/resources/mappings/speckit-default.yaml +82 -0
  27. specfact_cli/resources/prompts/specfact-enforce.md +185 -0
  28. specfact_cli/resources/prompts/specfact-import-from-code.md +626 -0
  29. specfact_cli/resources/prompts/specfact-plan-add-feature.md +188 -0
  30. specfact_cli/resources/prompts/specfact-plan-add-story.md +212 -0
  31. specfact_cli/resources/prompts/specfact-plan-compare.md +571 -0
  32. specfact_cli/resources/prompts/specfact-plan-init.md +531 -0
  33. specfact_cli/resources/prompts/specfact-plan-promote.md +352 -0
  34. specfact_cli/resources/prompts/specfact-plan-review.md +1276 -0
  35. specfact_cli/resources/prompts/specfact-plan-select.md +401 -0
  36. specfact_cli/resources/prompts/specfact-plan-update-feature.md +242 -0
  37. specfact_cli/resources/prompts/specfact-plan-update-idea.md +211 -0
  38. specfact_cli/resources/prompts/specfact-repro.md +268 -0
  39. specfact_cli/resources/prompts/specfact-sync.md +497 -0
  40. specfact_cli/resources/schemas/deviation.schema.json +61 -0
  41. specfact_cli/resources/schemas/plan.schema.json +204 -0
  42. specfact_cli/resources/schemas/protocol.schema.json +53 -0
  43. specfact_cli/resources/templates/github-action.yml.j2 +140 -0
  44. specfact_cli/resources/templates/plan.bundle.yaml.j2 +141 -0
  45. specfact_cli/resources/templates/pr-template.md.j2 +58 -0
  46. specfact_cli/resources/templates/protocol.yaml.j2 +24 -0
  47. specfact_cli/resources/templates/telemetry.yaml.example +35 -0
  48. specfact_cli/sync/__init__.py +10 -1
  49. specfact_cli/sync/watcher.py +268 -0
  50. specfact_cli/telemetry.py +440 -0
  51. specfact_cli/utils/acceptance_criteria.py +127 -0
  52. specfact_cli/utils/enrichment_parser.py +445 -0
  53. specfact_cli/utils/feature_keys.py +12 -3
  54. specfact_cli/utils/ide_setup.py +170 -0
  55. specfact_cli/utils/structure.py +179 -2
  56. specfact_cli/utils/yaml_utils.py +33 -0
  57. specfact_cli/validators/repro_checker.py +22 -1
  58. specfact_cli/validators/schema.py +15 -4
  59. specfact_cli-0.6.8.dist-info/METADATA +456 -0
  60. specfact_cli-0.6.8.dist-info/RECORD +99 -0
  61. {specfact_cli-0.4.2.dist-info → specfact_cli-0.6.8.dist-info}/entry_points.txt +1 -0
  62. specfact_cli-0.6.8.dist-info/licenses/LICENSE.md +202 -0
  63. specfact_cli-0.4.2.dist-info/METADATA +0 -370
  64. specfact_cli-0.4.2.dist-info/RECORD +0 -62
  65. specfact_cli-0.4.2.dist-info/licenses/LICENSE.md +0 -61
  66. {specfact_cli-0.4.2.dist-info → specfact_cli-0.6.8.dist-info}/WHEEL +0 -0
specfact_cli/utils/acceptance_criteria.py (new file)
@@ -0,0 +1,127 @@
+ """
+ Utility functions for validating and analyzing acceptance criteria.
+
+ This module provides shared logic for detecting code-specific acceptance criteria
+ to prevent false positives in ambiguity scanning and plan enrichment.
+ """
+
+ from __future__ import annotations
+
+ import re
+
+ from beartype import beartype
+ from icontract import ensure, require
+
+
+ @beartype
+ @require(lambda acceptance: isinstance(acceptance, str), "Acceptance must be string")
+ @ensure(lambda result: isinstance(result, bool), "Must return bool")
+ def is_code_specific_criteria(acceptance: str) -> bool:
+     """
+     Check if acceptance criteria are already code-specific (should not be replaced).
+
+     Code-specific criteria contain:
+     - Method signatures: method(), method(param: type)
+     - Class names: ClassName, ClassName.method()
+     - File paths: src/, path/to/file.py
+     - Type hints: : Path, : str, -> bool
+     - Specific return values: returns dict with 'key'
+     - Specific assertions: ==, in, >=, <=
+
+     Args:
+         acceptance: Acceptance criteria text to check
+
+     Returns:
+         True if criteria are code-specific, False if vague/generic
+     """
+     acceptance_lower = acceptance.lower()
+
+     # FIRST: Check for generic placeholders that indicate non-code-specific
+     # If found, return False immediately (don't enrich)
+     generic_placeholders = [
+         "interact with the system",
+         "perform the action",
+         "access the system",
+         "works correctly",
+         "works as expected",
+         "is functional and verified",
+     ]
+
+     if any(placeholder in acceptance_lower for placeholder in generic_placeholders):
+         return False
+
+     # SECOND: Check for vague patterns that should be enriched
+     # Use word boundaries to avoid false positives (e.g., "works" in "workspace")
+     vague_patterns = [
+         r"\bis\s+implemented\b",
+         r"\bis\s+functional\b",
+         r"\bworks\b",  # Word boundary prevents matching "workspace", "framework", etc.
+         r"\bis\s+done\b",
+         r"\bis\s+complete\b",
+         r"\bis\s+ready\b",
+     ]
+     if any(re.search(pattern, acceptance_lower) for pattern in vague_patterns):
+         return False  # Not code-specific, should be enriched
+
+     # THIRD: Check for code-specific indicators
+     code_specific_patterns = [
+         # Method signatures with parentheses
+         r"\([^)]*\)",  # method() or method(param)
+         r":\s*(path|str|int|bool|dict|list|tuple|set|float|bytes|any|none)",  # Type hints
+         r"->\s*(path|str|int|bool|dict|list|tuple|set|float|bytes|any|none)",  # Return type hints
+         # File paths
+         r"src/",
+         r"tests/",
+         r"\.py",
+         r"\.yaml",
+         r"\.json",
+         # Class names (PascalCase with method/dot, or in specific contexts)
+         r"[A-Z][a-zA-Z0-9]*\.",
+         r"[A-Z][a-zA-Z0-9]*\(",
+         r"returns\s+[A-Z][a-zA-Z0-9]{3,}\b",  # Returns ClassName (4+ chars)
+         r"instance\s+of\s+[A-Z][a-zA-Z0-9]{3,}\b",  # instance of ClassName
+         r"\b[A-Z][a-zA-Z0-9]{4,}\b",  # Standalone class names (5+ chars, PascalCase) - avoids common words
+         # Specific assertions
+         r"==\s*['\"]",
+         r"in\s*\(",
+         r">=\s*\d",
+         r"<=\s*\d",
+         r"returns\s+(dict|list|tuple|set|str|int|bool|float)\s+with",
+         r"returns\s+[A-Z][a-zA-Z0-9]*",  # Returns a class instance
+         # NetworkX, Path.resolve(), etc.
+         r"nx\.",
+         r"Path\.",
+         r"resolve\(\)",
+         # Version strings, specific values
+         r"version\s*=\s*['\"]",
+         r"version\s*==\s*['\"]",
+     ]
+
+     for pattern in code_specific_patterns:
+         if re.search(pattern, acceptance, re.IGNORECASE):
+             # Verify match is not a common word
+             matches = re.findall(pattern, acceptance, re.IGNORECASE)
+             common_words = [
+                 "given",
+                 "when",
+                 "then",
+                 "user",
+                 "system",
+                 "developer",
+                 "they",
+                 "the",
+                 "with",
+                 "from",
+                 "that",
+             ]
+             # Filter out common words from matches
+             if isinstance(matches, list):
+                 actual_matches = [m for m in matches if isinstance(m, str) and m.lower() not in common_words]
+             else:
+                 actual_matches = [matches] if isinstance(matches, str) and matches.lower() not in common_words else []
+
+             if actual_matches:
+                 return True
+
+     # If no code-specific patterns found, it's not code-specific
+     return False
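For orientation, the new helper is a pure predicate over acceptance-criteria text. A minimal usage sketch (illustrative only; the example strings and the load_plan name are made up, not taken from the package):

    from specfact_cli.utils.acceptance_criteria import is_code_specific_criteria

    # Generic placeholder phrasing is treated as not code-specific (a candidate for enrichment).
    assert is_code_specific_criteria("The login flow works as expected") is False

    # Criteria that reference concrete signatures and types are left untouched.
    assert is_code_specific_criteria("load_plan(path: Path) -> PlanBundle returns dict with 'features'") is True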
specfact_cli/utils/enrichment_parser.py (new file)
@@ -0,0 +1,445 @@
+ """
+ Enrichment parser for LLM-generated enrichment reports.
+
+ This module parses Markdown enrichment reports generated by LLMs during
+ the dual-stack enrichment workflow and applies them to plan bundles.
+ """
+
+ from __future__ import annotations
+
+ import re
+ from contextlib import suppress
+ from pathlib import Path
+ from typing import Any
+
+ from beartype import beartype
+ from icontract import ensure, require
+
+ from specfact_cli.models.plan import Feature, PlanBundle, Story
+
+
+ class EnrichmentReport:
+     """Parsed enrichment report from LLM."""
+
+     def __init__(self) -> None:
+         """Initialize empty enrichment report."""
+         self.missing_features: list[dict[str, Any]] = []
+         self.confidence_adjustments: dict[str, float] = {}
+         self.business_context: dict[str, list[str]] = {
+             "priorities": [],
+             "constraints": [],
+             "unknowns": [],
+         }
+
+     @beartype
+     @require(lambda feature: isinstance(feature, dict), "Feature must be dictionary")
+     def add_missing_feature(self, feature: dict[str, Any]) -> None:
+         """Add a missing feature discovered by LLM."""
+         self.missing_features.append(feature)
+
+     @beartype
+     @require(lambda feature_key: isinstance(feature_key, str), "Feature key must be string")
+     @require(lambda confidence: 0.0 <= confidence <= 1.0, "Confidence must be 0.0-1.0")
+     def adjust_confidence(self, feature_key: str, confidence: float) -> None:
+         """Adjust confidence score for a feature."""
+         self.confidence_adjustments[feature_key] = confidence
+
+     @beartype
+     @require(lambda category: isinstance(category, str), "Category must be string")
+     @require(lambda items: isinstance(items, list), "Items must be list")
+     def add_business_context(self, category: str, items: list[str]) -> None:
+         """Add business context items."""
+         if category in self.business_context:
+             self.business_context[category].extend(items)
+
+
+ class EnrichmentParser:
+     """Parser for Markdown enrichment reports."""
+
+     @beartype
+     @require(
+         lambda report_path: isinstance(report_path, (Path, str)) and bool(str(report_path).strip()),
+         "Report path must be non-empty Path or str",
+     )
+     @ensure(lambda result: isinstance(result, EnrichmentReport), "Must return EnrichmentReport")
+     def parse(self, report_path: Path | str) -> EnrichmentReport:
+         """
+         Parse Markdown enrichment report.
+
+         Args:
+             report_path: Path to Markdown enrichment report (must be non-empty)
+
+         Returns:
+             Parsed EnrichmentReport
+
+         Raises:
+             FileNotFoundError: If report file doesn't exist
+             ValueError: If report_path is empty or invalid
+         """
+         report_path = Path(report_path)
+         if not str(report_path).strip():
+             raise ValueError("Report path cannot be empty")
+         if not report_path.exists():
+             raise FileNotFoundError(f"Enrichment report not found: {report_path}")
+         if report_path.is_dir():
+             raise ValueError(f"Report path must be a file, not a directory: {report_path}")
+
+         content = report_path.read_text(encoding="utf-8")
+         report = EnrichmentReport()
+
+         # Parse missing features section
+         self._parse_missing_features(content, report)
+
+         # Parse confidence adjustments section
+         self._parse_confidence_adjustments(content, report)
+
+         # Parse business context section
+         self._parse_business_context(content, report)
+
+         return report
+
+     @beartype
+     @require(lambda content: isinstance(content, str), "Content must be string")
+     @require(lambda report: isinstance(report, EnrichmentReport), "Report must be EnrichmentReport")
+     def _parse_missing_features(self, content: str, report: EnrichmentReport) -> None:
+         """Parse missing features section from enrichment report."""
+         # Look for "Missing Features" or "Missing features" section
+         pattern = r"##\s*(?:Missing\s+)?Features?\s*(?:\(.*?\))?\s*\n(.*?)(?=##|\Z)"
+         match = re.search(pattern, content, re.IGNORECASE | re.DOTALL)
+         if not match:
+             return
+
+         section = match.group(1)
+
+         # Extract individual features (numbered or bulleted)
+         feature_pattern = r"(?:^|\n)(?:\d+\.|\*|\-)\s*(.+?)(?=\n(?:^\d+\.|\*|\-|\Z))"
+         features = re.findall(feature_pattern, section, re.MULTILINE | re.DOTALL)
+
+         for feature_text in features:
+             feature = self._parse_feature_block(feature_text)
+             if feature:
+                 report.add_missing_feature(feature)
+
+     @beartype
+     @require(lambda feature_text: isinstance(feature_text, str), "Feature text must be string")
+     @ensure(lambda result: result is None or isinstance(result, dict), "Must return None or dict")
+     def _parse_feature_block(self, feature_text: str) -> dict[str, Any] | None:
+         """Parse a single feature block from enrichment report."""
+         feature: dict[str, Any] = {
+             "key": "",
+             "title": "",
+             "confidence": 0.5,
+             "outcomes": [],
+             "stories": [],
+         }
+
+         # Extract key (e.g., "FEATURE-IDEINTEGRATION" or "Suggested key: FEATURE-IDEINTEGRATION")
+         key_match = re.search(r"(?:key|Key):\s*([A-Z0-9_-]+)", feature_text, re.IGNORECASE)
+         if key_match:
+             feature["key"] = key_match.group(1)
+         else:
+             # Try to extract from title
+             title_match = re.search(r"^\*\*([^*]+)\*\*", feature_text, re.MULTILINE)
+             if title_match:
+                 # Generate key from title
+                 title = title_match.group(1).strip()
+                 feature["title"] = title
+                 feature["key"] = f"FEATURE-{title.upper().replace(' ', '').replace('-', '')[:20]}"
+
+         # Extract title
+         if not feature["title"]:
+             title_match = re.search(r"(?:title|Title):\s*(.+?)(?:\n|$)", feature_text, re.IGNORECASE)
+             if title_match:
+                 feature["title"] = title_match.group(1).strip()
+
+         # Extract confidence
+         confidence_match = re.search(r"(?:confidence|Confidence):\s*([0-9.]+)", feature_text, re.IGNORECASE)
+         if confidence_match:
+             with suppress(ValueError):
+                 feature["confidence"] = float(confidence_match.group(1))
+
+         # Extract outcomes
+         outcomes_match = re.search(
+             r"(?:outcomes?|Outcomes?):\s*(.+?)(?:\n(?:stories?|Stories?)|$)", feature_text, re.IGNORECASE | re.DOTALL
+         )
+         if outcomes_match:
+             outcomes_text = outcomes_match.group(1)
+             # Split by lines or bullets
+             outcomes = [o.strip() for o in re.split(r"\n|,", outcomes_text) if o.strip()]
+             feature["outcomes"] = outcomes
+
+         # Extract business value or reason
+         reason_match = re.search(
+             r"(?:reason|Reason|Business value):\s*(.+?)(?:\n(?:stories?|Stories?)|$)",
+             feature_text,
+             re.IGNORECASE | re.DOTALL,
+         )
+         if reason_match:
+             reason = reason_match.group(1).strip()
+             if reason and reason not in feature["outcomes"]:
+                 feature["outcomes"].append(reason)
+
+         # Extract stories (REQUIRED for features to pass promotion validation)
+         stories_match = re.search(
+             r"(?:stories?|Stories?):\s*(.+?)(?:\n(?:##|$))", feature_text, re.IGNORECASE | re.DOTALL
+         )
+         if stories_match:
+             stories_text = stories_match.group(1)
+             stories = self._parse_stories_from_text(stories_text, feature.get("key", ""))
+             feature["stories"] = stories
+
+         # Only return if we have at least a key or title
+         if feature["key"] or feature["title"]:
+             return feature
+
+         return None
+
+     @beartype
+     @require(lambda stories_text: isinstance(stories_text, str), "Stories text must be string")
+     @require(lambda feature_key: isinstance(feature_key, str), "Feature key must be string")
+     @ensure(lambda result: isinstance(result, list), "Must return list of story dicts")
+     def _parse_stories_from_text(self, stories_text: str, feature_key: str) -> list[dict[str, Any]]:
+         """Parse stories from enrichment report text."""
+         stories: list[dict[str, Any]] = []
+
+         # Extract individual stories (numbered, bulleted, or sub-headers)
+         # Pattern matches: "1. Story title", "- Story title", "### Story title", etc.
+         story_pattern = r"(?:^|\n)(?:(?:\d+\.|\*|\-|\#\#\#)\s*)?(.+?)(?=\n(?:^\d+\.|\*|\-|\#\#\#|\Z))"
+         story_matches = re.findall(story_pattern, stories_text, re.MULTILINE | re.DOTALL)
+
+         for idx, story_text in enumerate(story_matches, start=1):
+             story = self._parse_story_block(story_text, feature_key, idx)
+             if story:
+                 stories.append(story)
+
+         return stories
+
+     @beartype
+     @require(lambda story_text: isinstance(story_text, str), "Story text must be string")
+     @require(lambda feature_key: isinstance(feature_key, str), "Feature key must be string")
+     @require(
+         lambda story_number: isinstance(story_number, int) and story_number > 0, "Story number must be positive int"
+     )
+     @ensure(lambda result: result is None or isinstance(result, dict), "Must return None or story dict")
+     def _parse_story_block(self, story_text: str, feature_key: str, story_number: int) -> dict[str, Any] | None:
+         """Parse a single story block from enrichment report."""
+         story: dict[str, Any] = {
+             "key": "",
+             "title": "",
+             "acceptance": [],
+             "story_points": None,
+             "value_points": None,
+             "tasks": [],
+             "confidence": 0.8,
+         }
+
+         # Generate story key from feature key and number
+         if feature_key:
+             # Extract base from feature key (e.g., "FEATURE-DUALSTACK" -> "DUALSTACK")
+             base = feature_key.replace("FEATURE-", "").upper()
+             story["key"] = f"STORY-{base}-{story_number:03d}"
+         else:
+             story["key"] = f"STORY-{story_number:03d}"
+
+         # Extract title (first line or after "Title:")
+         title_match = re.search(r"(?:title|Title):\s*(.+?)(?:\n|$)", story_text, re.IGNORECASE)
+         if title_match:
+             story["title"] = title_match.group(1).strip()
+         else:
+             # Use first line as title
+             first_line = story_text.split("\n")[0].strip()
+             if first_line and not first_line.startswith("#"):
+                 story["title"] = first_line
+
+         # Extract acceptance criteria
+         acceptance_match = re.search(
+             r"(?:acceptance|Acceptance|criteria|Criteria):\s*(.+?)(?:\n(?:tasks?|Tasks?|points?|Points?)|$)",
+             story_text,
+             re.IGNORECASE | re.DOTALL,
+         )
+         if acceptance_match:
+             acceptance_text = acceptance_match.group(1)
+             acceptance = [a.strip() for a in re.split(r"\n|,", acceptance_text) if a.strip()]
+             story["acceptance"] = acceptance
+         else:
+             # Default acceptance if none found
+             story["acceptance"] = [f"{story.get('title', 'Story')} works as expected"]
+
+         # Extract tasks
+         tasks_match = re.search(
+             r"(?:tasks?|Tasks?):\s*(.+?)(?:\n(?:points?|Points?|$))", story_text, re.IGNORECASE | re.DOTALL
+         )
+         if tasks_match:
+             tasks_text = tasks_match.group(1)
+             tasks = [t.strip() for t in re.split(r"\n|,", tasks_text) if t.strip()]
+             story["tasks"] = tasks
+
+         # Extract story points
+         story_points_match = re.search(r"(?:story\s+points?|Story\s+Points?):\s*(\d+)", story_text, re.IGNORECASE)
+         if story_points_match:
+             with suppress(ValueError):
+                 story["story_points"] = int(story_points_match.group(1))
+
+         # Extract value points
+         value_points_match = re.search(r"(?:value\s+points?|Value\s+Points?):\s*(\d+)", story_text, re.IGNORECASE)
+         if value_points_match:
+             with suppress(ValueError):
+                 story["value_points"] = int(value_points_match.group(1))
+
+         # Only return if we have at least a title
+         if story["title"]:
+             return story
+
+         return None
+
+     @beartype
+     @require(lambda content: isinstance(content, str), "Content must be string")
+     @require(lambda report: isinstance(report, EnrichmentReport), "Report must be EnrichmentReport")
+     def _parse_confidence_adjustments(self, content: str, report: EnrichmentReport) -> None:
+         """Parse confidence adjustments section from enrichment report."""
+         # Look for "Confidence Adjustments" or "Confidence adjustments" section
+         pattern = r"##\s*Confidence\s+Adjustments?\s*\n(.*?)(?=##|\Z)"
+         match = re.search(pattern, content, re.IGNORECASE | re.DOTALL)
+         if not match:
+             return
+
+         section = match.group(1)
+
+         # Extract adjustments (format: "FEATURE-KEY → 0.95" or "FEATURE-KEY: 0.95")
+         adjustment_pattern = r"([A-Z0-9_-]+)\s*(?:→|:)\s*([0-9.]+)"
+         adjustments = re.findall(adjustment_pattern, section, re.IGNORECASE)
+
+         for feature_key, confidence_str in adjustments:
+             try:
+                 confidence = float(confidence_str)
+                 if 0.0 <= confidence <= 1.0:
+                     report.adjust_confidence(feature_key.upper(), confidence)
+             except ValueError:
+                 pass
+
+     @beartype
+     @require(lambda content: isinstance(content, str), "Content must be string")
+     @require(lambda report: isinstance(report, EnrichmentReport), "Report must be EnrichmentReport")
+     def _parse_business_context(self, content: str, report: EnrichmentReport) -> None:
+         """Parse business context section from enrichment report."""
+         # Look for "Business Context" section
+         pattern = r"##\s*Business\s+Context\s*\n(.*?)(?=##|\Z)"
+         match = re.search(pattern, content, re.IGNORECASE | re.DOTALL)
+         if not match:
+             return
+
+         section = match.group(1)
+
+         # Extract priorities
+         priorities_match = re.search(
+             r"(?:Priorities?|Priority):\s*(.+?)(?:\n(?:Constraints?|Unknowns?)|$)", section, re.IGNORECASE | re.DOTALL
+         )
+         if priorities_match:
+             priorities_text = priorities_match.group(1)
+             priorities = [
+                 p.strip() for p in re.split(r"\n|,", priorities_text) if p.strip() and not p.strip().startswith("-")
+             ]
+             report.add_business_context("priorities", priorities)
+
+         # Extract constraints
+         constraints_match = re.search(
+             r"(?:Constraints?|Constraint):\s*(.+?)(?:\n(?:Unknowns?|Priorities?)|$)", section, re.IGNORECASE | re.DOTALL
+         )
+         if constraints_match:
+             constraints_text = constraints_match.group(1)
+             constraints = [
+                 c.strip() for c in re.split(r"\n|,", constraints_text) if c.strip() and not c.strip().startswith("-")
+             ]
+             report.add_business_context("constraints", constraints)
+
+         # Extract unknowns
+         unknowns_match = re.search(
+             r"(?:Unknowns?|Unknown):\s*(.+?)(?:\n(?:Priorities?|Constraints?)|$)", section, re.IGNORECASE | re.DOTALL
+         )
+         if unknowns_match:
+             unknowns_text = unknowns_match.group(1)
+             unknowns = [
+                 u.strip() for u in re.split(r"\n|,", unknowns_text) if u.strip() and not u.strip().startswith("-")
+             ]
+             report.add_business_context("unknowns", unknowns)
+
+
+ @beartype
+ @require(lambda plan_bundle: isinstance(plan_bundle, PlanBundle), "Plan bundle must be PlanBundle")
+ @require(lambda enrichment: isinstance(enrichment, EnrichmentReport), "Enrichment must be EnrichmentReport")
+ @ensure(lambda result: isinstance(result, PlanBundle), "Must return PlanBundle")
+ def apply_enrichment(plan_bundle: PlanBundle, enrichment: EnrichmentReport) -> PlanBundle:
+     """
+     Apply enrichment report to plan bundle.
+
+     Args:
+         plan_bundle: Original plan bundle from CLI
+         enrichment: Parsed enrichment report
+
+     Returns:
+         Enriched plan bundle
+     """
+     # Create a copy to avoid mutating the original
+     enriched = plan_bundle.model_copy(deep=True)
+
+     # Apply confidence adjustments
+     feature_keys = {f.key: i for i, f in enumerate(enriched.features)}
+     for feature_key, new_confidence in enrichment.confidence_adjustments.items():
+         if feature_key in feature_keys:
+             enriched.features[feature_keys[feature_key]].confidence = new_confidence
+
+     # Add missing features
+     for missing_feature_data in enrichment.missing_features:
+         # Check if feature already exists
+         feature_key = missing_feature_data.get("key", "")
+         if feature_key and feature_key in feature_keys:
+             # Update existing feature instead of adding duplicate
+             existing_idx = feature_keys[feature_key]
+             existing_feature = enriched.features[existing_idx]
+             # Update confidence if provided
+             if "confidence" in missing_feature_data:
+                 existing_feature.confidence = missing_feature_data["confidence"]
+             # Merge outcomes
+             if "outcomes" in missing_feature_data:
+                 for outcome in missing_feature_data["outcomes"]:
+                     if outcome not in existing_feature.outcomes:
+                         existing_feature.outcomes.append(outcome)
+         else:
+             # Create new feature with stories (if provided)
+             stories_data = missing_feature_data.get("stories", [])
+             stories: list[Story] = []
+             for story_data in stories_data:
+                 if isinstance(story_data, dict):
+                     story = Story(
+                         key=story_data.get("key", f"STORY-{len(stories) + 1:03d}"),
+                         title=story_data.get("title", "Untitled Story"),
+                         acceptance=story_data.get("acceptance", []),
+                         story_points=story_data.get("story_points"),
+                         value_points=story_data.get("value_points"),
+                         tasks=story_data.get("tasks", []),
+                         confidence=story_data.get("confidence", 0.8),
+                         draft=False,
+                         scenarios=None,
+                         contracts=None,
+                     )
+                     stories.append(story)
+
+             feature = Feature(
+                 key=missing_feature_data.get("key", f"FEATURE-{len(enriched.features) + 1:03d}"),
+                 title=missing_feature_data.get("title", "Untitled Feature"),
+                 outcomes=missing_feature_data.get("outcomes", []),
+                 acceptance=[],
+                 constraints=[],
+                 stories=stories,  # Include parsed stories
+                 confidence=missing_feature_data.get("confidence", 0.5),
+                 draft=False,
+             )
+             enriched.features.append(feature)
+
+     # Apply business context to idea if present
+     if enriched.idea and enrichment.business_context and enrichment.business_context.get("constraints"):
+         if enriched.idea.constraints is None:
+             enriched.idea.constraints = []
+         enriched.idea.constraints.extend(enrichment.business_context["constraints"])
+
+     return enriched
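Read together, the regexes above imply a loosely structured Markdown report: sections such as "## Missing Features", "## Confidence Adjustments" (with lines like "FEATURE-KEY → 0.95"), and "## Business Context". A rough sketch of how the parser and apply_enrichment would be wired together (illustrative; the report filename and the bundle-loading step are assumptions, not shown in this diff):

    from pathlib import Path

    from specfact_cli.utils.enrichment_parser import EnrichmentParser, apply_enrichment

    report = EnrichmentParser().parse(Path("enrichment-report.md"))  # hypothetical path
    print(report.confidence_adjustments)  # e.g. {"FEATURE-IDEINTEGRATION": 0.95}, if adjusted in the report
    print(len(report.missing_features))   # features the LLM flagged as missing from the bundle

    # Applying the report requires a PlanBundle loaded elsewhere (e.g. by the sync/plan commands):
    # enriched = apply_enrichment(plan_bundle, report)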
specfact_cli/utils/feature_keys.py
@@ -21,12 +21,14 @@ def normalize_feature_key(key: str) -> str:
      - `FEATURE-CONTRACTFIRSTTESTMANAGER` -> `CONTRACTFIRSTTESTMANAGER`
      - `FEATURE-001` -> `001`
      - `CONTRACT_FIRST_TEST_MANAGER` -> `CONTRACTFIRSTTESTMANAGER`
+     - `041-ide-integration-system` -> `IDEINTEGRATIONSYSTEM`
+     - `047-ide-integration-system` -> `IDEINTEGRATIONSYSTEM` (same as above)

      Args:
          key: Feature key in any format

      Returns:
-         Normalized key (uppercase, no prefixes, no underscores)
+         Normalized key (uppercase, no prefixes, no underscores, no hyphens)

      Examples:
          >>> normalize_feature_key("000_CONTRACT_FIRST_TEST_MANAGER")
@@ -35,9 +37,16 @@ def normalize_feature_key(key: str) -> str:
          'CONTRACTFIRSTTESTMANAGER'
          >>> normalize_feature_key("FEATURE-001")
          '001'
+         >>> normalize_feature_key("041-ide-integration-system")
+         'IDEINTEGRATIONSYSTEM'
      """
-     # Remove common prefixes
-     key = key.replace("FEATURE-", "").replace("000_", "").replace("001_", "")
+     # Remove common prefixes (FEATURE-, and numbered prefixes like 000_, 001_, 002_, etc.)
+     key = key.replace("FEATURE-", "")
+     # Remove numbered prefixes with underscores (000_, 001_, 002_, ..., 999_)
+     key = re.sub(r"^\d{3}_", "", key)
+     # Remove numbered prefixes with hyphens (000-, 001-, 002-, ..., 999-)
+     # This handles Spec-Kit directory format like "041-ide-integration-system"
+     key = re.sub(r"^\d{3}-", "", key)

      # Remove underscores and spaces, convert to uppercase
      return re.sub(r"[_\s-]", "", key).upper()
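One practical consequence of the hyphen handling, noted in the updated docstring above: differently numbered Spec-Kit directories collapse to the same normalized key, roughly:

    from specfact_cli.utils.feature_keys import normalize_feature_key

    normalize_feature_key("041-ide-integration-system")  # -> 'IDEINTEGRATIONSYSTEM'
    normalize_feature_key("047-ide-integration-system")  # -> 'IDEINTEGRATIONSYSTEM' (same key)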