specfact-cli 0.4.2__py3-none-any.whl → 0.6.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- specfact_cli/__init__.py +1 -1
- specfact_cli/agents/analyze_agent.py +2 -3
- specfact_cli/analyzers/__init__.py +2 -1
- specfact_cli/analyzers/ambiguity_scanner.py +601 -0
- specfact_cli/analyzers/code_analyzer.py +462 -30
- specfact_cli/analyzers/constitution_evidence_extractor.py +491 -0
- specfact_cli/analyzers/contract_extractor.py +419 -0
- specfact_cli/analyzers/control_flow_analyzer.py +281 -0
- specfact_cli/analyzers/requirement_extractor.py +337 -0
- specfact_cli/analyzers/test_pattern_extractor.py +330 -0
- specfact_cli/cli.py +151 -206
- specfact_cli/commands/constitution.py +281 -0
- specfact_cli/commands/enforce.py +42 -34
- specfact_cli/commands/import_cmd.py +481 -152
- specfact_cli/commands/init.py +224 -55
- specfact_cli/commands/plan.py +2133 -547
- specfact_cli/commands/repro.py +100 -78
- specfact_cli/commands/sync.py +701 -186
- specfact_cli/enrichers/constitution_enricher.py +765 -0
- specfact_cli/enrichers/plan_enricher.py +294 -0
- specfact_cli/importers/speckit_converter.py +364 -48
- specfact_cli/importers/speckit_scanner.py +65 -0
- specfact_cli/models/plan.py +42 -0
- specfact_cli/resources/mappings/node-async.yaml +49 -0
- specfact_cli/resources/mappings/python-async.yaml +47 -0
- specfact_cli/resources/mappings/speckit-default.yaml +82 -0
- specfact_cli/resources/prompts/specfact-enforce.md +185 -0
- specfact_cli/resources/prompts/specfact-import-from-code.md +626 -0
- specfact_cli/resources/prompts/specfact-plan-add-feature.md +188 -0
- specfact_cli/resources/prompts/specfact-plan-add-story.md +212 -0
- specfact_cli/resources/prompts/specfact-plan-compare.md +571 -0
- specfact_cli/resources/prompts/specfact-plan-init.md +531 -0
- specfact_cli/resources/prompts/specfact-plan-promote.md +352 -0
- specfact_cli/resources/prompts/specfact-plan-review.md +1276 -0
- specfact_cli/resources/prompts/specfact-plan-select.md +401 -0
- specfact_cli/resources/prompts/specfact-plan-update-feature.md +242 -0
- specfact_cli/resources/prompts/specfact-plan-update-idea.md +211 -0
- specfact_cli/resources/prompts/specfact-repro.md +268 -0
- specfact_cli/resources/prompts/specfact-sync.md +497 -0
- specfact_cli/resources/schemas/deviation.schema.json +61 -0
- specfact_cli/resources/schemas/plan.schema.json +204 -0
- specfact_cli/resources/schemas/protocol.schema.json +53 -0
- specfact_cli/resources/templates/github-action.yml.j2 +140 -0
- specfact_cli/resources/templates/plan.bundle.yaml.j2 +141 -0
- specfact_cli/resources/templates/pr-template.md.j2 +58 -0
- specfact_cli/resources/templates/protocol.yaml.j2 +24 -0
- specfact_cli/resources/templates/telemetry.yaml.example +35 -0
- specfact_cli/sync/__init__.py +10 -1
- specfact_cli/sync/watcher.py +268 -0
- specfact_cli/telemetry.py +440 -0
- specfact_cli/utils/acceptance_criteria.py +127 -0
- specfact_cli/utils/enrichment_parser.py +445 -0
- specfact_cli/utils/feature_keys.py +12 -3
- specfact_cli/utils/ide_setup.py +170 -0
- specfact_cli/utils/structure.py +179 -2
- specfact_cli/utils/yaml_utils.py +33 -0
- specfact_cli/validators/repro_checker.py +22 -1
- specfact_cli/validators/schema.py +15 -4
- specfact_cli-0.6.8.dist-info/METADATA +456 -0
- specfact_cli-0.6.8.dist-info/RECORD +99 -0
- {specfact_cli-0.4.2.dist-info → specfact_cli-0.6.8.dist-info}/entry_points.txt +1 -0
- specfact_cli-0.6.8.dist-info/licenses/LICENSE.md +202 -0
- specfact_cli-0.4.2.dist-info/METADATA +0 -370
- specfact_cli-0.4.2.dist-info/RECORD +0 -62
- specfact_cli-0.4.2.dist-info/licenses/LICENSE.md +0 -61
- {specfact_cli-0.4.2.dist-info → specfact_cli-0.6.8.dist-info}/WHEEL +0 -0
specfact_cli/__init__.py
CHANGED
|
@@ -294,9 +294,7 @@ Focus on semantic understanding, not just structural parsing. Generate the plan
|
|
|
294
294
|
context["dependencies"] = dependencies
|
|
295
295
|
|
|
296
296
|
# Generate summary
|
|
297
|
-
context[
|
|
298
|
-
"summary"
|
|
299
|
-
] = f"""
|
|
297
|
+
context["summary"] = f"""
|
|
300
298
|
Repository: {repo_path.name}
|
|
301
299
|
Total code files: {len(filtered_files)}
|
|
302
300
|
Languages detected: {", ".join({f.suffix for f in filtered_files[:20]})}
|
|
@@ -389,4 +387,5 @@ Dependencies: {len(dependencies)} dependency files found
|
|
|
389
387
|
product=product,
|
|
390
388
|
features=[],
|
|
391
389
|
metadata=Metadata(stage="draft", promoted_at=None, promoted_by=None),
|
|
390
|
+
clarifications=None,
|
|
392
391
|
)
|
|
@@ -5,7 +5,8 @@ This module provides classes for analyzing code to extract features,
|
|
|
5
5
|
stories, and generate plan bundles from brownfield codebases.
|
|
6
6
|
"""
|
|
7
7
|
|
|
8
|
+
from specfact_cli.analyzers.ambiguity_scanner import AmbiguityScanner
|
|
8
9
|
from specfact_cli.analyzers.code_analyzer import CodeAnalyzer
|
|
9
10
|
|
|
10
11
|
|
|
11
|
-
__all__ = ["CodeAnalyzer"]
|
|
12
|
+
__all__ = ["AmbiguityScanner", "CodeAnalyzer"]
|
|
@@ -0,0 +1,601 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Ambiguity scanner for plan bundle review.
|
|
3
|
+
|
|
4
|
+
This module analyzes plan bundles to identify ambiguities, missing information,
|
|
5
|
+
and unknowns using a structured taxonomy.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
|
|
10
|
+
from dataclasses import dataclass
|
|
11
|
+
from enum import Enum
|
|
12
|
+
|
|
13
|
+
from beartype import beartype
|
|
14
|
+
from icontract import ensure, require
|
|
15
|
+
|
|
16
|
+
from specfact_cli.models.plan import PlanBundle
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class AmbiguityStatus(str, Enum):
    """Coverage level assigned to a single finding or a whole taxonomy category."""

    # The topic is fully specified; no open questions remain.
    CLEAR = "Clear"
    # Some information exists, but gaps were detected.
    PARTIAL = "Partial"
    # The information is absent entirely.
    MISSING = "Missing"
class TaxonomyCategory(str, Enum):
    """The ten fixed categories the ambiguity scanner walks, in scan order."""

    FUNCTIONAL_SCOPE = "Functional Scope & Behavior"
    DATA_MODEL = "Domain & Data Model"
    INTERACTION_UX = "Interaction & UX Flow"
    NON_FUNCTIONAL = "Non-Functional Quality Attributes"
    INTEGRATION = "Integration & External Dependencies"
    EDGE_CASES = "Edge Cases & Failure Handling"
    CONSTRAINTS = "Constraints & Tradeoffs"
    TERMINOLOGY = "Terminology & Consistency"
    COMPLETION_SIGNALS = "Completion Signals"
    FEATURE_COMPLETENESS = "Feature/Story Completeness"
@dataclass
class AmbiguityFinding:
    """One detected ambiguity: what is unclear, where, and how important it is.

    ``impact`` and ``uncertainty`` are scores in [0.0, 1.0]; their product is
    used by the scanner to rank findings.
    """

    category: TaxonomyCategory
    status: AmbiguityStatus
    description: str
    impact: float = 0.5
    uncertainty: float = 0.5
    question: str | None = None
    related_sections: list[str] | None = None

    def __post_init__(self) -> None:
        """Normalize the mutable default and range-check both scores."""
        # None is the sentinel for "no sections given"; normalize to an empty list.
        if self.related_sections is None:
            self.related_sections = []
        # Both scores share the same [0.0, 1.0] contract; validate them uniformly.
        for label, score in (("Impact", self.impact), ("Uncertainty", self.uncertainty)):
            if not 0.0 <= score <= 1.0:
                raise ValueError(f"{label} must be 0.0-1.0, got {score}")
@dataclass
class AmbiguityReport:
    """Aggregated output of a scan: all findings, per-category coverage, and a
    headline ``priority_score`` in [0.0, 1.0] (max impact x uncertainty)."""

    findings: list[AmbiguityFinding] | None = None
    coverage: dict[TaxonomyCategory, AmbiguityStatus] | None = None
    priority_score: float = 0.0

    def __post_init__(self) -> None:
        """Replace None sentinels with empty containers and range-check the score."""
        self.findings = [] if self.findings is None else self.findings
        self.coverage = {} if self.coverage is None else self.coverage
        if not 0.0 <= self.priority_score <= 1.0:
            raise ValueError(f"Priority score must be 0.0-1.0, got {self.priority_score}")
class AmbiguityScanner:
    """
    Scanner for identifying ambiguities in plan bundles.

    Uses structured taxonomy to detect missing information, unclear requirements,
    and unknowns that should be resolved before promotion.

    Each ``_scan_*`` helper covers one :class:`TaxonomyCategory` and returns a
    (possibly empty) list of :class:`AmbiguityFinding`.
    """

    @beartype
    @require(lambda plan_bundle: isinstance(plan_bundle, PlanBundle), "Plan bundle must be PlanBundle")
    @ensure(lambda result: isinstance(result, AmbiguityReport), "Must return AmbiguityReport")
    def scan(self, plan_bundle: PlanBundle) -> AmbiguityReport:
        """
        Scan plan bundle for ambiguities.

        Args:
            plan_bundle: Plan bundle to analyze

        Returns:
            Ambiguity report with findings and coverage
        """
        findings: list[AmbiguityFinding] = []
        coverage: dict[TaxonomyCategory, AmbiguityStatus] = {}

        # Scan each taxonomy category
        for category in TaxonomyCategory:
            category_findings = self._scan_category(plan_bundle, category)
            findings.extend(category_findings)

            # Determine category status: any MISSING finding dominates PARTIAL ones.
            if not category_findings:
                coverage[category] = AmbiguityStatus.CLEAR
            elif any(f.status == AmbiguityStatus.MISSING for f in category_findings):
                coverage[category] = AmbiguityStatus.MISSING
            else:
                coverage[category] = AmbiguityStatus.PARTIAL

        # Calculate priority score (highest impact x uncertainty across all findings)
        priority_score = 0.0
        if findings:
            priority_score = max(f.impact * f.uncertainty for f in findings)

        return AmbiguityReport(findings=findings, coverage=coverage, priority_score=priority_score)

    @beartype
    @require(lambda plan_bundle: isinstance(plan_bundle, PlanBundle), "Plan bundle must be PlanBundle")
    @require(lambda category: isinstance(category, TaxonomyCategory), "Category must be TaxonomyCategory")
    @ensure(lambda result: isinstance(result, list), "Must return list of findings")
    def _scan_category(self, plan_bundle: PlanBundle, category: TaxonomyCategory) -> list[AmbiguityFinding]:
        """Dispatch to the per-category scanner for ``category``."""
        findings: list[AmbiguityFinding] = []

        if category == TaxonomyCategory.FUNCTIONAL_SCOPE:
            findings.extend(self._scan_functional_scope(plan_bundle))
        elif category == TaxonomyCategory.DATA_MODEL:
            findings.extend(self._scan_data_model(plan_bundle))
        elif category == TaxonomyCategory.INTERACTION_UX:
            findings.extend(self._scan_interaction_ux(plan_bundle))
        elif category == TaxonomyCategory.NON_FUNCTIONAL:
            findings.extend(self._scan_non_functional(plan_bundle))
        elif category == TaxonomyCategory.INTEGRATION:
            findings.extend(self._scan_integration(plan_bundle))
        elif category == TaxonomyCategory.EDGE_CASES:
            findings.extend(self._scan_edge_cases(plan_bundle))
        elif category == TaxonomyCategory.CONSTRAINTS:
            findings.extend(self._scan_constraints(plan_bundle))
        elif category == TaxonomyCategory.TERMINOLOGY:
            findings.extend(self._scan_terminology(plan_bundle))
        elif category == TaxonomyCategory.COMPLETION_SIGNALS:
            findings.extend(self._scan_completion_signals(plan_bundle))
        elif category == TaxonomyCategory.FEATURE_COMPLETENESS:
            findings.extend(self._scan_feature_completeness(plan_bundle))

        return findings

    @beartype
    def _scan_functional_scope(self, plan_bundle: PlanBundle) -> list[AmbiguityFinding]:
        """Scan functional scope and behavior."""
        findings: list[AmbiguityFinding] = []

        # Check idea narrative: anything under 20 characters is too thin to plan from.
        if plan_bundle.idea and (not plan_bundle.idea.narrative or len(plan_bundle.idea.narrative.strip()) < 20):
            findings.append(
                AmbiguityFinding(
                    category=TaxonomyCategory.FUNCTIONAL_SCOPE,
                    status=AmbiguityStatus.PARTIAL,
                    description="Idea narrative is too brief or missing",
                    impact=0.8,
                    uncertainty=0.7,
                    question="What is the core user goal and success criteria for this plan?",
                    related_sections=["idea.narrative"],
                )
            )

        # Check target users
        if plan_bundle.idea and not plan_bundle.idea.target_users:
            findings.append(
                AmbiguityFinding(
                    category=TaxonomyCategory.FUNCTIONAL_SCOPE,
                    status=AmbiguityStatus.MISSING,
                    description="Target users/personas not specified",
                    impact=0.7,
                    uncertainty=0.6,
                    question="Who are the target users or personas for this plan?",
                    related_sections=["idea.target_users"],
                )
            )

        # Check features have clear outcomes
        for feature in plan_bundle.features:
            if not feature.outcomes:
                findings.append(
                    AmbiguityFinding(
                        category=TaxonomyCategory.FUNCTIONAL_SCOPE,
                        status=AmbiguityStatus.MISSING,
                        description=f"Feature {feature.key} has no outcomes specified",
                        impact=0.6,
                        uncertainty=0.5,
                        question=f"What are the expected outcomes for feature {feature.key}?",
                        related_sections=[f"features.{feature.key}.outcomes"],
                    )
                )

        return findings

    @beartype
    def _scan_data_model(self, plan_bundle: PlanBundle) -> list[AmbiguityFinding]:
        """Scan domain and data model."""
        findings: list[AmbiguityFinding] = []

        # Hoisted: keyword list is loop-invariant.
        data_keywords = ["data", "entity", "model", "record", "database", "storage"]

        # Check if features reference data entities without constraints
        for feature in plan_bundle.features:
            # BUG FIX: the original iterated the cartesian product of
            # outcomes x acceptance, so a feature with an empty acceptance list
            # (or empty outcomes) never matched even when the other list clearly
            # mentioned data. Scan both lists independently instead.
            texts = [*feature.outcomes, *feature.acceptance]
            has_data_mentions = any(keyword in text.lower() for text in texts for keyword in data_keywords)

            if has_data_mentions and not feature.constraints:
                findings.append(
                    AmbiguityFinding(
                        category=TaxonomyCategory.DATA_MODEL,
                        status=AmbiguityStatus.PARTIAL,
                        description=f"Feature {feature.key} mentions data but has no constraints",
                        impact=0.5,
                        uncertainty=0.6,
                        question=f"What are the data model constraints for feature {feature.key}?",
                        related_sections=[f"features.{feature.key}.constraints"],
                    )
                )

        return findings

    @beartype
    def _scan_interaction_ux(self, plan_bundle: PlanBundle) -> list[AmbiguityFinding]:
        """Scan interaction and UX flow."""
        findings: list[AmbiguityFinding] = []

        # Hoisted: keyword lists are loop-invariant.
        ux_keywords = ["user", "click", "input", "form", "button", "interface", "ui"]
        error_keywords = ["error", "empty", "invalid", "validation", "failure"]

        # Check stories for UX-related acceptance criteria
        for feature in plan_bundle.features:
            for story in feature.stories:
                # UX detection looks at the story title only (acceptance is checked below).
                has_ux_mentions = any(keyword in story.title.lower() for keyword in ux_keywords)

                if has_ux_mentions:
                    # A UX-facing story should say something about error/empty states.
                    has_error_handling = any(
                        keyword in acc.lower() for acc in story.acceptance for keyword in error_keywords
                    )

                    if not has_error_handling:
                        findings.append(
                            AmbiguityFinding(
                                category=TaxonomyCategory.INTERACTION_UX,
                                status=AmbiguityStatus.PARTIAL,
                                description=f"Story {story.key} mentions UX but lacks error handling",
                                impact=0.5,
                                uncertainty=0.4,
                                question=f"What error/empty states should be handled for story {story.key}?",
                                related_sections=[f"features.{feature.key}.stories.{story.key}.acceptance"],
                            )
                        )

        return findings

    @beartype
    def _scan_non_functional(self, plan_bundle: PlanBundle) -> list[AmbiguityFinding]:
        """Scan non-functional quality attributes."""
        findings: list[AmbiguityFinding] = []

        # Vague quality adjectives without numbers are a classic NFR smell.
        if (
            plan_bundle.idea
            and plan_bundle.idea.constraints
            and any(
                term in constraint.lower()
                for constraint in plan_bundle.idea.constraints
                for term in ["robust", "scalable", "fast", "secure", "reliable", "intuitive"]
            )
        ):
            findings.append(
                AmbiguityFinding(
                    category=TaxonomyCategory.NON_FUNCTIONAL,
                    status=AmbiguityStatus.PARTIAL,
                    description="Non-functional requirements use vague terms without quantification",
                    impact=0.7,
                    uncertainty=0.8,
                    question="What are the measurable targets for non-functional requirements (performance, scalability, security)?",
                    related_sections=["idea.constraints"],
                )
            )

        return findings

    @beartype
    def _scan_integration(self, plan_bundle: PlanBundle) -> list[AmbiguityFinding]:
        """Scan integration and external dependencies."""
        findings: list[AmbiguityFinding] = []

        # Check features for external service mentions
        integration_keywords = ["api", "service", "external", "third-party", "integration", "sync"]
        for feature in plan_bundle.features:
            # BUG FIX: same cartesian-product defect as _scan_data_model — an
            # empty acceptance (or outcomes) list suppressed the check entirely.
            # Scan outcomes and acceptance independently.
            texts = [*feature.outcomes, *feature.acceptance]
            has_integration_mentions = any(
                keyword in text.lower() for text in texts for keyword in integration_keywords
            )

            if has_integration_mentions and not feature.constraints:
                findings.append(
                    AmbiguityFinding(
                        category=TaxonomyCategory.INTEGRATION,
                        status=AmbiguityStatus.PARTIAL,
                        description=f"Feature {feature.key} mentions integration but has no constraints",
                        impact=0.6,
                        uncertainty=0.5,
                        question=f"What are the external dependency constraints and failure modes for feature {feature.key}?",
                        related_sections=[f"features.{feature.key}.constraints"],
                    )
                )

        return findings

    @beartype
    def _scan_edge_cases(self, plan_bundle: PlanBundle) -> list[AmbiguityFinding]:
        """Scan edge cases and failure handling."""
        findings: list[AmbiguityFinding] = []

        # Check stories for edge case coverage
        for feature in plan_bundle.features:
            for story in feature.stories:
                # A short acceptance list with no boundary-style wording is a hint
                # that negative scenarios were never considered.
                if (
                    story.acceptance
                    and not any(
                        keyword in acc.lower()
                        for acc in story.acceptance
                        for keyword in ["edge", "corner", "boundary", "limit", "invalid", "null", "empty"]
                    )
                    and len(story.acceptance) < 3
                ):
                    # Low acceptance criteria count might indicate missing edge cases
                    findings.append(
                        AmbiguityFinding(
                            category=TaxonomyCategory.EDGE_CASES,
                            status=AmbiguityStatus.PARTIAL,
                            description=f"Story {story.key} has limited acceptance criteria, may be missing edge cases",
                            impact=0.4,
                            uncertainty=0.5,
                            question=f"What edge cases or negative scenarios should be handled for story {story.key}?",
                            related_sections=[f"features.{feature.key}.stories.{story.key}.acceptance"],
                        )
                    )

        return findings

    @beartype
    def _scan_constraints(self, plan_bundle: PlanBundle) -> list[AmbiguityFinding]:
        """Scan constraints and tradeoffs."""
        findings: list[AmbiguityFinding] = []

        # Check if idea has constraints
        if plan_bundle.idea and not plan_bundle.idea.constraints:
            findings.append(
                AmbiguityFinding(
                    category=TaxonomyCategory.CONSTRAINTS,
                    status=AmbiguityStatus.MISSING,
                    description="No technical or business constraints specified",
                    impact=0.5,
                    uncertainty=0.6,
                    question="What are the technical constraints (language, storage, hosting) and explicit tradeoffs?",
                    related_sections=["idea.constraints"],
                )
            )

        return findings

    @beartype
    def _scan_terminology(self, plan_bundle: PlanBundle) -> list[AmbiguityFinding]:
        """Scan terminology and consistency.

        Not implemented yet: cross-feature terminology comparison is a planned
        enhancement, so this category currently reports no findings. (The
        original built a term->features index and immediately discarded it;
        that dead computation has been removed — behavior is unchanged.)
        """
        return []

    @beartype
    def _scan_completion_signals(self, plan_bundle: PlanBundle) -> list[AmbiguityFinding]:
        """Scan completion signals and testability."""
        # Project-local helper kept as a lazy import, but hoisted out of the
        # per-story loop so it executes once per scan instead of once per story.
        from specfact_cli.utils.acceptance_criteria import is_code_specific_criteria

        findings: list[AmbiguityFinding] = []

        # Hoisted: pattern lists are loop-invariant.
        vague_patterns = [
            "is implemented",
            "is functional",
            "works",
            "is done",
            "is complete",
            "is ready",
        ]
        testable_keywords = [
            "must",
            "should",
            "will",
            "verify",
            "validate",
            "check",
            "given",
            "when",
            "then",
        ]

        # Check stories for testable acceptance criteria
        for feature in plan_bundle.features:
            for story in feature.stories:
                if not story.acceptance:
                    findings.append(
                        AmbiguityFinding(
                            category=TaxonomyCategory.COMPLETION_SIGNALS,
                            status=AmbiguityStatus.MISSING,
                            description=f"Story {story.key} has no acceptance criteria",
                            impact=0.8,
                            uncertainty=0.7,
                            question=f"What are the testable acceptance criteria for story {story.key}?",
                            related_sections=[f"features.{feature.key}.stories.{story.key}.acceptance"],
                        )
                    )
                else:
                    # Check for vague acceptance criteria patterns
                    # BUT: skip criteria that are already code-specific (preserve
                    # code-specific criteria produced by code2spec).
                    non_code_specific_criteria = [acc for acc in story.acceptance if not is_code_specific_criteria(acc)]

                    vague_criteria = [
                        acc
                        for acc in non_code_specific_criteria
                        if any(pattern in acc.lower() for pattern in vague_patterns)
                    ]

                    if vague_criteria:
                        findings.append(
                            AmbiguityFinding(
                                category=TaxonomyCategory.COMPLETION_SIGNALS,
                                status=AmbiguityStatus.PARTIAL,
                                description=f"Story {story.key} has vague acceptance criteria: {', '.join(vague_criteria[:2])}",
                                impact=0.7,
                                uncertainty=0.6,
                                question=f"Story {story.key} has vague acceptance criteria. Should these be converted to testable Given/When/Then format?",
                                related_sections=[f"features.{feature.key}.stories.{story.key}.acceptance"],
                            )
                        )
                    elif not any(
                        keyword in acc.lower() for acc in story.acceptance for keyword in testable_keywords
                    ):
                        # No imperative/verification wording anywhere: criteria may not be measurable.
                        findings.append(
                            AmbiguityFinding(
                                category=TaxonomyCategory.COMPLETION_SIGNALS,
                                status=AmbiguityStatus.PARTIAL,
                                description=f"Story {story.key} acceptance criteria may not be testable",
                                impact=0.5,
                                uncertainty=0.4,
                                question=f"Are the acceptance criteria for story {story.key} measurable and testable?",
                                related_sections=[f"features.{feature.key}.stories.{story.key}.acceptance"],
                            )
                        )

        return findings

    @beartype
    def _scan_feature_completeness(self, plan_bundle: PlanBundle) -> list[AmbiguityFinding]:
        """Scan feature and story completeness."""
        findings: list[AmbiguityFinding] = []

        # Hoisted: pattern lists are loop-invariant.
        # NOTE: order matters — "system must"/"system should" are checked before
        # their bare "must"/"should" prefixes.
        incomplete_patterns = [
            "system must",
            "system should",
            "must",
            "should",
        ]
        generic_patterns = [
            "implement",
            "create",
            "add",
            "set up",
        ]
        detail_keywords = ["file", "path", "method", "class", "component", "module", "function"]

        for feature in plan_bundle.features:
            # Check features without stories
            if not feature.stories:
                findings.append(
                    AmbiguityFinding(
                        category=TaxonomyCategory.FEATURE_COMPLETENESS,
                        status=AmbiguityStatus.MISSING,
                        description=f"Feature {feature.key} has no stories",
                        impact=0.9,
                        uncertainty=0.8,
                        question=f"What user stories are needed for feature {feature.key}?",
                        related_sections=[f"features.{feature.key}.stories"],
                    )
                )

            # Check features without acceptance criteria
            if not feature.acceptance:
                findings.append(
                    AmbiguityFinding(
                        category=TaxonomyCategory.FEATURE_COMPLETENESS,
                        status=AmbiguityStatus.MISSING,
                        description=f"Feature {feature.key} has no acceptance criteria",
                        impact=0.7,
                        uncertainty=0.6,
                        question=f"What are the acceptance criteria for feature {feature.key}?",
                        related_sections=[f"features.{feature.key}.acceptance"],
                    )
                )

            # Check for incomplete requirements in outcomes, e.g.
            # "System MUST Helper class" (a noun phrase with no verb/action).
            for outcome in feature.outcomes:
                outcome_lower = outcome.lower()
                for pattern in incomplete_patterns:
                    if outcome_lower.startswith(pattern):
                        remaining = outcome_lower[len(pattern) :].strip()
                        # A very short remainder naming a code artifact is likely
                        # a truncated requirement rather than a full sentence.
                        if (
                            remaining
                            and len(remaining.split()) < 3
                            and any(
                                keyword in remaining
                                for keyword in ["class", "helper", "module", "component", "service", "function"]
                            )
                        ):
                            findings.append(
                                AmbiguityFinding(
                                    category=TaxonomyCategory.FEATURE_COMPLETENESS,
                                    status=AmbiguityStatus.PARTIAL,
                                    description=f"Feature {feature.key} has incomplete requirement: '{outcome}' (missing verb/action)",
                                    impact=0.6,
                                    uncertainty=0.5,
                                    question=f"Feature {feature.key} requirement '{outcome}' appears incomplete. What should the system do?",
                                    related_sections=[f"features.{feature.key}.outcomes"],
                                )
                            )
                        # Only the first matching prefix is considered per outcome.
                        break

            # Check for generic tasks in stories (verbs with no concrete target)
            for story in feature.stories:
                if story.tasks:
                    generic_tasks = [
                        task
                        for task in story.tasks
                        if any(
                            pattern in task.lower()
                            and not any(detail in task.lower() for detail in detail_keywords)
                            for pattern in generic_patterns
                        )
                    ]
                    if generic_tasks:
                        findings.append(
                            AmbiguityFinding(
                                category=TaxonomyCategory.FEATURE_COMPLETENESS,
                                status=AmbiguityStatus.PARTIAL,
                                description=f"Story {story.key} has generic tasks without implementation details: {', '.join(generic_tasks[:2])}",
                                impact=0.4,
                                uncertainty=0.3,
                                question=f"Story {story.key} has generic tasks. Should these include file paths, method names, or component references?",
                                related_sections=[f"features.{feature.key}.stories.{story.key}.tasks"],
                            )
                        )

        return findings