tapps-agents 3.5.39__py3-none-any.whl → 3.5.40__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tapps_agents/__init__.py +2 -2
- tapps_agents/agents/enhancer/agent.py +2728 -2728
- tapps_agents/agents/implementer/agent.py +35 -13
- tapps_agents/agents/reviewer/agent.py +43 -10
- tapps_agents/agents/reviewer/scoring.py +59 -68
- tapps_agents/agents/reviewer/tools/__init__.py +24 -0
- tapps_agents/agents/reviewer/tools/ruff_grouping.py +250 -0
- tapps_agents/agents/reviewer/tools/scoped_mypy.py +284 -0
- tapps_agents/beads/__init__.py +11 -0
- tapps_agents/beads/hydration.py +213 -0
- tapps_agents/beads/specs.py +206 -0
- tapps_agents/cli/commands/health.py +19 -3
- tapps_agents/cli/commands/simple_mode.py +842 -676
- tapps_agents/cli/commands/task.py +219 -0
- tapps_agents/cli/commands/top_level.py +13 -0
- tapps_agents/cli/main.py +658 -651
- tapps_agents/cli/parsers/top_level.py +1978 -1881
- tapps_agents/core/config.py +1622 -1622
- tapps_agents/core/init_project.py +3012 -2897
- tapps_agents/epic/markdown_sync.py +105 -0
- tapps_agents/epic/orchestrator.py +1 -2
- tapps_agents/epic/parser.py +427 -423
- tapps_agents/experts/adaptive_domain_detector.py +0 -2
- tapps_agents/experts/knowledge/api-design-integration/api-security-patterns.md +15 -15
- tapps_agents/experts/knowledge/api-design-integration/external-api-integration.md +19 -44
- tapps_agents/health/checks/outcomes.backup_20260204_064058.py +324 -0
- tapps_agents/health/checks/outcomes.backup_20260204_064256.py +324 -0
- tapps_agents/health/checks/outcomes.backup_20260204_064600.py +324 -0
- tapps_agents/health/checks/outcomes.py +134 -46
- tapps_agents/health/orchestrator.py +12 -4
- tapps_agents/hooks/__init__.py +33 -0
- tapps_agents/hooks/config.py +140 -0
- tapps_agents/hooks/events.py +135 -0
- tapps_agents/hooks/executor.py +128 -0
- tapps_agents/hooks/manager.py +143 -0
- tapps_agents/session/__init__.py +19 -0
- tapps_agents/session/manager.py +256 -0
- tapps_agents/simple_mode/code_snippet_handler.py +382 -0
- tapps_agents/simple_mode/intent_parser.py +29 -4
- tapps_agents/simple_mode/orchestrators/base.py +185 -59
- tapps_agents/simple_mode/orchestrators/build_orchestrator.py +2667 -2642
- tapps_agents/simple_mode/orchestrators/fix_orchestrator.py +2 -2
- tapps_agents/simple_mode/workflow_suggester.py +37 -3
- tapps_agents/workflow/agent_handlers/implementer_handler.py +18 -3
- tapps_agents/workflow/cursor_executor.py +2196 -2118
- tapps_agents/workflow/direct_execution_fallback.py +16 -3
- tapps_agents/workflow/message_formatter.py +2 -1
- tapps_agents/workflow/parallel_executor.py +43 -4
- tapps_agents/workflow/parser.py +375 -357
- tapps_agents/workflow/rules_generator.py +337 -337
- tapps_agents/workflow/skill_invoker.py +9 -3
- {tapps_agents-3.5.39.dist-info → tapps_agents-3.5.40.dist-info}/METADATA +5 -1
- {tapps_agents-3.5.39.dist-info → tapps_agents-3.5.40.dist-info}/RECORD +57 -53
- tapps_agents/agents/analyst/SKILL.md +0 -85
- tapps_agents/agents/architect/SKILL.md +0 -80
- tapps_agents/agents/debugger/SKILL.md +0 -66
- tapps_agents/agents/designer/SKILL.md +0 -78
- tapps_agents/agents/documenter/SKILL.md +0 -95
- tapps_agents/agents/enhancer/SKILL.md +0 -189
- tapps_agents/agents/implementer/SKILL.md +0 -117
- tapps_agents/agents/improver/SKILL.md +0 -55
- tapps_agents/agents/ops/SKILL.md +0 -64
- tapps_agents/agents/orchestrator/SKILL.md +0 -238
- tapps_agents/agents/planner/story_template.md +0 -37
- tapps_agents/agents/reviewer/templates/quality-dashboard.html.j2 +0 -150
- tapps_agents/agents/tester/SKILL.md +0 -71
- {tapps_agents-3.5.39.dist-info → tapps_agents-3.5.40.dist-info}/WHEEL +0 -0
- {tapps_agents-3.5.39.dist-info → tapps_agents-3.5.40.dist-info}/entry_points.txt +0 -0
- {tapps_agents-3.5.39.dist-info → tapps_agents-3.5.40.dist-info}/licenses/LICENSE +0 -0
- {tapps_agents-3.5.39.dist-info → tapps_agents-3.5.40.dist-info}/top_level.txt +0 -0
tapps_agents/epic/parser.py
CHANGED
|
@@ -1,423 +1,427 @@
|
|
|
1
|
-
"""
|
|
2
|
-
Epic Document Parser
|
|
3
|
-
|
|
4
|
-
Parses Epic PRD markdown documents to extract stories, dependencies, and acceptance criteria.
|
|
5
|
-
Supports BMAD-standard Epic format.
|
|
6
|
-
"""
|
|
7
|
-
|
|
8
|
-
import re
|
|
9
|
-
from pathlib import Path
|
|
10
|
-
from typing import Any
|
|
11
|
-
|
|
12
|
-
from ..workflow.common_enums import Priority
|
|
13
|
-
|
|
14
|
-
from .models import AcceptanceCriterion, EpicDocument, Story
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
class EpicParser:
|
|
18
|
-
"""
|
|
19
|
-
Parser for Epic markdown documents.
|
|
20
|
-
|
|
21
|
-
Supports parsing Epic documents in the format:
|
|
22
|
-
- Epic Goal
|
|
23
|
-
- Epic Description
|
|
24
|
-
- Stories (numbered X.Y)
|
|
25
|
-
- Dependencies
|
|
26
|
-
- Acceptance Criteria
|
|
27
|
-
- Execution Notes
|
|
28
|
-
- Definition of Done
|
|
29
|
-
"""
|
|
30
|
-
|
|
31
|
-
def __init__(self, project_root: Path | None = None):
|
|
32
|
-
"""
|
|
33
|
-
Initialize Epic parser.
|
|
34
|
-
|
|
35
|
-
Args:
|
|
36
|
-
project_root: Root directory of the project (defaults to current directory)
|
|
37
|
-
"""
|
|
38
|
-
self.project_root = project_root or Path.cwd()
|
|
39
|
-
|
|
40
|
-
def parse(self, epic_path: Path | str) -> EpicDocument:
|
|
41
|
-
"""
|
|
42
|
-
Parse an Epic document.
|
|
43
|
-
|
|
44
|
-
Args:
|
|
45
|
-
epic_path: Path to Epic markdown file
|
|
46
|
-
|
|
47
|
-
Returns:
|
|
48
|
-
Parsed EpicDocument
|
|
49
|
-
|
|
50
|
-
Raises:
|
|
51
|
-
FileNotFoundError: If Epic file doesn't exist
|
|
52
|
-
ValueError: If parsing fails
|
|
53
|
-
"""
|
|
54
|
-
epic_path = Path(epic_path)
|
|
55
|
-
if not epic_path.is_absolute():
|
|
56
|
-
# Try relative to project root, then docs/prd/
|
|
57
|
-
if (self.project_root / epic_path).exists():
|
|
58
|
-
epic_path = self.project_root / epic_path
|
|
59
|
-
elif (self.project_root / "docs" / "prd" / epic_path.name).exists():
|
|
60
|
-
epic_path = self.project_root / "docs" / "prd" / epic_path.name
|
|
61
|
-
else:
|
|
62
|
-
epic_path = self.project_root / epic_path
|
|
63
|
-
|
|
64
|
-
if not epic_path.exists():
|
|
65
|
-
raise FileNotFoundError(f"Epic document not found: {epic_path}")
|
|
66
|
-
|
|
67
|
-
content = epic_path.read_text(encoding="utf-8")
|
|
68
|
-
|
|
69
|
-
# Extract Epic metadata
|
|
70
|
-
epic_number = self._extract_epic_number(content, epic_path)
|
|
71
|
-
title = self._extract_title(content)
|
|
72
|
-
goal = self._extract_goal(content)
|
|
73
|
-
description = self._extract_description(content)
|
|
74
|
-
priority = self._extract_priority(content)
|
|
75
|
-
timeline = self._extract_timeline(content)
|
|
76
|
-
prerequisites = self._extract_prerequisites(content)
|
|
77
|
-
execution_notes = self._extract_execution_notes(content)
|
|
78
|
-
definition_of_done = self._extract_definition_of_done(content)
|
|
79
|
-
status = self._extract_status(content)
|
|
80
|
-
|
|
81
|
-
# Extract stories
|
|
82
|
-
stories = self._extract_stories(content, epic_number)
|
|
83
|
-
|
|
84
|
-
return EpicDocument(
|
|
85
|
-
epic_number=epic_number,
|
|
86
|
-
title=title,
|
|
87
|
-
goal=goal,
|
|
88
|
-
description=description,
|
|
89
|
-
stories=stories,
|
|
90
|
-
priority=priority,
|
|
91
|
-
timeline=timeline,
|
|
92
|
-
prerequisites=prerequisites,
|
|
93
|
-
execution_notes=execution_notes,
|
|
94
|
-
definition_of_done=definition_of_done,
|
|
95
|
-
status=status,
|
|
96
|
-
file_path=epic_path,
|
|
97
|
-
)
|
|
98
|
-
|
|
99
|
-
def _extract_epic_number(self, content: str, file_path: Path) -> int:
|
|
100
|
-
"""Extract Epic number from content or filename."""
|
|
101
|
-
# Try to extract from title: "# Epic 8: ..."
|
|
102
|
-
match = re.search(r"^#\s+Epic\s+(\d+):", content, re.MULTILINE)
|
|
103
|
-
if match:
|
|
104
|
-
return int(match.group(1))
|
|
105
|
-
|
|
106
|
-
# Try to extract from filename: "epic-8-*.md"
|
|
107
|
-
match = re.search(r"epic-(\d+)", file_path.name, re.IGNORECASE)
|
|
108
|
-
if match:
|
|
109
|
-
return int(match.group(1))
|
|
110
|
-
|
|
111
|
-
raise ValueError(f"Could not extract Epic number from {file_path}")
|
|
112
|
-
|
|
113
|
-
def _extract_title(self, content: str) -> str:
|
|
114
|
-
"""Extract Epic title from content."""
|
|
115
|
-
# Title is usually in the first heading: "# Epic 8: Title"
|
|
116
|
-
match = re.search(r"^#\s+Epic\s+\d+:\s*(.+)$", content, re.MULTILINE)
|
|
117
|
-
if match:
|
|
118
|
-
return match.group(1).strip()
|
|
119
|
-
return "Untitled Epic"
|
|
120
|
-
|
|
121
|
-
def _extract_goal(self, content: str) -> str:
|
|
122
|
-
"""Extract Epic Goal section."""
|
|
123
|
-
goal_match = re.search(
|
|
124
|
-
r"##\s+Epic\s+Goal\s*\n\n(.+?)(?=\n##|\Z)", content, re.DOTALL
|
|
125
|
-
)
|
|
126
|
-
if goal_match:
|
|
127
|
-
return goal_match.group(1).strip()
|
|
128
|
-
return ""
|
|
129
|
-
|
|
130
|
-
def _extract_description(self, content: str) -> str:
|
|
131
|
-
"""Extract Epic Description section."""
|
|
132
|
-
desc_match = re.search(
|
|
133
|
-
r"##\s+Epic\s+Description\s*\n\n(.+?)(?=\n##\s+(?:Stories|Execution|Definition)|\Z)",
|
|
134
|
-
content,
|
|
135
|
-
re.DOTALL,
|
|
136
|
-
)
|
|
137
|
-
if desc_match:
|
|
138
|
-
return desc_match.group(1).strip()
|
|
139
|
-
return ""
|
|
140
|
-
|
|
141
|
-
def _extract_priority(self, content: str) -> Priority | None:
|
|
142
|
-
"""Extract priority from content."""
|
|
143
|
-
# Look for "Priority: High" or similar patterns
|
|
144
|
-
match = re.search(
|
|
145
|
-
r"(?:Priority|priority):\s*(\w+)", content, re.IGNORECASE | re.MULTILINE
|
|
146
|
-
)
|
|
147
|
-
if not match:
|
|
148
|
-
return None
|
|
149
|
-
priority_str = match.group(1).strip().lower()
|
|
150
|
-
try:
|
|
151
|
-
return Priority(priority_str)
|
|
152
|
-
except ValueError:
|
|
153
|
-
# If priority string doesn't match enum, return None
|
|
154
|
-
return None
|
|
155
|
-
|
|
156
|
-
def _extract_timeline(self, content: str) -> str | None:
|
|
157
|
-
"""Extract timeline from content."""
|
|
158
|
-
# Look for "Timeline: 1-2 weeks" or similar
|
|
159
|
-
match = re.search(
|
|
160
|
-
r"(?:Timeline|timeline):\s*(.+?)(?:\n|$)", content, re.IGNORECASE | re.MULTILINE
|
|
161
|
-
)
|
|
162
|
-
return match.group(1).strip() if match else None
|
|
163
|
-
|
|
164
|
-
def _extract_prerequisites(self, content: str) -> list[str]:
|
|
165
|
-
"""Extract prerequisites from Execution Notes."""
|
|
166
|
-
prereq_match = re.search(
|
|
167
|
-
r"###\s+Prerequisites\s*\n\n(.+?)(?=\n###|\Z)", content, re.DOTALL | re.IGNORECASE
|
|
168
|
-
)
|
|
169
|
-
if not prereq_match:
|
|
170
|
-
return []
|
|
171
|
-
|
|
172
|
-
prereq_text = prereq_match.group(1)
|
|
173
|
-
# Extract list items
|
|
174
|
-
items = re.findall(r"^[-*]\s*(.+)$", prereq_text, re.MULTILINE)
|
|
175
|
-
return [item.strip() for item in items]
|
|
176
|
-
|
|
177
|
-
def _extract_execution_notes(self, content: str) -> dict[str, Any]:
|
|
178
|
-
"""Extract execution notes section."""
|
|
179
|
-
notes_match = re.search(
|
|
180
|
-
r"##\s+Execution\s+Notes\s*\n\n(.+?)(?=\n##\s+Definition|\Z)",
|
|
181
|
-
content,
|
|
182
|
-
re.DOTALL | re.IGNORECASE,
|
|
183
|
-
)
|
|
184
|
-
if not notes_match:
|
|
185
|
-
return {}
|
|
186
|
-
|
|
187
|
-
notes_text = notes_match.group(1)
|
|
188
|
-
notes: dict[str, Any] = {}
|
|
189
|
-
|
|
190
|
-
# Extract subsections
|
|
191
|
-
prereq_match = re.search(
|
|
192
|
-
r"###\s+Prerequisites\s*\n\n(.+?)(?=\n###|\Z)", notes_text, re.DOTALL | re.IGNORECASE
|
|
193
|
-
)
|
|
194
|
-
if prereq_match:
|
|
195
|
-
notes["prerequisites"] = prereq_match.group(1).strip()
|
|
196
|
-
|
|
197
|
-
tech_match = re.search(
|
|
198
|
-
r"###\s+Technical\s+Decisions\s+Required\s*\n\n(.+?)(?=\n###|\Z)",
|
|
199
|
-
notes_text,
|
|
200
|
-
re.DOTALL | re.IGNORECASE,
|
|
201
|
-
)
|
|
202
|
-
if tech_match:
|
|
203
|
-
notes["technical_decisions"] = tech_match.group(1).strip()
|
|
204
|
-
|
|
205
|
-
risk_match = re.search(
|
|
206
|
-
r"###\s+Risk\s+Mitigation\s*\n\n(.+?)(?=\n###|\Z)",
|
|
207
|
-
notes_text,
|
|
208
|
-
re.DOTALL | re.IGNORECASE,
|
|
209
|
-
)
|
|
210
|
-
if risk_match:
|
|
211
|
-
notes["risk_mitigation"] = risk_match.group(1).strip()
|
|
212
|
-
|
|
213
|
-
return notes
|
|
214
|
-
|
|
215
|
-
def _extract_definition_of_done(self, content: str) -> list[str]:
|
|
216
|
-
"""Extract Definition of Done checklist items."""
|
|
217
|
-
dod_match = re.search(
|
|
218
|
-
r"##\s+Definition\s+of\s+Done\s*\n\n(.+?)(?=\n##\s+Status|\Z)",
|
|
219
|
-
content,
|
|
220
|
-
re.DOTALL | re.IGNORECASE,
|
|
221
|
-
)
|
|
222
|
-
if not dod_match:
|
|
223
|
-
return []
|
|
224
|
-
|
|
225
|
-
dod_text = dod_match.group(1)
|
|
226
|
-
# Extract checklist items: "- [ ] ..." or "- [x] ..."
|
|
227
|
-
items = re.findall(r"^[-*]\s*\[[ xX]\]\s*(.+)$", dod_text, re.MULTILINE)
|
|
228
|
-
return [item.strip() for item in items]
|
|
229
|
-
|
|
230
|
-
def _extract_status(self, content: str) -> str | None:
|
|
231
|
-
"""Extract Epic status."""
|
|
232
|
-
# Look for "## Status: ✅ COMPLETE" or similar
|
|
233
|
-
match = re.search(
|
|
234
|
-
r"##\s+Status:\s*(.+?)(?:\n|$)", content, re.IGNORECASE | re.MULTILINE
|
|
235
|
-
)
|
|
236
|
-
return match.group(1).strip() if match else None
|
|
237
|
-
|
|
238
|
-
def _extract_stories(self, content: str, epic_number: int) -> list[Story]:
|
|
239
|
-
"""Extract all stories from content."""
|
|
240
|
-
stories: list[Story] = []
|
|
241
|
-
|
|
242
|
-
# Find Stories section
|
|
243
|
-
stories_match = re.search(
|
|
244
|
-
r"##\s+Stories\s*\n\n(.+?)(?=\n##\s+(?:Execution|Definition)|\Z)",
|
|
245
|
-
content,
|
|
246
|
-
re.DOTALL,
|
|
247
|
-
)
|
|
248
|
-
if not stories_match:
|
|
249
|
-
return stories
|
|
250
|
-
|
|
251
|
-
stories_text = stories_match.group(1)
|
|
252
|
-
|
|
253
|
-
# Split by numbered stories: "1. **Story X.Y: Title**" or "**Story X.Y: Title**"
|
|
254
|
-
story_pattern = r"(\d+\.\s*\*\*Story\s+(\d+)\.(\d+):\s*(.+?)\*\*|"
|
|
255
|
-
story_pattern += r"\*\*Story\s+(\d+)\.(\d+):\s*(.+?)\*\*)"
|
|
256
|
-
story_pattern += r"(.+?)(?=\d+\.\s*\*\*Story|\*\*Story|\Z)"
|
|
257
|
-
|
|
258
|
-
matches = re.finditer(story_pattern, stories_text, re.DOTALL)
|
|
259
|
-
|
|
260
|
-
for match in matches:
|
|
261
|
-
# Handle both patterns
|
|
262
|
-
if match.group(2): # Pattern 1: "1. **Story X.Y: Title**"
|
|
263
|
-
story_num = int(match.group(3))
|
|
264
|
-
title = match.group(4).strip()
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
)
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
306
|
-
|
|
307
|
-
|
|
308
|
-
|
|
309
|
-
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
|
|
313
|
-
|
|
314
|
-
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
|
|
321
|
-
|
|
322
|
-
|
|
323
|
-
|
|
324
|
-
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
|
|
330
|
-
|
|
331
|
-
|
|
332
|
-
#
|
|
333
|
-
# - "
|
|
334
|
-
# - "
|
|
335
|
-
# - "
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
r"
|
|
339
|
-
r"
|
|
340
|
-
r"
|
|
341
|
-
r"Story\s+(\d+)\.(\d+)
|
|
342
|
-
|
|
343
|
-
|
|
344
|
-
|
|
345
|
-
|
|
346
|
-
|
|
347
|
-
|
|
348
|
-
|
|
349
|
-
|
|
350
|
-
dep_id
|
|
351
|
-
|
|
352
|
-
|
|
353
|
-
|
|
354
|
-
|
|
355
|
-
|
|
356
|
-
|
|
357
|
-
"
|
|
358
|
-
|
|
359
|
-
|
|
360
|
-
|
|
361
|
-
)
|
|
362
|
-
|
|
363
|
-
|
|
364
|
-
|
|
365
|
-
|
|
366
|
-
|
|
367
|
-
|
|
368
|
-
|
|
369
|
-
|
|
370
|
-
|
|
371
|
-
|
|
372
|
-
|
|
373
|
-
|
|
374
|
-
|
|
375
|
-
|
|
376
|
-
|
|
377
|
-
|
|
378
|
-
|
|
379
|
-
|
|
380
|
-
|
|
381
|
-
|
|
382
|
-
|
|
383
|
-
|
|
384
|
-
|
|
385
|
-
|
|
386
|
-
|
|
387
|
-
|
|
388
|
-
|
|
389
|
-
|
|
390
|
-
|
|
391
|
-
#
|
|
392
|
-
in_degree: dict[str, int] = {
|
|
393
|
-
|
|
394
|
-
|
|
395
|
-
|
|
396
|
-
|
|
397
|
-
|
|
398
|
-
|
|
399
|
-
|
|
400
|
-
|
|
401
|
-
|
|
402
|
-
|
|
403
|
-
|
|
404
|
-
|
|
405
|
-
|
|
406
|
-
|
|
407
|
-
|
|
408
|
-
|
|
409
|
-
|
|
410
|
-
|
|
411
|
-
|
|
412
|
-
|
|
413
|
-
|
|
414
|
-
|
|
415
|
-
|
|
416
|
-
|
|
417
|
-
|
|
418
|
-
|
|
419
|
-
|
|
420
|
-
|
|
421
|
-
|
|
422
|
-
|
|
423
|
-
|
|
1
|
+
"""
|
|
2
|
+
Epic Document Parser
|
|
3
|
+
|
|
4
|
+
Parses Epic PRD markdown documents to extract stories, dependencies, and acceptance criteria.
|
|
5
|
+
Supports BMAD-standard Epic format.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import re
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
from typing import Any
|
|
11
|
+
|
|
12
|
+
from ..workflow.common_enums import Priority
|
|
13
|
+
|
|
14
|
+
from .models import AcceptanceCriterion, EpicDocument, Story
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class EpicParser:
    """
    Parser for Epic markdown documents.

    Supports parsing Epic documents in the format:
    - Epic Goal
    - Epic Description
    - Stories (numbered X.Y)
    - Dependencies
    - Acceptance Criteria
    - Execution Notes
    - Definition of Done
    """

    def __init__(self, project_root: Path | None = None):
        """
        Initialize Epic parser.

        Args:
            project_root: Root directory of the project (defaults to current directory)
        """
        self.project_root = project_root or Path.cwd()

    def parse(self, epic_path: Path | str) -> EpicDocument:
        """
        Parse an Epic document.

        Args:
            epic_path: Path to Epic markdown file

        Returns:
            Parsed EpicDocument

        Raises:
            FileNotFoundError: If Epic file doesn't exist
            ValueError: If parsing fails
        """
        epic_path = Path(epic_path)
        if not epic_path.is_absolute():
            # Resolution order: relative to project root, then docs/prd/<name>,
            # then fall back to project_root / path (so the error message below
            # reports a concrete absolute location).
            if (self.project_root / epic_path).exists():
                epic_path = self.project_root / epic_path
            elif (self.project_root / "docs" / "prd" / epic_path.name).exists():
                epic_path = self.project_root / "docs" / "prd" / epic_path.name
            else:
                epic_path = self.project_root / epic_path

        if not epic_path.exists():
            raise FileNotFoundError(f"Epic document not found: {epic_path}")

        content = epic_path.read_text(encoding="utf-8")

        # Extract Epic metadata
        epic_number = self._extract_epic_number(content, epic_path)
        title = self._extract_title(content)
        goal = self._extract_goal(content)
        description = self._extract_description(content)
        priority = self._extract_priority(content)
        timeline = self._extract_timeline(content)
        prerequisites = self._extract_prerequisites(content)
        execution_notes = self._extract_execution_notes(content)
        definition_of_done = self._extract_definition_of_done(content)
        status = self._extract_status(content)

        # Extract stories
        stories = self._extract_stories(content, epic_number)

        return EpicDocument(
            epic_number=epic_number,
            title=title,
            goal=goal,
            description=description,
            stories=stories,
            priority=priority,
            timeline=timeline,
            prerequisites=prerequisites,
            execution_notes=execution_notes,
            definition_of_done=definition_of_done,
            status=status,
            file_path=epic_path,
        )

    def _extract_epic_number(self, content: str, file_path: Path) -> int:
        """Extract Epic number from content or filename.

        Raises:
            ValueError: If neither the title nor the filename yields a number.
        """
        # Try to extract from title: "# Epic 8: ..."
        match = re.search(r"^#\s+Epic\s+(\d+):", content, re.MULTILINE)
        if match:
            return int(match.group(1))

        # Try to extract from filename: "epic-8-*.md"
        match = re.search(r"epic-(\d+)", file_path.name, re.IGNORECASE)
        if match:
            return int(match.group(1))

        raise ValueError(f"Could not extract Epic number from {file_path}")

    def _extract_title(self, content: str) -> str:
        """Extract Epic title from content (falls back to "Untitled Epic")."""
        # Title is usually in the first heading: "# Epic 8: Title"
        match = re.search(r"^#\s+Epic\s+\d+:\s*(.+)$", content, re.MULTILINE)
        if match:
            return match.group(1).strip()
        return "Untitled Epic"

    def _extract_goal(self, content: str) -> str:
        """Extract Epic Goal section text, or "" when absent."""
        goal_match = re.search(
            r"##\s+Epic\s+Goal\s*\n\n(.+?)(?=\n##|\Z)", content, re.DOTALL
        )
        if goal_match:
            return goal_match.group(1).strip()
        return ""

    def _extract_description(self, content: str) -> str:
        """Extract Epic Description section text, or "" when absent."""
        desc_match = re.search(
            r"##\s+Epic\s+Description\s*\n\n(.+?)(?=\n##\s+(?:Stories|Execution|Definition)|\Z)",
            content,
            re.DOTALL,
        )
        if desc_match:
            return desc_match.group(1).strip()
        return ""

    def _extract_priority(self, content: str) -> Priority | None:
        """Extract priority from content, or None when absent/unrecognized."""
        # Look for "Priority: High" or similar patterns
        match = re.search(
            r"(?:Priority|priority):\s*(\w+)", content, re.IGNORECASE | re.MULTILINE
        )
        if not match:
            return None
        priority_str = match.group(1).strip().lower()
        try:
            return Priority(priority_str)
        except ValueError:
            # If priority string doesn't match enum, return None
            return None

    def _extract_timeline(self, content: str) -> str | None:
        """Extract timeline from content, or None when absent."""
        # Look for "Timeline: 1-2 weeks" or similar
        match = re.search(
            r"(?:Timeline|timeline):\s*(.+?)(?:\n|$)", content, re.IGNORECASE | re.MULTILINE
        )
        return match.group(1).strip() if match else None

    def _extract_prerequisites(self, content: str) -> list[str]:
        """Extract prerequisite bullet items from the Prerequisites subsection."""
        prereq_match = re.search(
            r"###\s+Prerequisites\s*\n\n(.+?)(?=\n###|\Z)", content, re.DOTALL | re.IGNORECASE
        )
        if not prereq_match:
            return []

        prereq_text = prereq_match.group(1)
        # Extract list items
        items = re.findall(r"^[-*]\s*(.+)$", prereq_text, re.MULTILINE)
        return [item.strip() for item in items]

    def _extract_execution_notes(self, content: str) -> dict[str, Any]:
        """Extract the Execution Notes section into known subsections.

        Returns:
            Dict with any of the keys "prerequisites", "technical_decisions",
            "risk_mitigation" whose subsections were present; {} when the
            section is missing entirely.
        """
        notes_match = re.search(
            r"##\s+Execution\s+Notes\s*\n\n(.+?)(?=\n##\s+Definition|\Z)",
            content,
            re.DOTALL | re.IGNORECASE,
        )
        if not notes_match:
            return {}

        notes_text = notes_match.group(1)
        notes: dict[str, Any] = {}

        # Extract subsections
        prereq_match = re.search(
            r"###\s+Prerequisites\s*\n\n(.+?)(?=\n###|\Z)", notes_text, re.DOTALL | re.IGNORECASE
        )
        if prereq_match:
            notes["prerequisites"] = prereq_match.group(1).strip()

        tech_match = re.search(
            r"###\s+Technical\s+Decisions\s+Required\s*\n\n(.+?)(?=\n###|\Z)",
            notes_text,
            re.DOTALL | re.IGNORECASE,
        )
        if tech_match:
            notes["technical_decisions"] = tech_match.group(1).strip()

        risk_match = re.search(
            r"###\s+Risk\s+Mitigation\s*\n\n(.+?)(?=\n###|\Z)",
            notes_text,
            re.DOTALL | re.IGNORECASE,
        )
        if risk_match:
            notes["risk_mitigation"] = risk_match.group(1).strip()

        return notes

    def _extract_definition_of_done(self, content: str) -> list[str]:
        """Extract Definition of Done checklist items (checked or unchecked)."""
        dod_match = re.search(
            r"##\s+Definition\s+of\s+Done\s*\n\n(.+?)(?=\n##\s+Status|\Z)",
            content,
            re.DOTALL | re.IGNORECASE,
        )
        if not dod_match:
            return []

        dod_text = dod_match.group(1)
        # Extract checklist items: "- [ ] ..." or "- [x] ..."
        items = re.findall(r"^[-*]\s*\[[ xX]\]\s*(.+)$", dod_text, re.MULTILINE)
        return [item.strip() for item in items]

    def _extract_status(self, content: str) -> str | None:
        """Extract Epic status, or None when absent."""
        # Look for "## Status: ✅ COMPLETE" or similar
        match = re.search(
            r"##\s+Status:\s*(.+?)(?:\n|$)", content, re.IGNORECASE | re.MULTILINE
        )
        return match.group(1).strip() if match else None

    def _extract_stories(self, content: str, epic_number: int) -> list[Story]:
        """Extract all stories from the "## Stories" section of the content."""
        stories: list[Story] = []

        # Find Stories section
        stories_match = re.search(
            r"##\s+Stories\s*\n\n(.+?)(?=\n##\s+(?:Execution|Definition)|\Z)",
            content,
            re.DOTALL,
        )
        if not stories_match:
            return stories

        stories_text = stories_match.group(1)

        # Split by numbered stories: "1. **Story X.Y: Title**" or "**Story X.Y: Title**"
        # Group layout: 1 = whole header alternation, 2-4 = epic/story/title for the
        # numbered form, 5-7 = epic/story/title for the bare form, 8 = story body.
        story_pattern = r"(\d+\.\s*\*\*Story\s+(\d+)\.(\d+):\s*(.+?)\*\*|"
        story_pattern += r"\*\*Story\s+(\d+)\.(\d+):\s*(.+?)\*\*)"
        story_pattern += r"(.+?)(?=\d+\.\s*\*\*Story|\*\*Story|\Z)"

        matches = re.finditer(story_pattern, stories_text, re.DOTALL)

        for match in matches:
            # Handle both patterns; body is always group 8: (alt1|alt2)(.+?)
            if match.group(2):  # Pattern 1: "1. **Story X.Y: Title**"
                story_num = int(match.group(3))
                title = match.group(4).strip()
            else:  # Pattern 2: "**Story X.Y: Title**"
                story_num = int(match.group(6))
                title = match.group(7).strip()
            body = match.group(8)

            # Extract description (everything before acceptance criteria)
            desc_match = re.search(
                r"^(.+?)(?=\n\s*-\s*Acceptance\s+criteria:|\n\s*Acceptance\s+criteria:|\Z)",
                body,
                re.DOTALL | re.IGNORECASE,
            )
            description = desc_match.group(1).strip() if desc_match else body.strip()

            # Extract acceptance criteria
            acceptance_criteria = self._extract_acceptance_criteria(body)

            # Extract dependencies (look for "depends on", "requires", etc.)
            dependencies = self._extract_story_dependencies(body, epic_number, story_num)

            # Extract story points if present
            story_points = self._extract_story_points(body)

            story = Story(
                epic_number=epic_number,
                story_number=story_num,
                title=title,
                description=description,
                acceptance_criteria=acceptance_criteria,
                dependencies=dependencies,
                story_points=story_points,
            )

            stories.append(story)

        return stories

    def _extract_acceptance_criteria(self, story_text: str) -> list[AcceptanceCriterion]:
        """Extract acceptance criteria bullet items from story text."""
        criteria: list[AcceptanceCriterion] = []

        # Look for "Acceptance criteria:" or "- Acceptance criteria:"
        ac_match = re.search(
            r"(?:Acceptance\s+criteria|Acceptance\s+Criteria):\s*\n(.+?)(?=\n\s*\d+\.|\Z)",
            story_text,
            re.DOTALL | re.IGNORECASE,
        )
        if not ac_match:
            return criteria

        ac_text = ac_match.group(1)

        # Extract list items (lines starting with "-" or numbered)
        items = re.findall(r"^[-*]\s*(.+)$", ac_text, re.MULTILINE)
        for item in items:
            item = item.strip()
            if item:
                criteria.append(AcceptanceCriterion(description=item))

        return criteria

    def _extract_story_dependencies(
        self, story_text: str, epic_number: int, story_number: int
    ) -> list[str]:
        """Extract story dependency IDs ("E.S") mentioned in the story text.

        Order of first mention is preserved; duplicates are dropped.
        """
        dependencies: list[str] = []

        # Look for dependency patterns:
        # - "depends on Story X.Y"
        # - "requires Story X.Y"
        # - "prerequisite: Story X.Y"
        # - "Story X.Y must be completed first"
        patterns = [
            r"depends?\s+on\s+Story\s+(\d+)\.(\d+)",
            r"requires?\s+Story\s+(\d+)\.(\d+)",
            r"prerequisite:?\s+Story\s+(\d+)\.(\d+)",
            r"Story\s+(\d+)\.(\d+)\s+must\s+be\s+completed",
            r"after\s+Story\s+(\d+)\.(\d+)",
        ]

        for pattern in patterns:
            matches = re.finditer(pattern, story_text, re.IGNORECASE)
            for match in matches:
                dep_epic = int(match.group(1))
                dep_story = int(match.group(2))
                dep_id = f"{dep_epic}.{dep_story}"
                if dep_id not in dependencies:
                    dependencies.append(dep_id)

        return dependencies

    def _extract_story_points(self, story_text: str) -> int | None:
        """Extract story points if present, else None."""
        # Look for "Story points: 3" or "Points: 5"
        match = re.search(
            r"(?:Story\s+points?|Points?):\s*(\d+)", story_text, re.IGNORECASE
        )
        return int(match.group(1)) if match else None

    def build_dependency_graph(self, epic: EpicDocument) -> dict[str, list[str]]:
        """
        Build dependency graph for stories.

        Returns:
            Dictionary mapping story_id -> list of dependent story_ids
        """
        graph: dict[str, list[str]] = {}

        for story in epic.stories:
            graph[story.story_id] = story.dependencies.copy()

        return graph

    def topological_sort(self, epic: EpicDocument) -> list[Story]:
        """
        Topologically sort stories by dependencies.

        Returns:
            List of stories in execution order (dependencies first)

        Raises:
            ValueError: If circular dependencies detected
        """
        # Local stdlib import: deque gives O(1) popleft for Kahn's queue
        # (list.pop(0) is O(n) per pop, O(n^2) overall).
        from collections import deque

        graph = self.build_dependency_graph(epic)
        story_map = {s.story_id: s for s in epic.stories}

        # Kahn's algorithm: in_degree[story] = number of prerequisites
        # graph[story] = deps means story depends on deps, so edges dep->story
        in_degree: dict[str, int] = {
            s.story_id: len(s.dependencies) for s in epic.stories
        }

        # Reverse graph: for each dep, which stories depend on it
        rev_graph: dict[str, list[str]] = {s.story_id: [] for s in epic.stories}
        for story_id, deps in graph.items():
            for dep in deps:
                # Dependencies on stories outside this epic are ignored here;
                # they never decrement, which is why in_degree uses len(deps).
                if dep in rev_graph:
                    rev_graph[dep].append(story_id)

        # Queue: stories with no prerequisites
        queue = deque(sid for sid, degree in in_degree.items() if degree == 0)
        result: list[Story] = []

        while queue:
            story_id = queue.popleft()
            result.append(story_map[story_id])

            # Decrement in_degree of stories that depended on this one
            for dependent in rev_graph.get(story_id, []):
                in_degree[dependent] -= 1
                if in_degree[dependent] == 0:
                    queue.append(dependent)

        # Check for circular dependencies: any story never reaching in_degree 0
        if len(result) != len(epic.stories):
            remaining = {s.story_id for s in epic.stories} - {
                s.story_id for s in result
            }
            raise ValueError(
                f"Circular dependencies detected. Stories involved: {', '.join(remaining)}"
            )

        return result